mirror of https://github.com/kubernetes/kops.git
Update k8s dependencies to v1.18.6
This commit is contained in:
parent e5d851373e
commit cf06d6ae93
go.mod (54 changes)
@@ -2,49 +2,49 @@ module k8s.io/kops
 go 1.14
 
-// Version kubernetes-1.18.0 => tag v0.18.1
+// Version kubernetes-1.18.0 => tag v0.18.6
 
-replace k8s.io/api => k8s.io/api v0.18.1
+replace k8s.io/api => k8s.io/api v0.18.6
 
-replace k8s.io/apimachinery => k8s.io/apimachinery v0.18.1
+replace k8s.io/apimachinery => k8s.io/apimachinery v0.18.6
 
-replace k8s.io/client-go => k8s.io/client-go v0.18.1
+replace k8s.io/client-go => k8s.io/client-go v0.18.6
 
-replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.1
+replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.6
 
-replace k8s.io/kubectl => k8s.io/kubectl v0.18.1
+replace k8s.io/kubectl => k8s.io/kubectl v0.18.6
 
-replace k8s.io/apiserver => k8s.io/apiserver v0.18.1
+replace k8s.io/apiserver => k8s.io/apiserver v0.18.6
 
-replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.1
+replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.6
 
-replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.1
+replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.6
 
-replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.1
+replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.6
 
-replace k8s.io/cri-api => k8s.io/cri-api v0.18.1
+replace k8s.io/cri-api => k8s.io/cri-api v0.18.6
 
-replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.1
+replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.6
 
-replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.1
+replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.6
 
-replace k8s.io/component-base => k8s.io/component-base v0.18.1
+replace k8s.io/component-base => k8s.io/component-base v0.18.6
 
-replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.1
+replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.6
 
-replace k8s.io/metrics => k8s.io/metrics v0.18.1
+replace k8s.io/metrics => k8s.io/metrics v0.18.6
 
-replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.1
+replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.6
 
-replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.1
+replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.6
 
-replace k8s.io/kubelet => k8s.io/kubelet v0.18.1
+replace k8s.io/kubelet => k8s.io/kubelet v0.18.6
 
-replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.1
+replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.6
 
-replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.1
+replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.6
 
-replace k8s.io/code-generator => k8s.io/code-generator v0.18.1
+replace k8s.io/code-generator => k8s.io/code-generator v0.18.6
 
 replace github.com/gophercloud/gophercloud => github.com/gophercloud/gophercloud v0.9.0
@@ -108,12 +108,12 @@ require (
 	gopkg.in/inf.v0 v0.9.1
 	gopkg.in/yaml.v2 v2.2.8
 	honnef.co/go/tools v0.0.1-2020.1.4
-	k8s.io/api v0.18.1
-	k8s.io/apimachinery v0.18.1
-	k8s.io/cli-runtime v0.18.1
-	k8s.io/client-go v0.18.1
+	k8s.io/api v0.18.6
+	k8s.io/apimachinery v0.18.6
+	k8s.io/cli-runtime v0.18.6
+	k8s.io/client-go v0.18.6
 	k8s.io/cloud-provider-openstack v1.17.0
-	k8s.io/component-base v0.18.1
+	k8s.io/component-base v0.18.6
 	k8s.io/gengo v0.0.0-20200710205751-c0d492a0f3ca
 	k8s.io/helm v2.9.0+incompatible
 	k8s.io/klog v1.0.0
go.sum (67 changes)
@@ -1129,29 +1129,28 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.18.1 h1:pnHr0LH69kvL29eHldoepUDKTuiOejNZI2A1gaxve3Q=
-k8s.io/api v0.18.1/go.mod h1:3My4jorQWzSs5a+l7Ge6JBbIxChLnY8HnuT58ZWolss=
-k8s.io/apiextensions-apiserver v0.18.1 h1:nHxmulAeoyx2YkcZLRqEAI5OmL8NsI5sfXjc5ph3ha8=
-k8s.io/apiextensions-apiserver v0.18.1/go.mod h1:O6BY08Jq7b4AkLNYgbEp4HOqJ0rYkFIMtXLZuBYfGes=
-k8s.io/apimachinery v0.18.1 h1:hKPYcQRPLQoG2e7fKkVl0YCvm9TBefXTfGILa9vjVVk=
-k8s.io/apimachinery v0.18.1/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
-k8s.io/apiserver v0.18.1/go.mod h1:RlG/JfWP0FMn1ZS8iUkMY+yIQ1n48Eqxfhh9xyJ2KmI=
-k8s.io/cli-runtime v0.18.1 h1:2FcLBAEbILCW7wykG0+PPRMD3PjqF5RpIlVGOUHLKrE=
-k8s.io/cli-runtime v0.18.1/go.mod h1:tRhwtJJy3h97Yikid4Id19A0LI4ojN8t980h0Ax7OuY=
-k8s.io/client-go v0.18.1 h1:2+fnu4LwKJjZVOwijkm1UqZG9aQoFsKEpipOzdfcTD8=
-k8s.io/client-go v0.18.1/go.mod h1:iCikYRiXOj/yRRFE/aWqrpPtDt4P2JVWhtHkmESTcfY=
-k8s.io/cloud-provider v0.18.1 h1:hx8FhsyreFntUlXeJPMXpOHluRWNWzkZbgXWOLWY1QA=
-k8s.io/cloud-provider v0.18.1/go.mod h1:AD7y8xd55v2W3UHtD9ukhVnRU/G8S4DeeHX2jZw8xr8=
+k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE=
+k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
+k8s.io/apiextensions-apiserver v0.18.6 h1:vDlk7cyFsDyfwn2rNAO2DbmUbvXy5yT5GE3rrqOzaMo=
+k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M=
+k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag=
+k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
+k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg=
+k8s.io/cli-runtime v0.18.6 h1:I8BkH5NyqMQ4zqUBmpXJ1LxIqpCH88H/1edPkPVWzjQ=
+k8s.io/cli-runtime v0.18.6/go.mod h1:+G/WTNqHgUv636e5y7rhOQ7epUbRXnwmPnhOhD6t9uM=
+k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw=
+k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
+k8s.io/cloud-provider v0.18.6 h1:olYNA3/gQoy7XiSlYP16HFMK2jBWIVWHo0DXTtSvPuo=
+k8s.io/cloud-provider v0.18.6/go.mod h1:QnPLLdFkvtx1dEyVMaPUdzVWB+ECzEf+PA3DXwIr8bo=
 k8s.io/cloud-provider-openstack v1.17.0 h1:n6hkiJSekJEa2LIlu+1uGfMt5jwIvPO2qnx5tcJtlYo=
 k8s.io/cloud-provider-openstack v1.17.0/go.mod h1:0pc0EbtR72bEHmhjKwzrY60oeiQzlCONjZeDe8DEgN8=
-k8s.io/cluster-bootstrap v0.18.1/go.mod h1:BjicFjTnVRiO2YpeRKQJZRdKRozYO7imxqkOKUcUuYs=
-k8s.io/code-generator v0.18.1 h1:emaWQKnyNOhMgmp6NKxGjB3p9cCLylM+fAir45F2ZlQ=
-k8s.io/code-generator v0.18.1/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
-k8s.io/component-base v0.18.1 h1:gWqHqhLhLoFz1P+MpxWoF3cdDjfMalrGein/UgyjzfM=
-k8s.io/component-base v0.18.1/go.mod h1:r/fjelIVTKZaRczQRx/YzP2CJfhefUsUE4llMc9Pg4g=
-k8s.io/cri-api v0.18.1/go.mod h1:OJtpjDvfsKoLGhvcc0qfygved0S0dGX56IJzPbqTG1s=
-k8s.io/csi-translation-lib v0.18.1 h1:wF3JZkI6YkFmHGz3EQ5z0K9IhUrx/qZ9pGDFRUlj6GE=
-k8s.io/csi-translation-lib v0.18.1/go.mod h1:8KSCH4LLiGytrz3YPdymTv6F/+5xNHbu4ZUdxXxk8tA=
+k8s.io/cluster-bootstrap v0.18.6/go.mod h1:lnM1CXtPImlEBTh5874ZI+ofZzdIy1t2JV9Y+NxvojU=
+k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
+k8s.io/component-base v0.18.6 h1:Wd6cHGwJN2qpufnirVOB3oMhyhbioGsKEi5HeDBsV+s=
+k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14=
+k8s.io/cri-api v0.18.6/go.mod h1:OJtpjDvfsKoLGhvcc0qfygved0S0dGX56IJzPbqTG1s=
+k8s.io/csi-translation-lib v0.18.6 h1:RNtZr7+SScf0QXf2I5HC09fjDKsobCpBxN7ZHrXGK7U=
+k8s.io/csi-translation-lib v0.18.6/go.mod h1:w13PRDbRWol3Z9lM3RjxRd5vi/R9wog1DQHAbzzuKOI=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM=
 k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -1170,23 +1169,23 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
 k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/kube-aggregator v0.18.1/go.mod h1:cXwR5+w/IZ/6tbDGFz3aEYrZctFN9R3X6u0gUcWwVzA=
-k8s.io/kube-controller-manager v0.18.1/go.mod h1:HFp15+aGPbGns4K9jD9TxJVuc9eeiylCtjgCunRV3B4=
+k8s.io/kube-aggregator v0.18.6/go.mod h1:MKm8inLHdeiXQJCl6UdmgMosRrqJgyxO2obTXOkey/s=
+k8s.io/kube-controller-manager v0.18.6/go.mod h1:T+Ayh47y1IrvwDSUAh4QT/aIrRcKWlvgdqV5PHrMwNs=
 k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
 k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
-k8s.io/kube-proxy v0.18.1/go.mod h1:kr+mNt6qTumf4alT9NN4wpMefHDejdtzy5CLINUjj3g=
-k8s.io/kube-scheduler v0.18.1/go.mod h1:yHhIPctl3FcdZDLHsOqDe48cqqEgiokPE/wGpqaCWS0=
-k8s.io/kubectl v0.18.1 h1:2b5x4BPlZd+ypSk0NBOKoVjbqkTj5Xp3kAzHg9O12yo=
-k8s.io/kubectl v0.18.1/go.mod h1:7bLKKp5gSrtuSCIbclgifVOCwdLBs+h3oETVqLn1928=
-k8s.io/kubelet v0.18.1/go.mod h1:wlhv8yVMnbTp0haWYfuOzD7fqq7F35bWcIFIpdy9w9A=
+k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
+k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+k8s.io/kube-proxy v0.18.6/go.mod h1:r3ScLxYTuskh8l2dDfAPdrFK3QnWIMsZI/+Bq5kkmWc=
+k8s.io/kube-scheduler v0.18.6/go.mod h1:J+GApeR/QkU6eYonXir0i7+rcUVWzZPZbNHqjq4FpoQ=
+k8s.io/kubectl v0.18.6 h1:IFPNuLPkZ59vSGQzynXY8XGz9yuOSRpkJupnobdYvO4=
+k8s.io/kubectl v0.18.6/go.mod h1:3TLzFOrF9h4mlRPAvdNkDbs5NWspN4e0EnPnEB41CGo=
+k8s.io/kubelet v0.18.6/go.mod h1:5e0PJYialWMWZgsYWJqI6zVW58y+MaQvmOQwEGFF4Xc=
 k8s.io/kubernetes v1.17.0/go.mod h1:NbNV+69yL3eKiKDJ+ZEjqOplN3BFXKBeunzkoOy8WLo=
-k8s.io/legacy-cloud-providers v0.18.1 h1:zgThxmGv3o+I6jWq34z3HjROXaWiGsQ+Re8KwA4kpyI=
-k8s.io/legacy-cloud-providers v0.18.1/go.mod h1:Qfp8J+lK/DnBVZlha1AYJMZkj1rS8F3T86gBNdMokWI=
-k8s.io/metrics v0.18.1/go.mod h1:UnoYSbaiakbuBYgpDEsXjVjk843af/WRHNPR3Wx+2XI=
+k8s.io/legacy-cloud-providers v0.18.6 h1:KTrEUdTj3FWgQw8KANgZFmWrdxxKWnrPUYyFWMujnk0=
+k8s.io/legacy-cloud-providers v0.18.6/go.mod h1:0bU6t0dTOd0YkcByIdjx7WD4ihApa+aUrTgVJpqciZU=
+k8s.io/metrics v0.18.6/go.mod h1:iAwGeabusQNO3duHDM7BBExTUB8L+iq8PM7N9EtQw6g=
 k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8=
-k8s.io/sample-apiserver v0.18.1/go.mod h1:ir6PkLhIblVb9nNHzd8LG77QW4mhQAwahznNAIHhOEM=
+k8s.io/sample-apiserver v0.18.6/go.mod h1:NSRGjwumFclVpq8zewaqGVwiyIR7DQbLAE6wQZ0uljI=
 k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI=
 k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
@@ -873,6 +873,9 @@ const (
 	// FieldManagerConflict is used to report when another client claims to manage this field,
 	// It should only be returned for a request using server-side apply.
 	CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict"
+	// CauseTypeResourceVersionTooLarge is used to report that the requested resource version
+	// is newer than the data observed by the API server, so the request cannot be served.
+	CauseTypeResourceVersionTooLarge CauseType = "ResourceVersionTooLarge"
 )
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
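The new cause type is machine-readable: clients detect it with apierrors.HasStatusCause, which is exactly what the client-go reflector patched further down in this commit does. A minimal round-trip sketch (the NewTimeoutError construction here is illustrative, not the apiserver's exact call site):

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Build a Status error and tag it with the new cause, roughly as a server would.
	err := apierrors.NewTimeoutError("Too large resource version", 1)
	err.ErrStatus.Details.Causes = append(err.ErrStatus.Details.Causes, metav1.StatusCause{
		Type: metav1.CauseTypeResourceVersionTooLarge,
	})
	// Client side: the same check the reflector's isTooLargeResourceVersionError uses.
	fmt.Println(apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)) // true
}
```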
@@ -178,7 +178,7 @@ func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *fiel
 	default:
 		allErrs = append(allErrs, field.Invalid(fldPath.Child("operation"), fields.Operation, "must be `Apply` or `Update`"))
 	}
-	if fields.FieldsType != "FieldsV1" {
+	if len(fields.FieldsType) > 0 && fields.FieldsType != "FieldsV1" {
 		allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`"))
 	}
 }
@@ -66,11 +66,36 @@ func Unmarshal(data []byte, v interface{}) error {
 		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
 		return convertSliceNumbers(*v, 0)
 
+	case *interface{}:
+		// Build a decoder from the given data
+		decoder := json.NewDecoder(bytes.NewBuffer(data))
+		// Preserve numbers, rather than casting to float64 automatically
+		decoder.UseNumber()
+		// Run the decode
+		if err := decoder.Decode(v); err != nil {
+			return err
+		}
+		// If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64
+		return convertInterfaceNumbers(v, 0)
+
 	default:
 		return json.Unmarshal(data, v)
 	}
 }
 
+func convertInterfaceNumbers(v *interface{}, depth int) error {
+	var err error
+	switch v2 := (*v).(type) {
+	case json.Number:
+		*v, err = convertNumber(v2)
+	case map[string]interface{}:
+		err = convertMapNumbers(v2, depth+1)
+	case []interface{}:
+		err = convertSliceNumbers(v2, depth+1)
+	}
+	return err
+}
+
 // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64.
 // values which are map[string]interface{} or []interface{} are recursively visited
 func convertMapNumbers(m map[string]interface{}, depth int) error {
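The new *interface{} case exists because plain json.Unmarshal turns every JSON number into float64. A self-contained sketch of the difference, using only the standard library:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	data := []byte(`9007199254740993`) // 2^53 + 1: not exactly representable as float64

	// Plain json.Unmarshal into interface{} yields float64 and silently loses precision.
	var plain interface{}
	_ = json.Unmarshal(data, &plain)
	fmt.Printf("%T %v\n", plain, plain) // float64 9.007199254740992e+15

	// Decoding with UseNumber keeps the literal as json.Number, so Int64 is exact.
	dec := json.NewDecoder(bytes.NewBuffer(data))
	dec.UseNumber()
	var preserved interface{}
	_ = dec.Decode(&preserved)
	n, _ := preserved.(json.Number).Int64()
	fmt.Printf("%T %v\n", preserved, n) // json.Number 9007199254740993
}
```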
@@ -55,6 +55,12 @@ func JoinPreservingTrailingSlash(elem ...string) string {
 	return result
 }
 
+// IsTimeout returns true if the given error is a network timeout error
+func IsTimeout(err error) bool {
+	neterr, ok := err.(net.Error)
+	return ok && neterr != nil && neterr.Timeout()
+}
+
 // IsProbableEOF returns true if the given error resembles a connection termination
 // scenario that would justify assuming that the watch is empty.
 // These errors are what the Go http stack returns back to us which are general
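IsTimeout classifies any error that implements net.Error and reports Timeout() == true; the watch paths below use it alongside IsProbableEOF. A small sketch with a stand-in error type (fakeTimeout is hypothetical, not from the patch):

```go
package main

import (
	"fmt"
	"io"
	"net"
)

// fakeTimeout stands in for the timeout errors the Go net stack returns.
type fakeTimeout struct{}

func (fakeTimeout) Error() string   { return "i/o timeout" }
func (fakeTimeout) Timeout() bool   { return true }
func (fakeTimeout) Temporary() bool { return false }

// isTimeout mirrors the added helper.
func isTimeout(err error) bool {
	neterr, ok := err.(net.Error)
	return ok && neterr != nil && neterr.Timeout()
}

func main() {
	fmt.Println(isTimeout(fakeTimeout{})) // true
	fmt.Println(isTimeout(io.EOF))        // false: EOF is terminal, not a timeout
}
```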
@@ -440,7 +446,7 @@ redirectLoop:
 
 		// Only follow redirects to the same host. Otherwise, propagate the redirect response back.
 		if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() {
-			break redirectLoop
+			return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname())
 		}
 
 		// Reset the connection.
@@ -286,8 +286,9 @@ func contextForChannel(parentCh <-chan struct{}) (context.Context, context.Cance
 }
 
 // BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides
-// an interface to return a timer for backoff, and caller shall backoff until Timer.C returns. If the second Backoff()
-// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained.
+// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff()
+// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in
+// undetermined behavior.
 // The BackoffManager is supposed to be called in a single-threaded environment.
 type BackoffManager interface {
 	Backoff() clock.Timer
@@ -317,7 +318,7 @@ func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Du
 			Steps:    math.MaxInt32,
 			Cap:      maxBackoff,
 		},
-		backoffTimer:         c.NewTimer(0),
+		backoffTimer:         nil,
 		initialBackoff:       initBackoff,
 		lastBackoffStart:     c.Now(),
 		backoffResetDuration: resetDuration,
@@ -334,9 +335,14 @@ func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration {
 	return b.backoff.Step()
 }
 
-// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for backoff.
+// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff.
+// The returned timer must be drained before calling Backoff() the second time
 func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer {
-	b.backoffTimer.Reset(b.getNextBackoff())
+	if b.backoffTimer == nil {
+		b.backoffTimer = b.clock.NewTimer(b.getNextBackoff())
+	} else {
+		b.backoffTimer.Reset(b.getNextBackoff())
+	}
 	return b.backoffTimer
 }
 
@@ -354,7 +360,7 @@ func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.C
 		clock:        c,
 		duration:     duration,
 		jitter:       jitter,
-		backoffTimer: c.NewTimer(0),
+		backoffTimer: nil,
 	}
 }
 
@@ -366,8 +372,15 @@ func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration {
 	return jitteredPeriod
 }
 
 // Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff.
+// The returned timer must be drained before calling Backoff() the second time
 func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer {
-	j.backoffTimer.Reset(j.getNextBackoff())
+	backoff := j.getNextBackoff()
+	if j.backoffTimer == nil {
+		j.backoffTimer = j.clock.NewTimer(backoff)
+	} else {
+		j.backoffTimer.Reset(backoff)
+	}
 	return j.backoffTimer
 }
 
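Both managers now create the timer lazily on the first Backoff() call instead of with c.NewTimer(0), so a fresh manager no longer hands out a timer whose zero-duration tick is already pending. A usage sketch under the documented contract (drain the timer before calling Backoff() again):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	bm := wait.NewJitteredBackoffManager(200*time.Millisecond, 0.5, clock.RealClock{})
	for i := 0; i < 3; i++ {
		t := bm.Backoff()
		<-t.C() // drain fully before the next Backoff() call, per the new comment
		fmt.Println("attempt", i, "at", time.Now().Format(time.StampMilli))
	}
}
```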
@@ -113,7 +113,7 @@ func (sw *StreamWatcher) receive() {
 		case io.ErrUnexpectedEOF:
 			klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
 		default:
-			if net.IsProbableEOF(err) {
+			if net.IsProbableEOF(err) || net.IsTimeout(err) {
 				klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err)
 			} else {
 				sw.result <- Event{
@@ -655,7 +655,7 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
 	if err != nil {
 		// The watch stream mechanism handles many common partial data errors, so closed
 		// connections can be retried in many cases.
-		if net.IsProbableEOF(err) {
+		if net.IsProbableEOF(err) || net.IsTimeout(err) {
 			return watch.NewEmptyWatch(), nil
 		}
 		return nil, err
@@ -82,9 +82,9 @@ type Reflector struct {
 	// observed when doing a sync with the underlying store
 	// it is thread safe, but not synchronized with the underlying store
 	lastSyncResourceVersion string
-	// isLastSyncResourceVersionGone is true if the previous list or watch request with lastSyncResourceVersion
-	// failed with an HTTP 410 (Gone) status code.
-	isLastSyncResourceVersionGone bool
+	// isLastSyncResourceVersionUnavailable is true if the previous list or watch request with
+	// lastSyncResourceVersion failed with an "expired" or "too large resource version" error.
+	isLastSyncResourceVersionUnavailable bool
 	// lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion
 	lastSyncResourceVersionMutex sync.RWMutex
 	// WatchListPageSize is the requested chunk size of initial and resync watch lists.
@@ -256,13 +256,14 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			}
 
 			list, paginatedResult, err = pager.List(context.Background(), options)
-			if isExpiredError(err) {
-				r.setIsLastSyncResourceVersionExpired(true)
-				// Retry immediately if the resource version used to list is expired.
+			if isExpiredError(err) || isTooLargeResourceVersionError(err) {
+				r.setIsLastSyncResourceVersionUnavailable(true)
+				// Retry immediately if the resource version used to list is unavailable.
 				// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
-				// continuation pages, but the pager might not be enabled, or the full list might fail because the
-				// resource version it is listing at is expired, so we need to fallback to resourceVersion="" in all
-				// to recover and ensure the reflector makes forward progress.
+				// continuation pages, but the pager might not be enabled, the full list might fail because the
+				// resource version it is listing at is expired or the cache may not yet be synced to the provided
+				// resource version. So we need to fallback to resourceVersion="" in all to recover and ensure
+				// the reflector makes forward progress.
 				list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
 			}
 			close(listCh)
@@ -292,7 +293,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			r.paginatedResult = true
 		}
 
-		r.setIsLastSyncResourceVersionExpired(false) // list was successful
+		r.setIsLastSyncResourceVersionUnavailable(false) // list was successful
 		initTrace.Step("Objects listed")
 		listMetaInterface, err := meta.ListAccessor(list)
 		if err != nil {
@@ -364,6 +365,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			AllowWatchBookmarks: true,
 		}
 
+		// start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent
+		start := r.clock.Now()
 		w, err := r.listerWatcher.Watch(options)
 		if err != nil {
 			switch {
@@ -390,11 +393,11 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
 			return nil
 		}
 
-		if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
+		if err := r.watchHandler(start, w, &resourceVersion, resyncerrc, stopCh); err != nil {
 			if err != errorStopRequested {
 				switch {
 				case isExpiredError(err):
-					// Don't set LastSyncResourceVersionExpired - LIST call with ResourceVersion=RV already
+					// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
 					// has a semantic that it returns data at least as fresh as provided RV.
 					// So first try to LIST with setting RV to resource version of last observed object.
 					klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
@@ -417,8 +420,7 @@ func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) err
 }
 
 // watchHandler watches w and keeps *resourceVersion up to date.
-func (r *Reflector) watchHandler(w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
-	start := r.clock.Now()
+func (r *Reflector) watchHandler(start time.Time, w watch.Interface, resourceVersion *string, errc chan error, stopCh <-chan struct{}) error {
 	eventCount := 0
 
 	// Stopping the watcher should be idempotent and if we return from this function there's no way
@@ -518,9 +520,9 @@ func (r *Reflector) relistResourceVersion() string {
 	r.lastSyncResourceVersionMutex.RLock()
 	defer r.lastSyncResourceVersionMutex.RUnlock()
 
-	if r.isLastSyncResourceVersionGone {
+	if r.isLastSyncResourceVersionUnavailable {
 		// Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache
-		// if the lastSyncResourceVersion is expired, we set ResourceVersion="" and list again to re-establish reflector
+		// if the lastSyncResourceVersion is unavailable, we set ResourceVersion="" and list again to re-establish reflector
 		// to the latest available ResourceVersion, using a consistent read from etcd.
 		return ""
 	}
@@ -532,12 +534,12 @@ func (r *Reflector) relistResourceVersion() string {
 	return r.lastSyncResourceVersion
 }
 
-// setIsLastSyncResourceVersionExpired sets if the last list or watch request with lastSyncResourceVersion returned a
-// expired error: HTTP 410 (Gone) Status Code.
-func (r *Reflector) setIsLastSyncResourceVersionExpired(isExpired bool) {
+// setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned
+// "expired" or "too large resource version" error.
+func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) {
 	r.lastSyncResourceVersionMutex.Lock()
 	defer r.lastSyncResourceVersionMutex.Unlock()
-	r.isLastSyncResourceVersionGone = isExpired
+	r.isLastSyncResourceVersionUnavailable = isUnavailable
 }
 
 func isExpiredError(err error) bool {
@@ -547,3 +549,7 @@ func isExpiredError(err error) bool {
 	// check when we fully drop support for Kubernetes 1.17 servers from reflectors.
 	return apierrors.IsResourceExpired(err) || apierrors.IsGone(err)
 }
+
+func isTooLargeResourceVersionError(err error) bool {
+	return apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge)
+}
@@ -35,7 +35,7 @@ import (
 var (
 	// ClusterDefaults has the same behavior as the old EnvVar and DefaultCluster fields
 	// DEPRECATED will be replaced
-	ClusterDefaults = clientcmdapi.Cluster{Server: os.Getenv("KUBERNETES_MASTER")}
+	ClusterDefaults = clientcmdapi.Cluster{Server: getDefaultServer()}
 	// DefaultClientConfig represents the legacy behavior of this package for defaulting
 	// DEPRECATED will be replace
 	DefaultClientConfig = DirectClientConfig{*clientcmdapi.NewConfig(), "", &ConfigOverrides{
@@ -43,6 +43,15 @@ var (
 	}, nil, NewDefaultClientConfigLoadingRules(), promptedCredentials{}}
 )
 
+// getDefaultServer returns a default setting for DefaultClientConfig
+// DEPRECATED
+func getDefaultServer() string {
+	if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
+		return server
+	}
+	return "http://localhost:8080"
+}
+
 // ClientConfig is used to make it easy to get an api server client
 type ClientConfig interface {
 	// RawConfig returns the merged result of all overrides
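The net effect: with KUBERNETES_MASTER unset, ClusterDefaults now points at http://localhost:8080 instead of an empty server address. A standalone sketch of the same logic:

```go
package main

import (
	"fmt"
	"os"
)

// getDefaultServer reproduces the helper added above.
func getDefaultServer() string {
	if server := os.Getenv("KUBERNETES_MASTER"); len(server) > 0 {
		return server
	}
	return "http://localhost:8080"
}

func main() {
	os.Unsetenv("KUBERNETES_MASTER")
	fmt.Println(getDefaultServer()) // http://localhost:8080 (was "" before this change)

	os.Setenv("KUBERNETES_MASTER", "https://10.0.0.1:6443")
	fmt.Println(getDefaultServer()) // https://10.0.0.1:6443
}
```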
@@ -88,6 +88,9 @@ func (cml *ConfigMapLock) Update(ctx context.Context, ler LeaderElectionRecord)
 	if err != nil {
 		return err
 	}
+	if cml.cm.Annotations == nil {
+		cml.cm.Annotations = make(map[string]string)
+	}
 	cml.cm.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes)
 	cml.cm, err = cml.Client.ConfigMaps(cml.ConfigMapMeta.Namespace).Update(ctx, cml.cm, metav1.UpdateOptions{})
 	return err
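The guard matters because assigning into a nil map panics in Go, and a ConfigMap fetched from the API can legitimately come back with nil Annotations. A minimal illustration (the annotation key is shown for flavor, not read from the package):

```go
package main

import "fmt"

func main() {
	var annotations map[string]string // nil, like unset metadata.annotations on a ConfigMap
	if annotations == nil {
		annotations = make(map[string]string) // without this, the write below panics
	}
	annotations["control-plane.alpha.kubernetes.io/leader"] = "{...}"
	fmt.Println(annotations)
}
```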
@@ -5,9 +5,9 @@ module k8s.io/cloud-provider
 go 1.13
 
 require (
-	k8s.io/api v0.18.1
-	k8s.io/apimachinery v0.18.1
-	k8s.io/client-go v0.18.1
+	k8s.io/api v0.18.6
+	k8s.io/apimachinery v0.18.6
+	k8s.io/client-go v0.18.6
 	k8s.io/klog v1.0.0
 	k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89
 )
@@ -15,7 +15,7 @@ require (
 replace (
 	golang.org/x/sys => golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a // pinned to release-branch.go1.13
 	golang.org/x/tools => golang.org/x/tools v0.0.0-20190821162956-65e3620a7ae7 // pinned to release-branch.go1.13
-	k8s.io/api => k8s.io/api v0.18.1
-	k8s.io/apimachinery => k8s.io/apimachinery v0.18.1
-	k8s.io/client-go => k8s.io/client-go v0.18.1
+	k8s.io/api => k8s.io/api v0.18.6
+	k8s.io/apimachinery => k8s.io/apimachinery v0.18.6
+	k8s.io/client-go => k8s.io/client-go v0.18.6
 )
@@ -167,16 +167,16 @@ gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
 gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.18.1/go.mod h1:3My4jorQWzSs5a+l7Ge6JBbIxChLnY8HnuT58ZWolss=
-k8s.io/apimachinery v0.18.1/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
-k8s.io/client-go v0.18.1/go.mod h1:iCikYRiXOj/yRRFE/aWqrpPtDt4P2JVWhtHkmESTcfY=
+k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
+k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
+k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
+k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
 k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
 k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
@@ -19,5 +19,6 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/cloud-provider/volume:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
@@ -110,22 +110,23 @@ func (t *azureDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
 		return nil, fmt.Errorf("pv is nil or Azure Disk source not defined on pv")
 	}
 
-	azureSource := pv.Spec.PersistentVolumeSource.AzureDisk
-
-	// refer to https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md
-	csiSource := &v1.CSIPersistentVolumeSource{
-		Driver:           AzureDiskDriverName,
-		VolumeHandle:     azureSource.DataDiskURI,
-		ReadOnly:         *azureSource.ReadOnly,
-		FSType:           *azureSource.FSType,
-		VolumeAttributes: map[string]string{azureDiskKind: "Managed"},
-	}
+	var (
+		azureSource = pv.Spec.PersistentVolumeSource.AzureDisk
+
+		// refer to https://github.com/kubernetes-sigs/azuredisk-csi-driver/blob/master/docs/driver-parameters.md
+		csiSource = &v1.CSIPersistentVolumeSource{
+			Driver:           AzureDiskDriverName,
+			VolumeAttributes: map[string]string{azureDiskKind: "Managed"},
+			VolumeHandle:     azureSource.DataDiskURI,
+		}
+	)
 
 	if azureSource.CachingMode != nil {
 		csiSource.VolumeAttributes[azureDiskCachingMode] = string(*azureSource.CachingMode)
 	}
 
 	if azureSource.FSType != nil {
 		csiSource.FSType = *azureSource.FSType
 		csiSource.VolumeAttributes[azureDiskFSType] = *azureSource.FSType
 	}
 
@@ -133,9 +134,12 @@ func (t *azureDiskCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
 		csiSource.VolumeAttributes[azureDiskKind] = string(*azureSource.Kind)
 	}
 
+	if azureSource.ReadOnly != nil {
+		csiSource.ReadOnly = *azureSource.ReadOnly
+	}
+
 	pv.Spec.PersistentVolumeSource.AzureDisk = nil
 	pv.Spec.PersistentVolumeSource.CSI = csiSource
 	pv.Spec.AccessModes = backwardCompatibleAccessModes(pv.Spec.AccessModes)
 
 	return pv, nil
 }
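The restructuring is a nil-safety fix: AzureDisk's ReadOnly and FSType are pointers, and the old struct literal dereferenced them unconditionally. A compact sketch of the failure mode and the guarded pattern (azureDiskSource is a stand-in type, not the real API struct):

```go
package main

import "fmt"

type azureDiskSource struct {
	ReadOnly *bool   // optional in the API, may be nil
	FSType   *string // optional in the API, may be nil
}

func main() {
	src := azureDiskSource{} // both fields unset, as a user manifest may leave them

	// Guarded access, like the patched translator:
	readOnly := false
	if src.ReadOnly != nil {
		readOnly = *src.ReadOnly
	}
	fmt.Println("readOnly:", readOnly)

	// The pre-patch equivalent, readOnly := *src.ReadOnly, would panic with
	// "invalid memory address or nil pointer dereference".
}
```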
@@ -18,11 +18,13 @@ package plugins
 
 import (
 	"fmt"
+	"regexp"
 	"strings"
 
 	v1 "k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog"
 )
 
 const (
@@ -32,14 +34,19 @@ const (
 	AzureFileInTreePluginName = "kubernetes.io/azure-file"
 
 	separator        = "#"
-	volumeIDTemplate = "%s#%s#%s"
+	volumeIDTemplate = "%s#%s#%s#%s"
 	// Parameter names defined in azure file CSI driver, refer to
 	// https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
 	azureFileShareName = "shareName"
+
+	secretNameTemplate     = "azure-storage-account-%s-secret"
+	defaultSecretNamespace = "default"
 )
 
 var _ InTreePlugin = &azureFileCSITranslator{}
 
+var secretNameFormatRE = regexp.MustCompile(`azure-storage-account-(.+)-secret`)
+
 // azureFileCSITranslator handles translation of PV spec from In-tree
 // Azure File to CSI Azure File and vice versa
 type azureFileCSITranslator struct{}
@@ -58,32 +65,41 @@ func (t *azureFileCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.St
 // and converts the AzureFile source to a CSIPersistentVolumeSource
 func (t *azureFileCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume) (*v1.PersistentVolume, error) {
 	if volume == nil || volume.AzureFile == nil {
-		return nil, fmt.Errorf("volume is nil or AWS EBS not defined on volume")
+		return nil, fmt.Errorf("volume is nil or Azure File not defined on volume")
 	}
 
 	azureSource := volume.AzureFile
+	accountName, err := getStorageAccountName(azureSource.SecretName)
+	if err != nil {
+		klog.Warningf("getStorageAccountName(%s) returned with error: %v", azureSource.SecretName, err)
+		accountName = azureSource.SecretName
+	}
 
-	pv := &v1.PersistentVolume{
-		ObjectMeta: metav1.ObjectMeta{
-			// Must be unique per disk as it is used as the unique part of the
-			// staging path
-			Name: fmt.Sprintf("%s-%s", AzureFileDriverName, azureSource.ShareName),
-		},
-		Spec: v1.PersistentVolumeSpec{
-			PersistentVolumeSource: v1.PersistentVolumeSource{
-				CSI: &v1.CSIPersistentVolumeSource{
-					VolumeHandle:     fmt.Sprintf(volumeIDTemplate, "", azureSource.SecretName, azureSource.ShareName),
-					ReadOnly:         azureSource.ReadOnly,
-					VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
-					NodePublishSecretRef: &v1.SecretReference{
-						Name:      azureSource.ShareName,
-						Namespace: "default",
-					},
-				},
-			},
-			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
-		},
-	}
+	var (
+		pv = &v1.PersistentVolume{
+			ObjectMeta: metav1.ObjectMeta{
+				// Must be unique per disk as it is used as the unique part of the
+				// staging path
+				Name: fmt.Sprintf("%s-%s", AzureFileDriverName, azureSource.ShareName),
+			},
+			Spec: v1.PersistentVolumeSpec{
+				PersistentVolumeSource: v1.PersistentVolumeSource{
+					CSI: &v1.CSIPersistentVolumeSource{
+						Driver:           AzureFileDriverName,
+						VolumeHandle:     fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, ""),
+						ReadOnly:         azureSource.ReadOnly,
+						VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
+						NodeStageSecretRef: &v1.SecretReference{
+							Name:      azureSource.SecretName,
+							Namespace: defaultSecretNamespace,
+						},
+					},
+				},
+				AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
+			},
+		}
+	)
 
 	return pv, nil
 }
@@ -95,23 +111,33 @@ func (t *azureFileCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume)
 	}
 
 	azureSource := pv.Spec.PersistentVolumeSource.AzureFile
 
-	volumeID := fmt.Sprintf(volumeIDTemplate, "", azureSource.SecretName, azureSource.ShareName)
-	// refer to https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
-	csiSource := &v1.CSIPersistentVolumeSource{
-		VolumeHandle:     volumeID,
-		ReadOnly:         azureSource.ReadOnly,
-		VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
-	}
+	accountName, err := getStorageAccountName(azureSource.SecretName)
+	if err != nil {
+		klog.Warningf("getStorageAccountName(%s) returned with error: %v", azureSource.SecretName, err)
+		accountName = azureSource.SecretName
+	}
+	volumeID := fmt.Sprintf(volumeIDTemplate, "", accountName, azureSource.ShareName, "")
 
-	csiSource.NodePublishSecretRef = &v1.SecretReference{
-		Name:      azureSource.ShareName,
-		Namespace: *azureSource.SecretNamespace,
-	}
+	var (
+		// refer to https://github.com/kubernetes-sigs/azurefile-csi-driver/blob/master/docs/driver-parameters.md
+		csiSource = &v1.CSIPersistentVolumeSource{
+			Driver: AzureFileDriverName,
+			NodeStageSecretRef: &v1.SecretReference{
+				Name:      azureSource.SecretName,
+				Namespace: defaultSecretNamespace,
+			},
+			ReadOnly:         azureSource.ReadOnly,
+			VolumeAttributes: map[string]string{azureFileShareName: azureSource.ShareName},
+			VolumeHandle:     volumeID,
+		}
+	)
+
+	if azureSource.SecretNamespace != nil {
+		csiSource.NodeStageSecretRef.Namespace = *azureSource.SecretNamespace
+	}
 
 	pv.Spec.PersistentVolumeSource.AzureFile = nil
 	pv.Spec.PersistentVolumeSource.CSI = csiSource
 	pv.Spec.AccessModes = backwardCompatibleAccessModes(pv.Spec.AccessModes)
 
 	return pv, nil
 }
@@ -129,22 +155,21 @@ func (t *azureFileCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume)
 		ReadOnly: csiSource.ReadOnly,
 	}
 
-	if csiSource.NodePublishSecretRef != nil && csiSource.NodePublishSecretRef.Name != "" {
-		azureSource.SecretName = csiSource.NodePublishSecretRef.Name
-		azureSource.SecretNamespace = &csiSource.NodePublishSecretRef.Namespace
+	if csiSource.NodeStageSecretRef != nil && csiSource.NodeStageSecretRef.Name != "" {
+		azureSource.SecretName = csiSource.NodeStageSecretRef.Name
+		azureSource.SecretNamespace = &csiSource.NodeStageSecretRef.Namespace
 		if csiSource.VolumeAttributes != nil {
 			if shareName, ok := csiSource.VolumeAttributes[azureFileShareName]; ok {
 				azureSource.ShareName = shareName
 			}
 		}
 	} else {
-		_, _, fileShareName, err := getFileShareInfo(csiSource.VolumeHandle)
+		_, storageAccount, fileShareName, _, err := getFileShareInfo(csiSource.VolumeHandle)
 		if err != nil {
 			return nil, err
 		}
 		azureSource.ShareName = fileShareName
+		// to-do: for dynamic provision scenario in CSI, it uses cluster's identity to get storage account key
+		// secret for the file share is not created, we may create a serect here
+		azureSource.SecretName = fmt.Sprintf(secretNameTemplate, storageAccount)
 	}
 
 	pv.Spec.CSI = nil
|
|||
}
|
||||
|
||||
// get file share info according to volume id, e.g.
|
||||
// input: "rg#f5713de20cde511e8ba4900#pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41"
|
||||
// output: rg, f5713de20cde511e8ba4900, pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41
|
||||
func getFileShareInfo(id string) (string, string, string, error) {
|
||||
// input: "rg#f5713de20cde511e8ba4900#pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41#diskname.vhd"
|
||||
// output: rg, f5713de20cde511e8ba4900, pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41, diskname.vhd
|
||||
func getFileShareInfo(id string) (string, string, string, string, error) {
|
||||
segments := strings.Split(id, separator)
|
||||
if len(segments) < 3 {
|
||||
return "", "", "", fmt.Errorf("error parsing volume id: %q, should at least contain two #", id)
|
||||
return "", "", "", "", fmt.Errorf("error parsing volume id: %q, should at least contain two #", id)
|
||||
}
|
||||
return segments[0], segments[1], segments[2], nil
|
||||
var diskName string
|
||||
if len(segments) > 3 {
|
||||
diskName = segments[3]
|
||||
}
|
||||
return segments[0], segments[1], segments[2], diskName, nil
|
||||
}
|
||||
|
||||
// get storage account name from secret name
|
||||
func getStorageAccountName(secretName string) (string, error) {
|
||||
matches := secretNameFormatRE.FindStringSubmatch(secretName)
|
||||
if len(matches) != 2 {
|
||||
return "", fmt.Errorf("could not get account name from %s, correct format: %s", secretName, secretNameFormatRE)
|
||||
}
|
||||
return matches[1], nil
|
||||
}
|
||||
|
|
|
|||
|
|
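A standalone sketch of the widened parser, run against the examples in its own doc comment; old three-segment volume handles still parse, with an empty disk name:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

var secretNameFormatRE = regexp.MustCompile(`azure-storage-account-(.+)-secret`)

// getFileShareInfo reproduces the helper above with separator = "#".
func getFileShareInfo(id string) (string, string, string, string, error) {
	segments := strings.Split(id, "#")
	if len(segments) < 3 {
		return "", "", "", "", fmt.Errorf("error parsing volume id: %q, should at least contain two #", id)
	}
	var diskName string
	if len(segments) > 3 {
		diskName = segments[3]
	}
	return segments[0], segments[1], segments[2], diskName, nil
}

func main() {
	rg, account, share, disk, _ := getFileShareInfo("rg#f5713de20cde511e8ba4900#pvc-file-dynamic-17e43f84-f474-11e8-acd0-000d3a00df41#diskname.vhd")
	fmt.Println(rg, account, share, disk)

	// Old three-segment handles still parse; diskName is simply empty.
	_, _, share, disk, _ = getFileShareInfo("rg#acct#share")
	fmt.Printf("share=%s disk=%q\n", share, disk)

	// The secret-name regexp recovers the storage account name:
	fmt.Println(secretNameFormatRE.FindStringSubmatch("azure-storage-account-acct-secret")[1]) // acct
}
```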
@@ -210,7 +210,7 @@ func (item *primitiveItem) VisitPrimitive(schema *proto.Primitive) {
 		}
 	case proto.Number:
 		switch item.Kind {
-		case proto.Number:
+		case proto.Integer, proto.Number:
 			return
 		}
 	case proto.String:
@@ -25,6 +25,7 @@ import (
 	"io"
 	"net"
 	"path"
+	"regexp"
 	"sort"
 	"strconv"
 	"strings"
@@ -1202,7 +1203,13 @@ func azToRegion(az string) (string, error) {
 	if len(az) < 1 {
 		return "", fmt.Errorf("invalid (empty) AZ")
 	}
-	region := az[:len(az)-1]
+
+	r := regexp.MustCompile(`^([a-zA-Z]+-)+\d+`)
+	region := r.FindString(az)
+	if region == "" {
+		return "", fmt.Errorf("invalid AZ: %s", az)
+	}
+
 	return region, nil
 }
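The regexp extracts the region prefix instead of just dropping the last character, which also handles zone names with longer suffixes (for example AWS Local Zones). A standalone check:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	r := regexp.MustCompile(`^([a-zA-Z]+-)+\d+`)
	for _, az := range []string{"us-east-1a", "us-west-2-lax-1a", "cn-north-1b"} {
		fmt.Printf("%-18s -> %s\n", az, r.FindString(az))
	}
	// us-east-1a         -> us-east-1
	// us-west-2-lax-1a   -> us-west-2  (old az[:len(az)-1] would give us-west-2-lax-1)
	// cn-north-1b        -> cn-north-1
}
```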
@@ -111,6 +111,11 @@ func (g *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, svc *v1
 
 		return status, true, nil
 	}
+	// Checking for finalizer is more accurate because controller restart could happen in the middle of resource
+	// deletion. So even though forwarding rule was deleted, cleanup might not have been complete.
+	if hasFinalizer(svc, ILBFinalizerV1) {
+		return &v1.LoadBalancerStatus{}, true, nil
+	}
 	return nil, false, ignoreNotFound(err)
 }
 
@@ -45,6 +45,8 @@ const (
 	ILBFinalizerV1 = "gke.networking.io/l4-ilb-v1"
 	// ILBFinalizerV2 is the finalizer used by newer controllers that implement Internal LoadBalancer services.
 	ILBFinalizerV2 = "gke.networking.io/l4-ilb-v2"
+	// maxInstancesPerInstanceGroup defines maximum number of VMs per InstanceGroup.
+	maxInstancesPerInstanceGroup = 1000
 )
 
 func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v1.Service, existingFwdRule *compute.ForwardingRule, nodes []*v1.Node) (*v1.LoadBalancerStatus, error) {
@@ -61,6 +63,9 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v
 			"Skipped ensureInternalLoadBalancer as service contains '%s' finalizer.", ILBFinalizerV2)
 		return nil, cloudprovider.ImplementedElsewhere
 	}
+
+	loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc)
+	klog.V(2).Infof("ensureInternalLoadBalancer(%v): Attaching %q finalizer", loadBalancerName, ILBFinalizerV1)
 	if err := addFinalizer(svc, g.client.CoreV1(), ILBFinalizerV1); err != nil {
 		klog.Errorf("Failed to attach finalizer '%s' on service %s/%s - %v", ILBFinalizerV1, svc.Namespace, svc.Name, err)
 		return nil, err
@@ -84,7 +89,6 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v
 		}
 	}
 
-	loadBalancerName := g.GetLoadBalancerName(context.TODO(), clusterName, svc)
 	sharedBackend := shareBackendService(svc)
 	backendServiceName := makeBackendServiceName(loadBalancerName, clusterID, sharedBackend, scheme, protocol, svc.Spec.SessionAffinity)
 	backendServiceLink := g.getBackendServiceLink(backendServiceName)
@@ -312,10 +316,12 @@ func (g *Cloud) ensureInternalLoadBalancerDeleted(clusterName, clusterID string,
 
 	// Try deleting instance groups - expect ResourceInuse error if needed by other LBs
 	igName := makeInstanceGroupName(clusterID)
+	klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): Attempting delete of instanceGroup %v", loadBalancerName, igName)
 	if err := g.ensureInternalInstanceGroupsDeleted(igName); err != nil && !isInUsedByError(err) {
 		return err
 	}
 
+	klog.V(2).Infof("ensureInternalLoadBalancerDeleted(%v): Removing %q finalizer", loadBalancerName, ILBFinalizerV1)
 	if err := removeFinalizer(svc, g.client.CoreV1(), ILBFinalizerV1); err != nil {
 		klog.Errorf("Failed to remove finalizer '%s' on service %s/%s - %v", ILBFinalizerV1, svc.Namespace, svc.Name, err)
 		return err
@@ -512,6 +518,17 @@ func (g *Cloud) ensureInternalInstanceGroup(name, zone string, nodes []*v1.Node)
 		kubeNodes.Insert(n.Name)
 	}
 
+	// Individual InstanceGroup has a limit for 1000 instances in it.
+	// As a result, it's not possible to add more to it.
+	// Given that the long-term fix (AlphaFeatureILBSubsets) is already in-progress,
+	// to stop the bleeding we now simply cut down the contents to first 1000
+	// instances in the alphabetical order. Since there is a limitation for
+	// 250 backend VMs for ILB, this isn't making things worse.
+	if len(kubeNodes) > maxInstancesPerInstanceGroup {
+		klog.Warningf("Limiting number of VMs for InstanceGroup %s to %d", name, maxInstancesPerInstanceGroup)
+		kubeNodes = sets.NewString(kubeNodes.List()[:maxInstancesPerInstanceGroup]...)
+	}
+
 	gceNodes := sets.NewString()
 	if ig == nil {
 		klog.V(2).Infof("ensureInternalInstanceGroup(%v, %v): creating instance group", name, zone)
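The truncation is deterministic because sets.String.List() returns names sorted, so every controller replica cuts the same first N nodes. A tiny illustration with the same package:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	const max = 3 // stands in for maxInstancesPerInstanceGroup (1000)
	nodes := sets.NewString("node-e", "node-a", "node-d", "node-b", "node-c")
	if nodes.Len() > max {
		nodes = sets.NewString(nodes.List()[:max]...) // List() is sorted alphabetically
	}
	fmt.Println(nodes.List()) // [node-a node-b node-c]
}
```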
@@ -831,7 +831,7 @@ honnef.co/go/tools/staticcheck
 honnef.co/go/tools/stylecheck
 honnef.co/go/tools/unused
 honnef.co/go/tools/version
-# k8s.io/api v0.18.1 => k8s.io/api v0.18.1
+# k8s.io/api v0.18.6 => k8s.io/api v0.18.6
 ## explicit
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -876,11 +876,11 @@ k8s.io/api/settings/v1alpha1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.18.0 => k8s.io/apiextensions-apiserver v0.18.1
+# k8s.io/apiextensions-apiserver v0.18.0 => k8s.io/apiextensions-apiserver v0.18.6
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
-# k8s.io/apimachinery v0.18.1 => k8s.io/apimachinery v0.18.1
+# k8s.io/apimachinery v0.18.6 => k8s.io/apimachinery v0.18.6
 ## explicit
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -935,7 +935,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/cli-runtime v0.18.1 => k8s.io/cli-runtime v0.18.1
+# k8s.io/cli-runtime v0.18.6 => k8s.io/cli-runtime v0.18.6
 ## explicit
 k8s.io/cli-runtime/pkg/genericclioptions
 k8s.io/cli-runtime/pkg/kustomize
@@ -949,7 +949,7 @@ k8s.io/cli-runtime/pkg/kustomize/k8sdeps/transformer/patch
 k8s.io/cli-runtime/pkg/kustomize/k8sdeps/validator
 k8s.io/cli-runtime/pkg/printers
 k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v0.18.1 => k8s.io/client-go v0.18.1
+# k8s.io/client-go v0.18.6 => k8s.io/client-go v0.18.6
 ## explicit
 k8s.io/client-go/discovery
 k8s.io/client-go/discovery/cached/disk
@@ -1179,7 +1179,7 @@ k8s.io/client-go/util/jsonpath
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/cloud-provider v0.18.1 => k8s.io/cloud-provider v0.18.1
+# k8s.io/cloud-provider v0.18.6 => k8s.io/cloud-provider v0.18.6
 k8s.io/cloud-provider
 k8s.io/cloud-provider/node/helpers
 k8s.io/cloud-provider/service/helpers
@@ -1189,13 +1189,13 @@ k8s.io/cloud-provider/volume/helpers
 # k8s.io/cloud-provider-openstack v1.17.0
 ## explicit
 k8s.io/cloud-provider-openstack/pkg/util/openstack
-# k8s.io/component-base v0.18.1 => k8s.io/component-base v0.18.1
+# k8s.io/component-base v0.18.6 => k8s.io/component-base v0.18.6
 ## explicit
 k8s.io/component-base/metrics
 k8s.io/component-base/metrics/legacyregistry
 k8s.io/component-base/metrics/prometheus/restclient
 k8s.io/component-base/version
-# k8s.io/csi-translation-lib v0.18.1 => k8s.io/csi-translation-lib v0.18.1
+# k8s.io/csi-translation-lib v0.18.6 => k8s.io/csi-translation-lib v0.18.6
 k8s.io/csi-translation-lib/plugins
 # k8s.io/gengo v0.0.0-20200710205751-c0d492a0f3ca
 ## explicit
@@ -1214,11 +1214,11 @@ k8s.io/klog/klogr
 # k8s.io/klog/v2 v2.0.0
 ## explicit
 k8s.io/klog/v2
-# k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c
+# k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
 k8s.io/kube-openapi/pkg/common
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/proto/validation
-# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.18.1
+# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.18.6
 ## explicit
 k8s.io/kubectl/pkg/cmd/util
 k8s.io/kubectl/pkg/cmd/util/editor
@@ -1234,7 +1234,7 @@ k8s.io/kubectl/pkg/util/openapi/validation
 k8s.io/kubectl/pkg/util/templates
 k8s.io/kubectl/pkg/util/term
 k8s.io/kubectl/pkg/validation
-# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.18.1
+# k8s.io/legacy-cloud-providers v0.0.0 => k8s.io/legacy-cloud-providers v0.18.6
 ## explicit
 k8s.io/legacy-cloud-providers/aws
 k8s.io/legacy-cloud-providers/gce
@@ -1332,25 +1332,25 @@ sigs.k8s.io/structured-merge-diff/v3/value
 # sigs.k8s.io/yaml v1.2.0
 ## explicit
 sigs.k8s.io/yaml
-# k8s.io/api => k8s.io/api v0.18.1
-# k8s.io/apimachinery => k8s.io/apimachinery v0.18.1
-# k8s.io/client-go => k8s.io/client-go v0.18.1
-# k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.1
-# k8s.io/kubectl => k8s.io/kubectl v0.18.1
-# k8s.io/apiserver => k8s.io/apiserver v0.18.1
-# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.1
-# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.1
-# k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.1
-# k8s.io/cri-api => k8s.io/cri-api v0.18.1
-# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.1
-# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.1
-# k8s.io/component-base => k8s.io/component-base v0.18.1
-# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.1
-# k8s.io/metrics => k8s.io/metrics v0.18.1
-# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.1
-# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.1
-# k8s.io/kubelet => k8s.io/kubelet v0.18.1
-# k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.1
-# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.1
-# k8s.io/code-generator => k8s.io/code-generator v0.18.1
+# k8s.io/api => k8s.io/api v0.18.6
+# k8s.io/apimachinery => k8s.io/apimachinery v0.18.6
+# k8s.io/client-go => k8s.io/client-go v0.18.6
+# k8s.io/cloud-provider => k8s.io/cloud-provider v0.18.6
+# k8s.io/kubectl => k8s.io/kubectl v0.18.6
+# k8s.io/apiserver => k8s.io/apiserver v0.18.6
+# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.6
+# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.18.6
+# k8s.io/kube-proxy => k8s.io/kube-proxy v0.18.6
+# k8s.io/cri-api => k8s.io/cri-api v0.18.6
+# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.18.6
+# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.18.6
+# k8s.io/component-base => k8s.io/component-base v0.18.6
+# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.18.6
+# k8s.io/metrics => k8s.io/metrics v0.18.6
+# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.18.6
+# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.18.6
+# k8s.io/kubelet => k8s.io/kubelet v0.18.6
+# k8s.io/cli-runtime => k8s.io/cli-runtime v0.18.6
+# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.18.6
+# k8s.io/code-generator => k8s.io/code-generator v0.18.6
 # github.com/gophercloud/gophercloud => github.com/gophercloud/gophercloud v0.9.0