diff --git a/go.mod b/go.mod
index e6c0568c2..af17ebf63 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,6 @@ require (
github.com/emicklei/go-restful v2.15.0+incompatible // indirect
github.com/go-openapi/spec v0.20.2 // indirect
github.com/google/go-cmp v0.5.6
- github.com/googleapis/gnostic v0.5.3 // indirect
github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect
github.com/mitchellh/go-homedir v1.1.0
github.com/smartystreets/assertions v1.0.0 // indirect
@@ -15,17 +14,16 @@ require (
github.com/spf13/viper v1.8.1
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d
gotest.tools/v3 v3.0.3
- k8s.io/api v0.21.4
- k8s.io/apiextensions-apiserver v0.21.4
- k8s.io/apimachinery v0.21.4
+ k8s.io/api v0.22.5
+ k8s.io/apiextensions-apiserver v0.22.5
+ k8s.io/apimachinery v0.22.5
k8s.io/cli-runtime v0.21.4
- k8s.io/client-go v0.21.4
- k8s.io/code-generator v0.21.4
- k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 // indirect
- knative.dev/eventing v0.28.1-0.20211222204918-d8297456d455
+ k8s.io/client-go v0.22.5
+ k8s.io/code-generator v0.22.5
+ knative.dev/eventing v0.28.1-0.20220107145225-eb4c06c8009d
knative.dev/hack v0.0.0-20211222071919-abd085fc43de
- knative.dev/networking v0.0.0-20211223013028-62388a5f2853
- knative.dev/pkg v0.0.0-20211216142117-79271798f696
- knative.dev/serving v0.28.1-0.20211221064617-c69f92cdfce7
+ knative.dev/networking v0.0.0-20220107020122-0dbedcd88acf
+ knative.dev/pkg v0.0.0-20220105211333-96f18522d78d
+ knative.dev/serving v0.28.1-0.20220107170125-03091748d279
sigs.k8s.io/yaml v1.3.0
)
diff --git a/go.sum b/go.sum
index 52a8c12e0..cec4e4336 100644
--- a/go.sum
+++ b/go.sum
@@ -57,14 +57,18 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v43.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
@@ -76,6 +80,7 @@ github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsI
github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
@@ -128,6 +133,8 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20211221011931-643d94fcab96 h1:2P/dm3KbCLnRHQN/Ma50elhMx1Si9loEZe5hOrsuvuE=
+github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20211221011931-643d94fcab96/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -136,6 +143,7 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -166,6 +174,8 @@ github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
@@ -183,9 +193,11 @@ github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJ
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudevents/conformance v0.2.0/go.mod h1:rHKDwylBH89Rns6U3wL9ww8bg9/4GbwRCDNuyoC6bcc=
github.com/cloudevents/sdk-go/observability/opencensus/v2 v2.4.1/go.mod h1:lhEpxMrIUkeu9rVRgoAbyqZ8GR8Hd3DUy+thHUxAHoI=
+github.com/cloudevents/sdk-go/sql/v2 v2.8.0 h1:gWednxJHL0Ycf93XeEFyQxYj81A7b4eNwkzjNxGunAM=
+github.com/cloudevents/sdk-go/sql/v2 v2.8.0/go.mod h1:u9acNJbhmi1wnDJro4PEAqbr4N1LTCyEUClErxbPS1A=
github.com/cloudevents/sdk-go/v2 v2.4.1/go.mod h1:MZiMwmAh5tGj+fPFvtHv9hKurKqXtdB9haJYMJ/7GJY=
-github.com/cloudevents/sdk-go/v2 v2.7.0 h1:Pt+cOKWNG0tZZKRzuvfVsxcWArO0eq/UPKUxskyuSb8=
-github.com/cloudevents/sdk-go/v2 v2.7.0/go.mod h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs=
+github.com/cloudevents/sdk-go/v2 v2.8.0 h1:kmRaLbsafZmidZ0rZ6h7WOMqCkRMcVTLV5lxV/HKQ9Y=
+github.com/cloudevents/sdk-go/v2 v2.8.0/go.mod h1:GpCBmUj7DIRiDhVvsK5d6WCbgTWs8DxAWTRtAwQmIXs=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -195,6 +207,9 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -366,12 +381,15 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -379,6 +397,7 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
@@ -451,6 +470,7 @@ github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
@@ -525,8 +545,9 @@ github.com/gonum/mathext v0.0.0-20181121095525-8a4bf007ea55/go.mod h1:fmo8aiSEWk
github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw=
github.com/gonum/stat v0.0.0-20181125101827-41a0da705a5b/go.mod h1:Z4GIJBJO3Wa4gD4vbwQxXXZ+WHmW6E9ixmNrwvs0iZs=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -583,8 +604,9 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/googleapis/gnostic v0.5.3 h1:2qsuRm+bzgwSIKikigPASa2GhW8H2Dn4Qq7UxD8K/48=
-github.com/googleapis/gnostic v0.5.3/go.mod h1:TRWw1s4gxBGjSe301Dai3c7wXJAZy57+/6tawkOvqHQ=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -604,6 +626,7 @@ github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x
github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -667,6 +690,7 @@ github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
@@ -758,6 +782,7 @@ github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2J
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -788,6 +813,7 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
@@ -826,6 +852,7 @@ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mo
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.3.0 h1:XtuXmOLIXLjiU2XduuWREDT0LOKtSgos/g7i7RYyoZQ=
github.com/openzipkin/zipkin-go v0.3.0/go.mod h1:4c3sLeE8xjNqehmF5RpAFLPLJxXscc0R4l6Zg0P1tTQ=
@@ -934,6 +961,7 @@ github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:s
github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
@@ -991,6 +1019,7 @@ github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tsenart/go-tsz v0.0.0-20180814232043-cdeb9e1e981e/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo=
github.com/tsenart/go-tsz v0.0.0-20180814235614-0bd30b3df1c3/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo=
github.com/tsenart/vegeta/v12 v12.8.4/go.mod h1:ZiJtwLn/9M4fTPdMY7bdbIeyNeFVE8/AHbWFqCsUuho=
@@ -1039,10 +1068,15 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
+go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
+go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -1055,7 +1089,17 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/otel v0.16.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA=
+go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
+go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
+go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
+go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
@@ -1065,6 +1109,7 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
@@ -1197,8 +1242,9 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211118161319-6a13c67c3ce4/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211205041911-012df41ee64c h1:7SfqwP5fxEtl/P02w5IhKc86ziJ+A25yFrkVgoy2FT8=
golang.org/x/net v0.0.0-20211205041911-012df41ee64c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1300,6 +1346,7 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1383,6 +1430,7 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1505,6 +1553,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -1655,23 +1704,27 @@ k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU=
k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s=
-k8s.io/api v0.21.4 h1:WtDkzTAuI31WZKDPeIYpEUA+WeUfXAmA7gwj6nzFfbc=
k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk=
-k8s.io/apiextensions-apiserver v0.21.4 h1:HkajN/vmT/9HnFmUxvpXfSGkTCvH/ax4e3+j6mqWUDU=
+k8s.io/api v0.22.5 h1:xk7C+rMjF/EGELiD560jdmwzrB788mfcHiNbMQLIVI8=
+k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
k8s.io/apiextensions-apiserver v0.21.4/go.mod h1:OoC8LhI9LnV+wKjZkXIBbLUwtnOGJiTRE33qctH5CIk=
+k8s.io/apiextensions-apiserver v0.22.5 h1:ML0QqT7FIlmZHN+9+2EtARJ3cJVHeoizt6GCteFRE0o=
+k8s.io/apiextensions-apiserver v0.22.5/go.mod h1:tIXeZ0BrDxUb1PoAz+tgOz43Zi1Bp4BEEqVtUccMJbE=
k8s.io/apimachinery v0.19.7/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
k8s.io/apimachinery v0.21.1/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
-k8s.io/apimachinery v0.21.4 h1:KDq0lWZVslHkuE5I7iGAQHwpK0aDTlar1E7IWEc4CNw=
k8s.io/apimachinery v0.21.4/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI=
+k8s.io/apimachinery v0.22.5 h1:cIPwldOYm1Slq9VLBRPtEYpyhjIm1C6aAMAoENuvN9s=
+k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg=
k8s.io/apiserver v0.21.4/go.mod h1:SErUuFBBPZUcD2nsUU8hItxoYheqyYr2o/pCINEPW8g=
+k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ=
k8s.io/cli-runtime v0.21.4 h1:kvOzx6dKg+9wRuHTzSqo8tfTV6ixZCkmi+ag54s7mn8=
k8s.io/cli-runtime v0.21.4/go.mod h1:eRbLHYkdVWzvG87yrkgGd8CqX6/+fAG9DTdAqTXmlRY=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
@@ -1679,16 +1732,19 @@ k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs=
-k8s.io/client-go v0.21.4 h1:tcwj167If+v+pIGrCjaPG7hFo6SqFPFCCgMJy+Vm8Jc=
k8s.io/client-go v0.21.4/go.mod h1:t0/eMKyUAq/DoQ7vW8NVVA00/nomlwC+eInsS8PxSew=
+k8s.io/client-go v0.22.5 h1:I8Zn/UqIdi2r02aZmhaJ1hqMxcpfJ3t5VqvHtctHYFo=
+k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y=
k8s.io/cloud-provider v0.21.0/go.mod h1:z17TQgu3JgUFjcgby8sj5X86YdVK5Pbt+jm/eYMZU9M=
-k8s.io/code-generator v0.21.4 h1:vO8jVuEGV4UF+/2s/88Qg05MokE/1QUFi/Q2YDgz++A=
k8s.io/code-generator v0.21.4/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo=
+k8s.io/code-generator v0.22.5 h1:jn+mYXI5q7rzo7Bz/n8xZIgbe61SeXlIjU5jA8jLVps=
+k8s.io/code-generator v0.22.5/go.mod h1:sbdWCOVob+KaQ5O7xs8PNNaCTpbWVqNgA6EPwLOmRNk=
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw=
k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg=
+k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI=
k8s.io/controller-manager v0.21.0/go.mod h1:Ohy0GRNRKPVjB8C8G+dV+4aPn26m8HYUI6ejloUBvUA=
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
@@ -1706,34 +1762,36 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/legacy-cloud-providers v0.21.0/go.mod h1:bNxo7gDg+PGkBmT/MFZswLTWdSWK9kAlS1s8DJca5q4=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ=
-k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-knative.dev/caching v0.0.0-20211206133228-c29dc56d8f03/go.mod h1:xki+LBTL1riXSoU2dKznqUfgOlQ2eO/F1WF+GMXxH0k=
-knative.dev/eventing v0.28.1-0.20211222204918-d8297456d455 h1:LnatQYBFh/tum+ATTVZKB1xV5UxwvA2bhFZUGPSve6I=
-knative.dev/eventing v0.28.1-0.20211222204918-d8297456d455/go.mod h1:4o3oerr1tmjWTV2n33Ar9Ss+jF/QksOsa4/81ghhOVg=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+knative.dev/caching v0.0.0-20220105210833-30ba0b6609c6/go.mod h1:SgmDCiZNxriP5d1ie3cDZ+/3nXVk8cCd7dEVPWz0beo=
+knative.dev/eventing v0.28.1-0.20220107145225-eb4c06c8009d h1:SWnDa8RmF1jti2oM+lOXgUmvI0D0ohCw677OsVIbqLw=
+knative.dev/eventing v0.28.1-0.20220107145225-eb4c06c8009d/go.mod h1:rJnn9hsSYQ89SS31Sjxjzj5OVCjzCXtdUDaXfYEmnvQ=
knative.dev/hack v0.0.0-20211122162614-813559cefdda/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
knative.dev/hack v0.0.0-20211203062838-e11ac125e707/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
knative.dev/hack v0.0.0-20211222071919-abd085fc43de h1:K7UeyvIfdTjznffAZg2L4fDkOuFWEDiaxgEa+B33nP8=
knative.dev/hack v0.0.0-20211222071919-abd085fc43de/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI=
knative.dev/hack/schema v0.0.0-20211222071919-abd085fc43de/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0=
-knative.dev/networking v0.0.0-20211209101835-8ef631418fc0/go.mod h1:+ozCw7PVf//G9+HOW04hfWnU8UJE5fmWAQkb+ieMaXY=
-knative.dev/networking v0.0.0-20211223013028-62388a5f2853 h1:VZ/yJoR/eiyI/wyo1JNEgpRFyRaqGD8paBYOH1gU/nQ=
-knative.dev/networking v0.0.0-20211223013028-62388a5f2853/go.mod h1:NTfJpL2xQVdJtdPYuIE2j7rxC4/Cttplh1g0oYqQJFE=
+knative.dev/networking v0.0.0-20220107020122-0dbedcd88acf h1:1RqxCIJBwvpahPVNCfxEk4Z/z7nHgmBhPp7ba9A1My0=
+knative.dev/networking v0.0.0-20220107020122-0dbedcd88acf/go.mod h1:kXbsW1qHQcwHyd7qV1bHeYxGysR6XHh/hCkEvk28R/s=
knative.dev/pkg v0.0.0-20211206113427-18589ac7627e/go.mod h1:E6B4RTjZyxe55a0kxOlnEHEl71zuG7gghnqYvNBKwBw=
-knative.dev/pkg v0.0.0-20211216142117-79271798f696 h1:L/r5prSBhm+7x4br5g8Gij/OfF4nx12sorqMXCcnpm0=
-knative.dev/pkg v0.0.0-20211216142117-79271798f696/go.mod h1:hrD91/shO1o4KMZa4oWhnbRPmVJhvq86TLy/STF/qf8=
+knative.dev/pkg v0.0.0-20220104185830-52e42b760b54/go.mod h1:189cvGP0mwpqwZGFrLk5WuERIsNI/J6HuQ1CIX7SXxY=
+knative.dev/pkg v0.0.0-20220105211333-96f18522d78d h1:KqTqUP+w382CaI7NdIGaFLSI0qq2vo4QT93zzbsLnYY=
+knative.dev/pkg v0.0.0-20220105211333-96f18522d78d/go.mod h1:189cvGP0mwpqwZGFrLk5WuERIsNI/J6HuQ1CIX7SXxY=
knative.dev/reconciler-test v0.0.0-20211222120418-816f2192fec9/go.mod h1:dCq1Fuu+eUISdnxABMvoDhefF91DYwE6O3rTYTraXbw=
-knative.dev/serving v0.28.1-0.20211221064617-c69f92cdfce7 h1:vkv/sstZZtV9al/ZJ84l8TyWTLPGWZOpk7Ke9d6itBg=
-knative.dev/serving v0.28.1-0.20211221064617-c69f92cdfce7/go.mod h1:1d8YYUu0hY19KlIRs2SgAn/o64Hr265+3fhOtV3FFVA=
+knative.dev/serving v0.28.1-0.20220107170125-03091748d279 h1:oojha0ReZFGPreNwVRZUR25Uelc6jobehximut1C+yY=
+knative.dev/serving v0.28.1-0.20220107170125-03091748d279/go.mod h1:BzDqCMZ1YCvv2cnGdOT0/JbvrQCiu1Vzd7YNDirgr2M=
pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
diff --git a/third_party/VENDOR-LICENSE/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE b/third_party/VENDOR-LICENSE/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE
new file mode 100644
index 000000000..52cf18e42
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2021 The ANTLR Project
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/VENDOR-LICENSE/github.com/cloudevents/sdk-go/sql/v2/LICENSE b/third_party/VENDOR-LICENSE/github.com/cloudevents/sdk-go/sql/v2/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/cloudevents/sdk-go/sql/v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/VENDOR-LICENSE/k8s.io/apimachinery/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/apimachinery/pkg/LICENSE
similarity index 100%
rename from third_party/VENDOR-LICENSE/k8s.io/apimachinery/LICENSE
rename to third_party/VENDOR-LICENSE/k8s.io/apimachinery/pkg/LICENSE
diff --git a/third_party/VENDOR-LICENSE/k8s.io/apimachinery/third_party/forked/golang/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/apimachinery/third_party/forked/golang/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/k8s.io/apimachinery/third_party/forked/golang/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/VENDOR-LICENSE/k8s.io/client-go/third_party/forked/golang/template/LICENSE b/third_party/VENDOR-LICENSE/k8s.io/client-go/third_party/forked/golang/template/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/k8s.io/client-go/third_party/forked/golang/template/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE
new file mode 100644
index 000000000..52cf18e42
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2021 The ANTLR Project
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
new file mode 100644
index 000000000..1592212e1
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNInvalidAltNumber int
+
+type ATN struct {
+ // DecisionToState is the decision points for all rules, subrules, optional
+ // blocks, ()+, ()*, etc. Used to build DFA predictors for them.
+ DecisionToState []DecisionState
+
+ // grammarType is the ATN type and is used for deserializing ATNs from strings.
+ grammarType int
+
+ // lexerActions is referenced by action transitions in the ATN for lexer ATNs.
+ lexerActions []LexerAction
+
+ // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
+ maxTokenType int
+
+ modeNameToStartState map[string]*TokensStartState
+
+ modeToStartState []*TokensStartState
+
+ // ruleToStartState maps from rule index to starting state number.
+ ruleToStartState []*RuleStartState
+
+ // ruleToStopState maps from rule index to stop state number.
+ ruleToStopState []*RuleStopState
+
+ // ruleToTokenType maps the rule index to the resulting token type for lexer
+ // ATNs. For parser ATNs, it maps the rule index to the generated bypass token
+ // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
+ // specified, and otherwise is nil.
+ ruleToTokenType []int
+
+ states []ATNState
+}
+
+func NewATN(grammarType int, maxTokenType int) *ATN {
+ return &ATN{
+ grammarType: grammarType,
+ maxTokenType: maxTokenType,
+ modeNameToStartState: make(map[string]*TokensStartState),
+ }
+}
+
+// NextTokensInContext computes the set of valid tokens that can occur starting
+// in state s. If ctx is nil, the set of tokens will not include what can follow
+// the rule surrounding s. In other words, the set will be restricted to tokens
+// reachable staying within the rule of s.
+func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
+ return NewLL1Analyzer(a).Look(s, nil, ctx)
+}
+
+// NextTokensNoContext computes the set of valid tokens that can occur starting
+// in s and staying in same rule. Token.EPSILON is in set if we reach end of
+// rule.
+func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
+ if s.GetNextTokenWithinRule() != nil {
+ return s.GetNextTokenWithinRule()
+ }
+
+ s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil))
+ s.GetNextTokenWithinRule().readOnly = true
+
+ return s.GetNextTokenWithinRule()
+}
+
+func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
+ if ctx == nil {
+ return a.NextTokensNoContext(s)
+ }
+
+ return a.NextTokensInContext(s, ctx)
+}
+
+func (a *ATN) addState(state ATNState) {
+ if state != nil {
+ state.SetATN(a)
+ state.SetStateNumber(len(a.states))
+ }
+
+ a.states = append(a.states, state)
+}
+
+func (a *ATN) removeState(state ATNState) {
+ a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
+}
+
+func (a *ATN) defineDecisionState(s DecisionState) int {
+ a.DecisionToState = append(a.DecisionToState, s)
+ s.setDecision(len(a.DecisionToState) - 1)
+
+ return s.getDecision()
+}
+
+func (a *ATN) getDecisionState(decision int) DecisionState {
+ if len(a.DecisionToState) == 0 {
+ return nil
+ }
+
+ return a.DecisionToState[decision]
+}
+
+// getExpectedTokens computes the set of input symbols which could follow ATN
+// state number stateNumber in the specified full parse context ctx and returns
+// the set of potentially valid input symbols which could follow the specified
+// state in the specified context. This method considers the complete parser
+// context, but does not evaluate semantic predicates (i.e. all predicates
+// encountered during the calculation are assumed true). If a path in the ATN
+// exists from the starting state to the RuleStopState of the outermost context
+// without Matching any symbols, Token.EOF is added to the returned set.
+//
+// A nil ctx defaults to ParserRuleContext.EMPTY.
+//
+// It panics if the ATN does not contain state stateNumber.
+func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
+ if stateNumber < 0 || stateNumber >= len(a.states) {
+ panic("Invalid state number.")
+ }
+
+ s := a.states[stateNumber]
+ following := a.NextTokens(s, nil)
+
+ if !following.contains(TokenEpsilon) {
+ return following
+ }
+
+ expected := NewIntervalSet()
+
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := a.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+
+ following = a.NextTokens(rt.(*RuleTransition).followState, nil)
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+ ctx = ctx.GetParent().(RuleContext)
+ }
+
+ if following.contains(TokenEpsilon) {
+ expected.addOne(TokenEOF)
+ }
+
+ return expected
+}
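Note: as a quick orientation for reviewers of this vendored file, a minimal sketch of the follow-set API above; the package and helper names (atndemo, followSet) are illustrative only, and the import path is the vendored one shown in this diff.

	package atndemo

	import "github.com/antlr/antlr4/runtime/Go/antlr"

	// followSet reports the tokens that can follow state s while staying inside
	// its rule. A nil RuleContext routes through NextTokensNoContext, which
	// caches the result on the state via SetNextTokenWithinRule.
	func followSet(a *antlr.ATN, s antlr.ATNState) *antlr.IntervalSet {
		return a.NextTokens(s, nil)
	}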
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
new file mode 100644
index 000000000..97ba417f7
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
@@ -0,0 +1,295 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+type comparable interface {
+ equals(other interface{}) bool
+}
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive at the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig interface {
+ comparable
+
+ hash() int
+
+ GetState() ATNState
+ GetAlt() int
+ GetSemanticContext() SemanticContext
+
+ GetContext() PredictionContext
+ SetContext(PredictionContext)
+
+ GetReachesIntoOuterContext() int
+ SetReachesIntoOuterContext(int)
+
+ String() string
+
+ getPrecedenceFilterSuppressed() bool
+ setPrecedenceFilterSuppressed(bool)
+}
+
+type BaseATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+}
+
+func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
+ return &BaseATNConfig{
+ state: old.state,
+ alt: old.alt,
+ context: old.context,
+ semanticContext: old.semanticContext,
+ reachesIntoOuterContext: old.reachesIntoOuterContext,
+ }
+}
+
+func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
+}
+
+func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil")
+ }
+
+ return &BaseATNConfig{
+ state: state,
+ alt: c.GetAlt(),
+ context: context,
+ semanticContext: semanticContext,
+ reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
+ precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
+ }
+}
+
+func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
+ return b.precedenceFilterSuppressed
+}
+
+func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ b.precedenceFilterSuppressed = v
+}
+
+func (b *BaseATNConfig) GetState() ATNState {
+ return b.state
+}
+
+func (b *BaseATNConfig) GetAlt() int {
+ return b.alt
+}
+
+func (b *BaseATNConfig) SetContext(v PredictionContext) {
+ b.context = v
+}
+func (b *BaseATNConfig) GetContext() PredictionContext {
+ return b.context
+}
+
+func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
+ return b.semanticContext
+}
+
+func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
+ return b.reachesIntoOuterContext
+}
+
+func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
+ b.reachesIntoOuterContext = v
+}
+
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (b *BaseATNConfig) equals(o interface{}) bool {
+ if b == o {
+ return true
+ }
+
+ var other, ok = o.(*BaseATNConfig)
+
+ if !ok {
+ return false
+ }
+
+ var equal bool
+
+ if b.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = b.context.equals(other.context)
+ }
+
+ var (
+ nums = b.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = b.alt == other.alt
+ cons = b.semanticContext.equals(other.semanticContext)
+ sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+func (b *BaseATNConfig) hash() int {
+ var c int
+ if b.context != nil {
+ c = b.context.hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, b.state.GetStateNumber())
+ h = murmurUpdate(h, b.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, b.semanticContext.hash())
+ return murmurFinish(h, 4)
+}
+
+func (b *BaseATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if b.context != nil {
+ s1 = ",[" + fmt.Sprint(b.context) + "]"
+ }
+
+ if b.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(b.semanticContext)
+ }
+
+ if b.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
+}
+
+type LexerATNConfig struct {
+ *BaseATNConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
+ lexerActionExecutor: lexerActionExecutor,
+ }
+}
+
+func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+func (l *LexerATNConfig) hash() int {
+ var f int
+ if l.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, l.state.GetStateNumber())
+ h = murmurUpdate(h, l.alt)
+ h = murmurUpdate(h, l.context.hash())
+ h = murmurUpdate(h, l.semanticContext.hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, l.lexerActionExecutor.hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+func (l *LexerATNConfig) equals(other interface{}) bool {
+ var othert, ok = other.(*LexerATNConfig)
+
+ if l == other {
+ return true
+ } else if !ok {
+ return false
+ } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ var b bool
+
+ if l.lexerActionExecutor != nil {
+ b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
+ } else {
+ b = othert.lexerActionExecutor != nil
+ }
+
+ if b {
+ return false
+ }
+
+ return l.BaseATNConfig.equals(othert.BaseATNConfig)
+}
+
+func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
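Note: a brief sketch of the constructor family above, assuming a state taken from a deserialized ATN; configFor and atndemo are illustrative names, and passing a nil PredictionContext simply means "no context yet". NewBaseATNConfig6 defaults the semantic context to SemanticContextNone.

	package atndemo

	import "github.com/antlr/antlr4/runtime/Go/antlr"

	// configFor builds a config for (state, alt) with no prediction context.
	// *BaseATNConfig satisfies the ATNConfig interface defined above.
	func configFor(state antlr.ATNState, alt int) antlr.ATNConfig {
		return antlr.NewBaseATNConfig6(state, alt, nil)
	}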
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
new file mode 100644
index 000000000..51d6ab82a
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
@@ -0,0 +1,407 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+type ATNConfigSet interface {
+ hash() int
+ Add(ATNConfig, *DoubleDict) bool
+ AddAll([]ATNConfig) bool
+
+ GetStates() Set
+ GetPredicates() []SemanticContext
+ GetItems() []ATNConfig
+
+ OptimizeConfigs(interpreter *BaseATNSimulator)
+
+ Equals(other interface{}) bool
+
+ Length() int
+ IsEmpty() bool
+ Contains(ATNConfig) bool
+ ContainsFast(ATNConfig) bool
+ Clear()
+ String() string
+
+ HasSemanticContext() bool
+ SetHasSemanticContext(v bool)
+
+ ReadOnly() bool
+ SetReadOnly(bool)
+
+ GetConflictingAlts() *BitSet
+ SetConflictingAlts(*BitSet)
+
+ Alts() *BitSet
+
+ FullContext() bool
+
+ GetUniqueAlt() int
+ SetUniqueAlt(int)
+
+ GetDipsIntoOuterContext() bool
+ SetDipsIntoOuterContext(bool)
+}
+
+// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type BaseATNConfigSet struct {
+ cachedHash int
+
+ // configLookup is used to determine whether two BaseATNConfigSets are equal. We
+ // need all configurations with the same (s, i, _, semctx) to be equal. A key
+ // effectively doubles the number of objects associated with ATNConfigs. All
+ // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+ // read-only because a set becomes a DFA state.
+ configLookup Set
+
+ // configs is the added elements.
+ configs []ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+	// hasSemanticContext is used by the parser and the lexer. In the lexer, it
+	// indicates we hit a predicate while computing a closure operation. Don't
+	// make a DFA state from this set in that case.
+ hasSemanticContext bool
+
+	// readOnly is whether the set is read-only. When true, no code may mutate
+	// the set, because DFA states will point at it and those must not change.
+	// It does not protect the other fields; conflictingAlts, in particular, is
+	// assigned after readOnly.
+ readOnly bool
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ uniqueAlt int
+}
+
+func (b *BaseATNConfigSet) Alts() *BitSet {
+ alts := NewBitSet()
+ for _, it := range b.configs {
+ alts.add(it.GetAlt())
+ }
+ return alts
+}
+
+func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
+ return &BaseATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _), where s is the
+// ATNConfig.state, i is the ATNConfig.alt, and pi is the
+// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
+// dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing := b.configLookup.Add(config).(ATNConfig)
+
+ if existing == config {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
+
+func (b *BaseATNConfigSet) GetStates() Set {
+ states := NewArray2DHashSet(nil, nil)
+
+ for i := 0; i < len(b.configs); i++ {
+ states.Add(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *BaseATNConfigSet) HasSemanticContext() bool {
+ return b.hasSemanticContext
+}
+
+func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
+ b.hasSemanticContext = v
+}
+
+func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
+ preds := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ preds = append(preds, c)
+ }
+ }
+
+ return preds
+}
+
+func (b *BaseATNConfigSet) GetItems() []ATNConfig {
+ return b.configs
+}
+
+func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if b.configLookup.Len() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+func (b *BaseATNConfigSet) Equals(other interface{}) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*BaseATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*BaseATNConfigSet)
+
+ return b.configs != nil &&
+ // TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ b.conflictingAlts == other2.conflictingAlts &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext
+}
+
+func (b *BaseATNConfigSet) hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *BaseATNConfigSet) hashCodeConfigs() int {
+ h := 1
+ for _, config := range b.configs {
+ h = 31*h + config.hash()
+ }
+ return h
+}
+
+func (b *BaseATNConfigSet) Length() int {
+ return len(b.configs)
+}
+
+func (b *BaseATNConfigSet) IsEmpty() bool {
+ return len(b.configs) == 0
+}
+
+func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item)
+}
+
+func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
+}
+
+func (b *BaseATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ b.configs = make([]ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = NewArray2DHashSet(nil, equalATNConfigs)
+}
+
+func (b *BaseATNConfigSet) FullContext() bool {
+ return b.fullCtx
+}
+
+func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
+ return b.dipsIntoOuterContext
+}
+
+func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
+ b.dipsIntoOuterContext = v
+}
+
+func (b *BaseATNConfigSet) GetUniqueAlt() int {
+ return b.uniqueAlt
+}
+
+func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
+ b.uniqueAlt = v
+}
+
+func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
+ return b.conflictingAlts
+}
+
+func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
+ b.conflictingAlts = v
+}
+
+func (b *BaseATNConfigSet) ReadOnly() bool {
+ return b.readOnly
+}
+
+func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
+ b.readOnly = readOnly
+
+ if readOnly {
+ b.configLookup = nil // Read only, so no need for the lookup cache
+ }
+}
+
+func (b *BaseATNConfigSet) String() string {
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+type OrderedATNConfigSet struct {
+ *BaseATNConfigSet
+}
+
+func NewOrderedATNConfigSet() *OrderedATNConfigSet {
+ b := NewBaseATNConfigSet(false)
+
+ b.configLookup = NewArray2DHashSet(nil, nil)
+
+ return &OrderedATNConfigSet{BaseATNConfigSet: b}
+}
+
+func hashATNConfig(i interface{}) int {
+ o := i.(ATNConfig)
+ hash := 7
+ hash = 31*hash + o.GetState().GetStateNumber()
+ hash = 31*hash + o.GetAlt()
+ hash = 31*hash + o.GetSemanticContext().hash()
+ return hash
+}
+
+func equalATNConfigs(a, b interface{}) bool {
+ if a == nil || b == nil {
+ return false
+ }
+
+ if a == b {
+ return true
+ }
+
+ var ai, ok = a.(ATNConfig)
+ var bi, ok1 = b.(ATNConfig)
+
+ if !ok || !ok1 {
+ return false
+ }
+
+ if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
+ return false
+ }
+
+ if ai.GetAlt() != bi.GetAlt() {
+ return false
+ }
+
+ return ai.GetSemanticContext().equals(bi.GetSemanticContext())
+}
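Note: a short sketch of the read-only life cycle described in the comments above (freezeConfigs and atndemo are illustrative names; the configs slice is assumed to come from prediction code): configs are merged while the set is mutable, and SetReadOnly(true) drops the lookup table once the set is about to back a DFA state.

	package atndemo

	import "github.com/antlr/antlr4/runtime/Go/antlr"

	// freezeConfigs adds each config (merging duplicates keyed on state, alt and
	// semantic context) and then freezes the set; later Add or Clear calls panic.
	func freezeConfigs(configs []antlr.ATNConfig) *antlr.BaseATNConfigSet {
		set := antlr.NewBaseATNConfigSet(false) // fullCtx=false: SLL-style merging
		for _, c := range configs {
			set.Add(c, nil) // nil: no merge cache
		}
		set.SetReadOnly(true)
		return set
	}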
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
new file mode 100644
index 000000000..18b89efaf
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false}
+
+type ATNDeserializationOptions struct {
+ readOnly bool
+ verifyATN bool
+ generateRuleBypassTransitions bool
+}
+
+func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions {
+ o := new(ATNDeserializationOptions)
+
+ if CopyFrom != nil {
+ o.readOnly = CopyFrom.readOnly
+ o.verifyATN = CopyFrom.verifyATN
+ o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions
+ }
+
+ return o
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
new file mode 100644
index 000000000..884d39cf7
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
@@ -0,0 +1,828 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "encoding/hex"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf16"
+)
+
+// BaseSerializedUUID is the earliest supported serialized UUID. We stick to the
+// serialized string form for now; we don't need a UUID instance.
+var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E"
+var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089"
+
+// This list contains all of the currently supported UUIDs, ordered by when
+// the feature first appeared in this branch.
+var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP}
+
+var SerializedVersion = 3
+
+// This is the current serialized UUID.
+var SerializedUUID = AddedUnicodeSMP
+
+type LoopEndStateIntPair struct {
+ item0 *LoopEndState
+ item1 int
+}
+
+type BlockStartStateIntPair struct {
+ item0 BlockStartState
+ item1 int
+}
+
+type ATNDeserializer struct {
+ deserializationOptions *ATNDeserializationOptions
+ data []rune
+ pos int
+ uuid string
+}
+
+func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
+ if options == nil {
+ options = ATNDeserializationOptionsdefaultOptions
+ }
+
+ return &ATNDeserializer{deserializationOptions: options}
+}
+
+func stringInSlice(a string, list []string) int {
+ for i, b := range list {
+ if b == a {
+ return i
+ }
+ }
+
+ return -1
+}
+
+// isFeatureSupported determines if a particular serialized representation of an
+// ATN supports a particular feature, identified by the UUID used for
+// serializing the ATN at the time the feature was first introduced. Feature is
+// the UUID marking the first time the feature was supported in the serialized
+// ATN. ActualUuid is the UUID of the actual serialized ATN which is currently
+// being deserialized. It returns true if actualUuid represents a serialized ATN
+// at or after the feature identified by feature was introduced, and otherwise
+// false.
+func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool {
+ idx1 := stringInSlice(feature, SupportedUUIDs)
+
+ if idx1 < 0 {
+ return false
+ }
+
+ idx2 := stringInSlice(actualUUID, SupportedUUIDs)
+
+ return idx2 >= idx1
+}
+
+func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN {
+ a.reset(utf16.Decode(data))
+ a.checkVersion()
+ a.checkUUID()
+
+ atn := a.readATN()
+
+ a.readStates(atn)
+ a.readRules(atn)
+ a.readModes(atn)
+
+ sets := make([]*IntervalSet, 0)
+
+ // First, deserialize sets with 16-bit arguments <= U+FFFF.
+ sets = a.readSets(atn, sets, a.readInt)
+ // Next, if the ATN was serialized with the Unicode SMP feature,
+ // deserialize sets with 32-bit arguments <= U+10FFFF.
+	if a.isFeatureSupported(AddedUnicodeSMP, a.uuid) {
+ sets = a.readSets(atn, sets, a.readInt32)
+ }
+
+ a.readEdges(atn, sets)
+ a.readDecisions(atn)
+ a.readLexerActions(atn)
+ a.markPrecedenceDecisions(atn)
+ a.verifyATN(atn)
+
+ if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser {
+ a.generateRuleBypassTransitions(atn)
+ // Re-verify after modification
+ a.verifyATN(atn)
+ }
+
+ return atn
+}
+
+func (a *ATNDeserializer) reset(data []rune) {
+ temp := make([]rune, len(data))
+
+ for i, c := range data {
+ // Don't adjust the first value since that's the version number
+ if i == 0 {
+ temp[i] = c
+ } else if c > 1 {
+ temp[i] = c - 2
+ } else {
+ temp[i] = c + 65533
+ }
+ }
+
+ a.data = temp
+ a.pos = 0
+}
+
+func (a *ATNDeserializer) checkVersion() {
+ version := a.readInt()
+
+ if version != SerializedVersion {
+ panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").")
+ }
+}
+
+func (a *ATNDeserializer) checkUUID() {
+ uuid := a.readUUID()
+
+ if stringInSlice(uuid, SupportedUUIDs) < 0 {
+ panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).")
+ }
+
+ a.uuid = uuid
+}
+
+func (a *ATNDeserializer) readATN() *ATN {
+ grammarType := a.readInt()
+ maxTokenType := a.readInt()
+
+ return NewATN(grammarType, maxTokenType)
+}
+
+func (a *ATNDeserializer) readStates(atn *ATN) {
+ loopBackStateNumbers := make([]LoopEndStateIntPair, 0)
+ endStateNumbers := make([]BlockStartStateIntPair, 0)
+
+ nstates := a.readInt()
+
+ for i := 0; i < nstates; i++ {
+ stype := a.readInt()
+
+ // Ignore bad types of states
+ if stype == ATNStateInvalidType {
+ atn.addState(nil)
+
+ continue
+ }
+
+ ruleIndex := a.readInt()
+
+ if ruleIndex == 0xFFFF {
+ ruleIndex = -1
+ }
+
+ s := a.stateFactory(stype, ruleIndex)
+
+ if stype == ATNStateLoopEnd {
+ loopBackStateNumber := a.readInt()
+
+ loopBackStateNumbers = append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
+ } else if s2, ok := s.(BlockStartState); ok {
+ endStateNumber := a.readInt()
+
+ endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber})
+ }
+
+ atn.addState(s)
+ }
+
+ // Delay the assignment of loop back and end states until we know all the state
+ // instances have been initialized
+ for j := 0; j < len(loopBackStateNumbers); j++ {
+ pair := loopBackStateNumbers[j]
+
+ pair.item0.loopBackState = atn.states[pair.item1]
+ }
+
+ for j := 0; j < len(endStateNumbers); j++ {
+ pair := endStateNumbers[j]
+
+ pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
+ }
+
+ numNonGreedyStates := a.readInt()
+
+ for j := 0; j < numNonGreedyStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(DecisionState).setNonGreedy(true)
+ }
+
+ numPrecedenceStates := a.readInt()
+
+ for j := 0; j < numPrecedenceStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
+ }
+}
+
+func (a *ATNDeserializer) readRules(atn *ATN) {
+ nrules := a.readInt()
+
+ if atn.grammarType == ATNTypeLexer {
+ atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0)
+ }
+
+ atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0)
+
+ for i := 0; i < nrules; i++ {
+ s := a.readInt()
+ startState := atn.states[s].(*RuleStartState)
+
+ atn.ruleToStartState[i] = startState
+
+ if atn.grammarType == ATNTypeLexer {
+ tokenType := a.readInt()
+
+ if tokenType == 0xFFFF {
+ tokenType = TokenEOF
+ }
+
+ atn.ruleToTokenType[i] = tokenType
+ }
+ }
+
+ atn.ruleToStopState = make([]*RuleStopState, nrules) //initIntArray(nrules, 0)
+
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if s2, ok := state.(*RuleStopState); ok {
+ atn.ruleToStopState[s2.ruleIndex] = s2
+ atn.ruleToStartState[s2.ruleIndex].stopState = s2
+ }
+ }
+}
+
+func (a *ATNDeserializer) readModes(atn *ATN) {
+ nmodes := a.readInt()
+
+ for i := 0; i < nmodes; i++ {
+ s := a.readInt()
+
+ atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState))
+ }
+}
+
+func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode func() int) []*IntervalSet {
+ m := a.readInt()
+
+ for i := 0; i < m; i++ {
+ iset := NewIntervalSet()
+
+ sets = append(sets, iset)
+
+ n := a.readInt()
+ containsEOF := a.readInt()
+
+ if containsEOF != 0 {
+ iset.addOne(-1)
+ }
+
+ for j := 0; j < n; j++ {
+ i1 := readUnicode()
+ i2 := readUnicode()
+
+ iset.addRange(i1, i2)
+ }
+ }
+
+ return sets
+}
+
+func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
+ nedges := a.readInt()
+
+ for i := 0; i < nedges; i++ {
+ var (
+ src = a.readInt()
+ trg = a.readInt()
+ ttype = a.readInt()
+ arg1 = a.readInt()
+ arg2 = a.readInt()
+ arg3 = a.readInt()
+ trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
+ srcState = atn.states[src]
+ )
+
+ srcState.AddTransition(trans, -1)
+ }
+
+ // Edges for rule stop states can be derived, so they are not serialized
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ for j := 0; j < len(state.GetTransitions()); j++ {
+ var t, ok = state.GetTransitions()[j].(*RuleTransition)
+
+ if !ok {
+ continue
+ }
+
+ outermostPrecedenceReturn := -1
+
+ if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule {
+ if t.precedence == 0 {
+ outermostPrecedenceReturn = t.getTarget().GetRuleIndex()
+ }
+ }
+
+ trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn)
+
+ atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1)
+ }
+ }
+
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if s2, ok := state.(*BaseBlockStartState); ok {
+ // We need to know the end state to set its start state
+ if s2.endState == nil {
+ panic("IllegalState")
+ }
+
+			// Block end states can only be associated with a single block start state
+ if s2.endState.startState != nil {
+ panic("IllegalState")
+ }
+
+ s2.endState.startState = state
+ }
+
+ if s2, ok := state.(*PlusLoopbackState); ok {
+ for j := 0; j < len(s2.GetTransitions()); j++ {
+ target := s2.GetTransitions()[j].getTarget()
+
+ if t2, ok := target.(*PlusBlockStartState); ok {
+ t2.loopBackState = state
+ }
+ }
+ } else if s2, ok := state.(*StarLoopbackState); ok {
+ for j := 0; j < len(s2.GetTransitions()); j++ {
+ target := s2.GetTransitions()[j].getTarget()
+
+ if t2, ok := target.(*StarLoopEntryState); ok {
+ t2.loopBackState = state
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) readDecisions(atn *ATN) {
+ ndecisions := a.readInt()
+
+ for i := 0; i < ndecisions; i++ {
+ s := a.readInt()
+ decState := atn.states[s].(DecisionState)
+
+ atn.DecisionToState = append(atn.DecisionToState, decState)
+ decState.setDecision(i)
+ }
+}
+
+func (a *ATNDeserializer) readLexerActions(atn *ATN) {
+ if atn.grammarType == ATNTypeLexer {
+ count := a.readInt()
+
+ atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil)
+
+ for i := 0; i < count; i++ {
+ actionType := a.readInt()
+ data1 := a.readInt()
+
+ if data1 == 0xFFFF {
+ data1 = -1
+ }
+
+ data2 := a.readInt()
+
+ if data2 == 0xFFFF {
+ data2 = -1
+ }
+
+ lexerAction := a.lexerActionFactory(actionType, data1, data2)
+
+ atn.lexerActions[i] = lexerAction
+ }
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
+ count := len(atn.ruleToStartState)
+
+ for i := 0; i < count; i++ {
+ atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
+ }
+
+ for i := 0; i < count; i++ {
+ a.generateRuleBypassTransition(atn, i)
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
+ bypassStart := NewBasicBlockStartState()
+
+ bypassStart.ruleIndex = idx
+ atn.addState(bypassStart)
+
+ bypassStop := NewBlockEndState()
+
+ bypassStop.ruleIndex = idx
+ atn.addState(bypassStop)
+
+ bypassStart.endState = bypassStop
+
+ atn.defineDecisionState(bypassStart.BaseDecisionState)
+
+ bypassStop.startState = bypassStart
+
+ var excludeTransition Transition
+ var endState ATNState
+
+ if atn.ruleToStartState[idx].isPrecedenceRule {
+ // Wrap from the beginning of the rule to the StarLoopEntryState
+ endState = nil
+
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if a.stateIsEndStateFor(state, idx) != nil {
+ endState = state
+ excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
+
+ break
+ }
+ }
+
+ if excludeTransition == nil {
+ panic("Couldn't identify final state of the precedence rule prefix section.")
+ }
+ } else {
+ endState = atn.ruleToStopState[idx]
+ }
+
+ // All non-excluded transitions that currently target end state need to target
+ // blockEnd instead
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ for j := 0; j < len(state.GetTransitions()); j++ {
+ transition := state.GetTransitions()[j]
+
+ if transition == excludeTransition {
+ continue
+ }
+
+ if transition.getTarget() == endState {
+ transition.setTarget(bypassStop)
+ }
+ }
+ }
+
+ // All transitions leaving the rule start state need to leave blockStart instead
+ ruleToStartState := atn.ruleToStartState[idx]
+ count := len(ruleToStartState.GetTransitions())
+
+	// Move each transition off the rule start state onto bypassStart, working
+	// from the end so the remaining slice can be truncated as we go.
+	for count > 0 {
+		bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
+		ruleToStartState.SetTransitions(ruleToStartState.GetTransitions()[:count-1])
+		count--
+	}
+
+ // Link the new states
+ atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
+ bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
+
+ MatchState := NewBasicState()
+
+ atn.addState(MatchState)
+ MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
+ bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
+}
+
+func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
+ if state.GetRuleIndex() != idx {
+ return nil
+ }
+
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ return nil
+ }
+
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
+ return nil
+ }
+
+ var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
+ return state
+ }
+
+ return nil
+}
+
+// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
+// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
+// the correct value.
+func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
+ for _, state := range atn.states {
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ continue
+ }
+
+		// We analyze the ATN to determine if an ATN decision state is the
+ // decision for the closure block that determines whether a
+ // precedence rule should continue or complete.
+ if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
+ var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if s3.epsilonOnlyTransitions && ok2 {
+ state.(*StarLoopEntryState).precedenceRuleDecision = true
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) verifyATN(atn *ATN) {
+ if !a.deserializationOptions.verifyATN {
+ return
+ }
+
+ // Verify assumptions
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if state == nil {
+ continue
+ }
+
+ a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")
+
+ switch s2 := state.(type) {
+ case *PlusBlockStartState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *StarLoopEntryState:
+ a.checkCondition(s2.loopBackState != nil, "")
+ a.checkCondition(len(s2.GetTransitions()) == 2, "")
+
+			// Check the shape of the loop against the targets of the entry
+			// state's two transitions rather than against the state itself.
+			switch s2.GetTransitions()[0].getTarget().(type) {
+			case *StarBlockStartState:
+				_, ok2 := s2.GetTransitions()[1].getTarget().(*LoopEndState)
+
+				a.checkCondition(ok2, "")
+				a.checkCondition(!s2.nonGreedy, "")
+
+			case *LoopEndState:
+				_, ok2 := s2.GetTransitions()[1].getTarget().(*StarBlockStartState)
+
+				a.checkCondition(ok2, "")
+				a.checkCondition(s2.nonGreedy, "")
+
+			default:
+				panic("IllegalState")
+			}
+
+ case *StarLoopbackState:
+ a.checkCondition(len(state.GetTransitions()) == 1, "")
+
+ var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
+
+ a.checkCondition(ok2, "")
+
+ case *LoopEndState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *RuleStartState:
+ a.checkCondition(s2.stopState != nil, "")
+
+ case *BaseBlockStartState:
+ a.checkCondition(s2.endState != nil, "")
+
+ case *BlockEndState:
+ a.checkCondition(s2.startState != nil, "")
+
+ case DecisionState:
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")
+
+ default:
+ var _, ok = s2.(*RuleStopState)
+
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
+ }
+ }
+}
+
+func (a *ATNDeserializer) checkCondition(condition bool, message string) {
+ if !condition {
+ if message == "" {
+ message = "IllegalState"
+ }
+
+ panic(message)
+ }
+}
+
+func (a *ATNDeserializer) readInt() int {
+ v := a.data[a.pos]
+
+ a.pos++
+
+ return int(v)
+}
+
+func (a *ATNDeserializer) readInt32() int {
+ var low = a.readInt()
+ var high = a.readInt()
+ return low | (high << 16)
+}
+
+//TODO
+//func (a *ATNDeserializer) readLong() int64 {
+// panic("Not implemented")
+// var low = a.readInt32()
+// var high = a.readInt32()
+// return (low & 0x00000000FFFFFFFF) | (high << int32)
+//}
+
+func createByteToHex() []string {
+ bth := make([]string, 256)
+
+ for i := 0; i < 256; i++ {
+ bth[i] = strings.ToUpper(hex.EncodeToString([]byte{byte(i)}))
+ }
+
+ return bth
+}
+
+var byteToHex = createByteToHex()
+
+func (a *ATNDeserializer) readUUID() string {
+ bb := make([]int, 16)
+
+ for i := 7; i >= 0; i-- {
+ integer := a.readInt()
+
+ bb[(2*i)+1] = integer & 0xFF
+ bb[2*i] = (integer >> 8) & 0xFF
+ }
+
+ return byteToHex[bb[0]] + byteToHex[bb[1]] +
+ byteToHex[bb[2]] + byteToHex[bb[3]] + "-" +
+ byteToHex[bb[4]] + byteToHex[bb[5]] + "-" +
+ byteToHex[bb[6]] + byteToHex[bb[7]] + "-" +
+ byteToHex[bb[8]] + byteToHex[bb[9]] + "-" +
+ byteToHex[bb[10]] + byteToHex[bb[11]] +
+ byteToHex[bb[12]] + byteToHex[bb[13]] +
+ byteToHex[bb[14]] + byteToHex[bb[15]]
+}
+
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+ target := atn.states[trg]
+
+ switch typeIndex {
+ case TransitionEPSILON:
+ return NewEpsilonTransition(target, -1)
+
+ case TransitionRANGE:
+ if arg3 != 0 {
+ return NewRangeTransition(target, TokenEOF, arg2)
+ }
+
+ return NewRangeTransition(target, arg1, arg2)
+
+ case TransitionRULE:
+ return NewRuleTransition(atn.states[arg1], arg2, arg3, target)
+
+ case TransitionPREDICATE:
+ return NewPredicateTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionPRECEDENCE:
+ return NewPrecedencePredicateTransition(target, arg1)
+
+ case TransitionATOM:
+ if arg3 != 0 {
+ return NewAtomTransition(target, TokenEOF)
+ }
+
+ return NewAtomTransition(target, arg1)
+
+ case TransitionACTION:
+ return NewActionTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionSET:
+ return NewSetTransition(target, sets[arg1])
+
+ case TransitionNOTSET:
+ return NewNotSetTransition(target, sets[arg1])
+
+ case TransitionWILDCARD:
+ return NewWildcardTransition(target)
+ }
+
+ panic("The specified transition type is not valid.")
+}
+
+func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
+ var s ATNState
+
+ switch typeIndex {
+ case ATNStateInvalidType:
+ return nil
+
+ case ATNStateBasic:
+ s = NewBasicState()
+
+ case ATNStateRuleStart:
+ s = NewRuleStartState()
+
+ case ATNStateBlockStart:
+ s = NewBasicBlockStartState()
+
+ case ATNStatePlusBlockStart:
+ s = NewPlusBlockStartState()
+
+ case ATNStateStarBlockStart:
+ s = NewStarBlockStartState()
+
+ case ATNStateTokenStart:
+ s = NewTokensStartState()
+
+ case ATNStateRuleStop:
+ s = NewRuleStopState()
+
+ case ATNStateBlockEnd:
+ s = NewBlockEndState()
+
+ case ATNStateStarLoopBack:
+ s = NewStarLoopbackState()
+
+ case ATNStateStarLoopEntry:
+ s = NewStarLoopEntryState()
+
+ case ATNStatePlusLoopBack:
+ s = NewPlusLoopbackState()
+
+ case ATNStateLoopEnd:
+ s = NewLoopEndState()
+
+ default:
+ panic(fmt.Sprintf("state type %d is invalid", typeIndex))
+ }
+
+ s.SetRuleIndex(ruleIndex)
+
+ return s
+}
+
+func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
+ switch typeIndex {
+ case LexerActionTypeChannel:
+ return NewLexerChannelAction(data1)
+
+ case LexerActionTypeCustom:
+ return NewLexerCustomAction(data1, data2)
+
+ case LexerActionTypeMode:
+ return NewLexerModeAction(data1)
+
+ case LexerActionTypeMore:
+ return LexerMoreActionINSTANCE
+
+ case LexerActionTypePopMode:
+ return LexerPopModeActionINSTANCE
+
+ case LexerActionTypePushMode:
+ return NewLexerPushModeAction(data1)
+
+ case LexerActionTypeSkip:
+ return LexerSkipActionINSTANCE
+
+ case LexerActionTypeType:
+ return NewLexerTypeAction(data1)
+
+ default:
+ panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
+ }
+}
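Note: a minimal sketch of how generated recognizers typically drive this deserializer; serializedATN stands in for the []uint16 table the ANTLR tool emits, and loadATN/atndemo are illustrative names only.

	package atndemo

	import "github.com/antlr/antlr4/runtime/Go/antlr"

	// loadATN deserializes a serialized ATN using the default options
	// (read-only, no verification, no rule-bypass transitions).
	func loadATN(serializedATN []uint16) *antlr.ATN {
		deserializer := antlr.NewATNDeserializer(nil) // nil selects the defaults
		return deserializer.DeserializeFromUInt16(serializedATN)
	}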
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
new file mode 100644
index 000000000..d5454d6d5
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
+
+type IATNSimulator interface {
+ SharedContextCache() *PredictionContextCache
+ ATN() *ATN
+ DecisionToDFA() []*DFA
+}
+
+type BaseATNSimulator struct {
+ atn *ATN
+ sharedContextCache *PredictionContextCache
+ decisionToDFA []*DFA
+}
+
+func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
+ b := new(BaseATNSimulator)
+
+ b.atn = atn
+ b.sharedContextCache = sharedContextCache
+
+ return b
+}
+
+func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
+ if b.sharedContextCache == nil {
+ return context
+ }
+
+ visited := make(map[PredictionContext]PredictionContext)
+
+ return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
+}
+
+func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
+ return b.sharedContextCache
+}
+
+func (b *BaseATNSimulator) ATN() *ATN {
+ return b.atn
+}
+
+func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
+ return b.decisionToDFA
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
new file mode 100644
index 000000000..563d5db38
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
@@ -0,0 +1,386 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
+// Constants for serialization.
+const (
+ ATNStateInvalidType = 0
+ ATNStateBasic = 1
+ ATNStateRuleStart = 2
+ ATNStateBlockStart = 3
+ ATNStatePlusBlockStart = 4
+ ATNStateStarBlockStart = 5
+ ATNStateTokenStart = 6
+ ATNStateRuleStop = 7
+ ATNStateBlockEnd = 8
+ ATNStateStarLoopBack = 9
+ ATNStateStarLoopEntry = 10
+ ATNStatePlusLoopBack = 11
+ ATNStateLoopEnd = 12
+
+ ATNStateInvalidStateNumber = -1
+)
+
+var ATNStateInitialNumTransitions = 4
+
+type ATNState interface {
+ GetEpsilonOnlyTransitions() bool
+
+ GetRuleIndex() int
+ SetRuleIndex(int)
+
+ GetNextTokenWithinRule() *IntervalSet
+ SetNextTokenWithinRule(*IntervalSet)
+
+ GetATN() *ATN
+ SetATN(*ATN)
+
+ GetStateType() int
+
+ GetStateNumber() int
+ SetStateNumber(int)
+
+ GetTransitions() []Transition
+ SetTransitions([]Transition)
+ AddTransition(Transition, int)
+
+ String() string
+ hash() int
+}
+
+type BaseATNState struct {
+ // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
+ NextTokenWithinRule *IntervalSet
+
+ // atn is the current ATN.
+ atn *ATN
+
+ epsilonOnlyTransitions bool
+
+ // ruleIndex tracks the Rule index because there are no Rule objects at runtime.
+ ruleIndex int
+
+ stateNumber int
+
+ stateType int
+
+ // Track the transitions emanating from this ATN state.
+ transitions []Transition
+}
+
+func NewBaseATNState() *BaseATNState {
+ return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
+}
+
+func (as *BaseATNState) GetRuleIndex() int {
+ return as.ruleIndex
+}
+
+func (as *BaseATNState) SetRuleIndex(v int) {
+ as.ruleIndex = v
+}
+func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
+ return as.epsilonOnlyTransitions
+}
+
+func (as *BaseATNState) GetATN() *ATN {
+ return as.atn
+}
+
+func (as *BaseATNState) SetATN(atn *ATN) {
+ as.atn = atn
+}
+
+func (as *BaseATNState) GetTransitions() []Transition {
+ return as.transitions
+}
+
+func (as *BaseATNState) SetTransitions(t []Transition) {
+ as.transitions = t
+}
+
+func (as *BaseATNState) GetStateType() int {
+ return as.stateType
+}
+
+func (as *BaseATNState) GetStateNumber() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) SetStateNumber(stateNumber int) {
+ as.stateNumber = stateNumber
+}
+
+func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
+ return as.NextTokenWithinRule
+}
+
+func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
+ as.NextTokenWithinRule = v
+}
+
+func (as *BaseATNState) hash() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) String() string {
+ return strconv.Itoa(as.stateNumber)
+}
+
+func (as *BaseATNState) equals(other interface{}) bool {
+ if ot, ok := other.(ATNState); ok {
+ return as.stateNumber == ot.GetStateNumber()
+ }
+
+ return false
+}
+
+func (as *BaseATNState) isNonGreedyExitState() bool {
+ return false
+}
+
+func (as *BaseATNState) AddTransition(trans Transition, index int) {
+ if len(as.transitions) == 0 {
+ as.epsilonOnlyTransitions = trans.getIsEpsilon()
+ } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
+ as.epsilonOnlyTransitions = false
+ }
+
+ if index == -1 {
+ as.transitions = append(as.transitions, trans)
+ } else {
+ as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
+ // TODO: as.transitions.splice(index, 1, trans)
+ }
+}
+
+type BasicState struct {
+ *BaseATNState
+}
+
+func NewBasicState() *BasicState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBasic
+
+ return &BasicState{BaseATNState: b}
+}
+
+type DecisionState interface {
+ ATNState
+
+ getDecision() int
+ setDecision(int)
+
+ getNonGreedy() bool
+ setNonGreedy(bool)
+}
+
+type BaseDecisionState struct {
+ *BaseATNState
+ decision int
+ nonGreedy bool
+}
+
+func NewBaseDecisionState() *BaseDecisionState {
+ return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
+}
+
+func (s *BaseDecisionState) getDecision() int {
+ return s.decision
+}
+
+func (s *BaseDecisionState) setDecision(b int) {
+ s.decision = b
+}
+
+func (s *BaseDecisionState) getNonGreedy() bool {
+ return s.nonGreedy
+}
+
+func (s *BaseDecisionState) setNonGreedy(b bool) {
+ s.nonGreedy = b
+}
+
+type BlockStartState interface {
+ DecisionState
+
+ getEndState() *BlockEndState
+ setEndState(*BlockEndState)
+}
+
+// BaseBlockStartState is the start of a regular (...) block.
+type BaseBlockStartState struct {
+ *BaseDecisionState
+ endState *BlockEndState
+}
+
+func NewBlockStartState() *BaseBlockStartState {
+ return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
+}
+
+func (s *BaseBlockStartState) getEndState() *BlockEndState {
+ return s.endState
+}
+
+func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
+ s.endState = b
+}
+
+type BasicBlockStartState struct {
+ *BaseBlockStartState
+}
+
+func NewBasicBlockStartState() *BasicBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateBlockStart
+
+ return &BasicBlockStartState{BaseBlockStartState: b}
+}
+
+// BlockEndState is a terminal node of a simple (a|b|c) block.
+type BlockEndState struct {
+ *BaseATNState
+ startState ATNState
+}
+
+func NewBlockEndState() *BlockEndState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBlockEnd
+
+ return &BlockEndState{BaseATNState: b}
+}
+
+// RuleStopState is the last node in the ATN for a rule, unless that rule is the
+// start symbol. In that case, there is one transition to EOF. Later, we might
+// encode references to all calls to this rule to compute FOLLOW sets for error
+// handling.
+type RuleStopState struct {
+ *BaseATNState
+}
+
+func NewRuleStopState() *RuleStopState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStop
+
+ return &RuleStopState{BaseATNState: b}
+}
+
+type RuleStartState struct {
+ *BaseATNState
+ stopState ATNState
+ isPrecedenceRule bool
+}
+
+func NewRuleStartState() *RuleStartState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStart
+
+ return &RuleStartState{BaseATNState: b}
+}
+
+// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
+// transitions: one that loops back to the start of the block, and one to exit.
+type PlusLoopbackState struct {
+ *BaseDecisionState
+}
+
+func NewPlusLoopbackState() *PlusLoopbackState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStatePlusLoopBack
+
+ return &PlusLoopbackState{BaseDecisionState: b}
+}
+
+// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
+// decision state; we don't use it for code generation. Somebody might need it,
+// so it is included for completeness. In reality, PlusLoopbackState is the real
+// decision-making node for A+.
+type PlusBlockStartState struct {
+ *BaseBlockStartState
+ loopBackState ATNState
+}
+
+func NewPlusBlockStartState() *PlusBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStatePlusBlockStart
+
+ return &PlusBlockStartState{BaseBlockStartState: b}
+}
+
+// StarBlockStartState is the block that begins a closure loop.
+type StarBlockStartState struct {
+ *BaseBlockStartState
+}
+
+func NewStarBlockStartState() *StarBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateStarBlockStart
+
+ return &StarBlockStartState{BaseBlockStartState: b}
+}
+
+type StarLoopbackState struct {
+ *BaseATNState
+}
+
+func NewStarLoopbackState() *StarLoopbackState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateStarLoopBack
+
+ return &StarLoopbackState{BaseATNState: b}
+}
+
+type StarLoopEntryState struct {
+ *BaseDecisionState
+ loopBackState ATNState
+ precedenceRuleDecision bool
+}
+
+func NewStarLoopEntryState() *StarLoopEntryState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateStarLoopEntry
+
+	// precedenceRuleDecision defaults to false; it indicates whether this state
+	// can benefit from a precedence DFA during SLL decision making.
+ return &StarLoopEntryState{BaseDecisionState: b}
+}
+
+// LoopEndState marks the end of a * or + loop.
+type LoopEndState struct {
+ *BaseATNState
+ loopBackState ATNState
+}
+
+func NewLoopEndState() *LoopEndState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateLoopEnd
+
+ return &LoopEndState{BaseATNState: b}
+}
+
+// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
+type TokensStartState struct {
+ *BaseDecisionState
+}
+
+func NewTokensStartState() *TokensStartState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateTokenStart
+
+ return &TokensStartState{BaseDecisionState: b}
+}
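Note: a small sketch of the state/transition API above, using only constructors that appear in this file and calls already used by atn_deserializer.go in this diff; linkStates and atndemo are illustrative names.

	package atndemo

	import "github.com/antlr/antlr4/runtime/Go/antlr"

	// linkStates creates two basic states and connects them with an epsilon
	// transition appended at the end (index -1), the same convention the
	// deserializer uses when it reads edges.
	func linkStates() (antlr.ATNState, antlr.ATNState) {
		src := antlr.NewBasicState()
		dst := antlr.NewBasicState()
		src.AddTransition(antlr.NewEpsilonTransition(dst, -1), -1)
		return src, dst
	}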
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
new file mode 100644
index 000000000..a7b48976b
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// These constants represent the type of recognizer an ATN applies to.
+const (
+ ATNTypeLexer = 0
+ ATNTypeParser = 1
+)
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
new file mode 100644
index 000000000..70c1207f7
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type CharStream interface {
+ IntStream
+ GetText(int, int) string
+ GetTextFromTokens(start, end Token) string
+ GetTextFromInterval(*Interval) string
+}
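For reference, a minimal sketch of how this interface is typically obtained and queried; it assumes only the runtime being vendored here, where NewInputStream returns a string-backed CharStream:

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

func main() {
	// NewInputStream wraps a string in a CharStream implementation.
	var cs antlr.CharStream = antlr.NewInputStream("hello")

	// Both accessors use inclusive character indexes.
	fmt.Println(cs.GetText(0, 4))                                // hello
	fmt.Println(cs.GetTextFromInterval(antlr.NewInterval(1, 3))) // ell
}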
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
new file mode 100644
index 000000000..330ff8f31
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// TokenFactory creates CommonToken objects.
+type TokenFactory interface {
+ Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
+}
+
+// CommonTokenFactory is the default TokenFactory implementation.
+type CommonTokenFactory struct {
+ // copyText indicates whether CommonToken.setText should be called after
+ // constructing tokens to explicitly set the text. This is useful for cases
+ // where the input stream might not be able to provide arbitrary substrings of
+ // text from the input after the lexer creates a token (e.g. the
+ // implementation of CharStream.GetText in UnbufferedCharStream panics with an
+ // UnsupportedOperationException). Explicitly setting the token text allows
+ // Token.GetText to be called at any time regardless of the input stream
+ // implementation.
+ //
+ // The default value is false to avoid the performance and memory overhead of
+ // copying text for every token unless explicitly requested.
+ copyText bool
+}
+
+func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
+ return &CommonTokenFactory{copyText: copyText}
+}
+
+// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
+// explicitly copy token text when constructing tokens.
+var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
+
+func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
+ t := NewCommonToken(source, ttype, channel, start, stop)
+
+ t.line = line
+ t.column = column
+
+ if text != "" {
+ t.SetText(text)
+ } else if c.copyText && source.charStream != nil {
+ t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
+ }
+
+ return t
+}
+
+func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
+ t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
+ t.SetText(text)
+
+ return t
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
new file mode 100644
index 000000000..c90e9b890
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
@@ -0,0 +1,447 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// CommonTokenStream is an implementation of TokenStream that loads tokens from
+// a TokenSource on-demand and places the tokens in a buffer to provide access
+// to any previous token by index. This token stream ignores the value of
+// Token.getChannel. If your parser requires the token stream to filter tokens
+// to only those on a particular channel, such as Token.DEFAULT_CHANNEL or
+// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
+type CommonTokenStream struct {
+ channel int
+
+ // fetchedEOF indicates whether the Token.EOF token has been fetched from
+ // tokenSource and added to tokens. This field improves performance for the
+ // following cases:
+ //
+ // consume: The lookahead check in consume to prevent consuming the EOF symbol is
+ // optimized by checking the values of fetchedEOF and p instead of calling LA.
+ //
+ // fetch: The check to prevent adding multiple EOF symbols into tokens is
+ // trivial with this field.
+ fetchedEOF bool
+
+ // index is the index into tokens of the current token (the next token to
+ // consume). tokens[index] should be LT(1). It is set to -1 when the stream is
+ // first constructed or when SetTokenSource is called, indicating that the
+ // first token has not yet been fetched from the token source. For additional
+ // information, see the documentation of IntStream for a description of
+ // initializing methods.
+ index int
+
+ // tokenSource is the TokenSource from which tokens for this stream are
+ // fetched.
+ tokenSource TokenSource
+
+ // tokens is all tokens fetched from the token source. The list is considered a
+ // complete view of the input once fetchedEOF is set to true.
+ tokens []Token
+}
+
+func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
+ return &CommonTokenStream{
+ channel: channel,
+ index: -1,
+ tokenSource: lexer,
+ tokens: make([]Token, 0),
+ }
+}
+
+func (c *CommonTokenStream) GetAllTokens() []Token {
+ return c.tokens
+}
+
+func (c *CommonTokenStream) Mark() int {
+ return 0
+}
+
+func (c *CommonTokenStream) Release(marker int) {}
+
+func (c *CommonTokenStream) reset() {
+ c.Seek(0)
+}
+
+func (c *CommonTokenStream) Seek(index int) {
+ c.lazyInit()
+ c.index = c.adjustSeekIndex(index)
+}
+
+func (c *CommonTokenStream) Get(index int) Token {
+ c.lazyInit()
+
+ return c.tokens[index]
+}
+
+func (c *CommonTokenStream) Consume() {
+ SkipEOFCheck := false
+
+ if c.index >= 0 {
+ if c.fetchedEOF {
+ // The last token in tokens is EOF. Skip the check if index points at any
+ // fetched token except the last.
+ SkipEOFCheck = c.index < len(c.tokens)-1
+ } else {
+ // No EOF token in tokens. Skip the check if index points at a fetched token.
+ SkipEOFCheck = c.index < len(c.tokens)
+ }
+ } else {
+ // Not yet initialized
+ SkipEOFCheck = false
+ }
+
+ if !SkipEOFCheck && c.LA(1) == TokenEOF {
+ panic("cannot consume EOF")
+ }
+
+ if c.Sync(c.index + 1) {
+ c.index = c.adjustSeekIndex(c.index + 1)
+ }
+}
+
+// Sync makes sure index i in tokens has a token and returns true if a token is
+// located at index i, and false otherwise.
+func (c *CommonTokenStream) Sync(i int) bool {
+ n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
+
+ if n > 0 {
+ fetched := c.fetch(n)
+ return fetched >= n
+ }
+
+ return true
+}
+
+// fetch adds n elements to buffer and returns the actual number of elements
+// added to the buffer.
+func (c *CommonTokenStream) fetch(n int) int {
+ if c.fetchedEOF {
+ return 0
+ }
+
+ for i := 0; i < n; i++ {
+ t := c.tokenSource.NextToken()
+
+ t.SetTokenIndex(len(c.tokens))
+ c.tokens = append(c.tokens, t)
+
+ if t.GetTokenType() == TokenEOF {
+ c.fetchedEOF = true
+
+ return i + 1
+ }
+ }
+
+ return n
+}
+
+// GetTokens returns the tokens between indexes start (inclusive) and stop
+// (exclusive), stopping at EOF and optionally filtering by the types in types.
+func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
+ if start < 0 || stop < 0 {
+ return nil
+ }
+
+ c.lazyInit()
+
+ subset := make([]Token, 0)
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ for i := start; i < stop; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ if types == nil || types.contains(t.GetTokenType()) {
+ subset = append(subset, t)
+ }
+ }
+
+ return subset
+}
+
+func (c *CommonTokenStream) LA(i int) int {
+ return c.LT(i).GetTokenType()
+}
+
+func (c *CommonTokenStream) lazyInit() {
+ if c.index == -1 {
+ c.setup()
+ }
+}
+
+func (c *CommonTokenStream) setup() {
+ c.Sync(0)
+ c.index = c.adjustSeekIndex(0)
+}
+
+func (c *CommonTokenStream) GetTokenSource() TokenSource {
+ return c.tokenSource
+}
+
+// SetTokenSource resets this token stream by setting its token source.
+func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
+ c.tokenSource = tokenSource
+ c.tokens = make([]Token, 0)
+ c.index = -1
+}
+
+// NextTokenOnChannel returns the index of the next token on channel given a
+// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
+// no tokens on channel between i and EOF.
+func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
+ c.Sync(i)
+
+ if i >= len(c.tokens) {
+ return -1
+ }
+
+ token := c.tokens[i]
+
+ for token.GetChannel() != c.channel {
+ if token.GetTokenType() == TokenEOF {
+ return -1
+ }
+
+ i++
+ c.Sync(i)
+ token = c.tokens[i]
+ }
+
+ return i
+}
+
+// previousTokenOnChannel returns the index of the previous token on channel
+// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
+// there are no tokens on channel between i and 0.
+func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
+ for i >= 0 && c.tokens[i].GetChannel() != channel {
+ i--
+ }
+
+ return i
+}
+
+// GetHiddenTokensToRight collects all tokens on a specified channel to the
+// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
+// or EOF. If channel is -1, it finds any non-default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
+ from := tokenIndex + 1
+
+ // If there are no on-channel tokens to the right, nextOnChannel == -1, so set 'to' to the last token
+ var to int
+
+ if nextOnChannel == -1 {
+ to = len(c.tokens) - 1
+ } else {
+ to = nextOnChannel
+ }
+
+ return c.filterForChannel(from, to, channel)
+}
+
+// GetHiddenTokensToLeft collects all tokens on channel to the left of the
+// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
+// -1, it finds any non-default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)
+
+ if prevOnChannel == tokenIndex-1 {
+ return nil
+ }
+
+ // If there are none on channel to the left and prevOnChannel == -1 then from = 0
+ from := prevOnChannel + 1
+ to := tokenIndex - 1
+
+ return c.filterForChannel(from, to, channel)
+}
+
+func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
+ hidden := make([]Token, 0)
+
+ for i := left; i < right+1; i++ {
+ t := c.tokens[i]
+
+ if channel == -1 {
+ if t.GetChannel() != LexerDefaultTokenChannel {
+ hidden = append(hidden, t)
+ }
+ } else if t.GetChannel() == channel {
+ hidden = append(hidden, t)
+ }
+ }
+
+ if len(hidden) == 0 {
+ return nil
+ }
+
+ return hidden
+}
+
+func (c *CommonTokenStream) GetSourceName() string {
+ return c.tokenSource.GetSourceName()
+}
+
+func (c *CommonTokenStream) Size() int {
+ return len(c.tokens)
+}
+
+func (c *CommonTokenStream) Index() int {
+ return c.index
+}
+
+func (c *CommonTokenStream) GetAllText() string {
+ return c.GetTextFromInterval(nil)
+}
+
+func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
+ if start == nil || end == nil {
+ return ""
+ }
+
+ return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
+}
+
+func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
+ return c.GetTextFromInterval(interval.GetSourceInterval())
+}
+
+func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
+ c.lazyInit()
+ c.Fill()
+
+ if interval == nil {
+ interval = NewInterval(0, len(c.tokens)-1)
+ }
+
+ start := interval.Start
+ stop := interval.Stop
+
+ if start < 0 || stop < 0 {
+ return ""
+ }
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ s := ""
+
+ for i := start; i < stop+1; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ s += t.GetText()
+ }
+
+ return s
+}
+
+// Fill gets all tokens from the lexer until EOF.
+func (c *CommonTokenStream) Fill() {
+ c.lazyInit()
+
+ for c.fetch(1000) == 1000 {
+ continue
+ }
+}
+
+func (c *CommonTokenStream) adjustSeekIndex(i int) int {
+ return c.NextTokenOnChannel(i, c.channel)
+}
+
+func (c *CommonTokenStream) LB(k int) Token {
+ if k == 0 || c.index-k < 0 {
+ return nil
+ }
+
+ i := c.index
+ n := 1
+
+ // Find k good tokens looking backward
+ for n <= k {
+ // Skip off-channel tokens
+ i = c.previousTokenOnChannel(i-1, c.channel)
+ n++
+ }
+
+ if i < 0 {
+ return nil
+ }
+
+ return c.tokens[i]
+}
+
+func (c *CommonTokenStream) LT(k int) Token {
+ c.lazyInit()
+
+ if k == 0 {
+ return nil
+ }
+
+ if k < 0 {
+ return c.LB(-k)
+ }
+
+ i := c.index
+ n := 1 // We know tokens[n] is valid
+
+ // Find k good tokens
+ for n < k {
+ // Skip off-channel tokens, but make sure to not look past EOF
+ if c.Sync(i + 1) {
+ i = c.NextTokenOnChannel(i+1, c.channel)
+ }
+
+ n++
+ }
+
+ return c.tokens[i]
+}
+
+// getNumberOfOnChannelTokens returns the number of on-channel tokens, counting EOF at most once.
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
+ var n int
+
+ c.Fill()
+
+ for i := 0; i < len(c.tokens); i++ {
+ t := c.tokens[i]
+
+ if t.GetChannel() == c.channel {
+ n++
+ }
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+ }
+
+ return n
+}
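As a usage sketch (not part of the vendored file): a CommonTokenStream is normally fed by a generated lexer. The parser package and NewMyLexer constructor below are hypothetical generated names; NewInputStream, NewCommonTokenStream, TokenDefaultChannel, and the Token accessors come from the runtime above.

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"

	"example.com/myproject/parser" // hypothetical package holding the generated lexer
)

func main() {
	input := antlr.NewInputStream("a = 3")
	lexer := parser.NewMyLexer(input) // hypothetical generated constructor

	// Buffer tokens from the default channel; Fill pulls everything up to EOF.
	stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	stream.Fill()

	for _, t := range stream.GetAllTokens() {
		fmt.Printf("%d: %q\n", t.GetTokenIndex(), t.GetText())
	}
}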
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
new file mode 100644
index 000000000..770c32a6b
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
@@ -0,0 +1,183 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "sort"
+ "sync"
+)
+
+type DFA struct {
+ // atnStartState is the ATN state in which this was created
+ atnStartState DecisionState
+
+ decision int
+
+ // states is all the DFA states. Use Map to get the old state back; Set can only
+ // indicate whether it is there.
+ states map[int]*DFAState
+ statesMu sync.RWMutex
+
+ s0 *DFAState
+ s0Mu sync.RWMutex
+
+ // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
+ // True if the DFA is for a precedence decision and false otherwise.
+ precedenceDfa bool
+ precedenceDfaMu sync.RWMutex
+}
+
+func NewDFA(atnStartState DecisionState, decision int) *DFA {
+ return &DFA{
+ atnStartState: atnStartState,
+ decision: decision,
+ states: make(map[int]*DFAState),
+ }
+}
+
+// getPrecedenceStartState gets the start state for the current precedence and
+// returns the start state corresponding to the specified precedence if a start
+// state exists for the specified precedence and nil otherwise. d must be a
+// precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ // s0.edges is never nil for a precedence DFA
+ if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
+ return nil
+ }
+
+ return d.getS0().getIthEdge(precedence)
+}
+
+// setPrecedenceStartState sets the start state for the current precedence. d
+// must be a precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ if precedence < 0 {
+ return
+ }
+
+ // Synchronization on s0 here is ok. When the DFA is turned into a
+ // precedence DFA, s0 will be initialized once and not updated again. s0.edges
+ // is never nil for a precedence DFA.
+ s0 := d.getS0()
+ if precedence >= s0.numEdges() {
+ edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
+ s0.setEdges(edges)
+ d.setS0(s0)
+ }
+
+ s0.setIthEdge(precedence, startState)
+}
+
+func (d *DFA) getPrecedenceDfa() bool {
+ d.precedenceDfaMu.RLock()
+ defer d.precedenceDfaMu.RUnlock()
+ return d.precedenceDfa
+}
+
+// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
+// from the current DFA configuration, then d.states is cleared, the initial
+// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
+// store the start states for individual precedence values if precedenceDfa is
+// true or nil otherwise, and d.precedenceDfa is updated.
+func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
+ if d.getPrecedenceDfa() != precedenceDfa {
+ d.setStates(make(map[int]*DFAState))
+
+ if precedenceDfa {
+ precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
+
+ precedenceState.setEdges(make([]*DFAState, 0))
+ precedenceState.isAcceptState = false
+ precedenceState.requiresFullContext = false
+ d.setS0(precedenceState)
+ } else {
+ d.setS0(nil)
+ }
+
+ d.precedenceDfaMu.Lock()
+ defer d.precedenceDfaMu.Unlock()
+ d.precedenceDfa = precedenceDfa
+ }
+}
+
+func (d *DFA) getS0() *DFAState {
+ d.s0Mu.RLock()
+ defer d.s0Mu.RUnlock()
+ return d.s0
+}
+
+func (d *DFA) setS0(s *DFAState) {
+ d.s0Mu.Lock()
+ defer d.s0Mu.Unlock()
+ d.s0 = s
+}
+
+func (d *DFA) getState(hash int) (*DFAState, bool) {
+ d.statesMu.RLock()
+ defer d.statesMu.RUnlock()
+ s, ok := d.states[hash]
+ return s, ok
+}
+
+func (d *DFA) setStates(states map[int]*DFAState) {
+ d.statesMu.Lock()
+ defer d.statesMu.Unlock()
+ d.states = states
+}
+
+func (d *DFA) setState(hash int, state *DFAState) {
+ d.statesMu.Lock()
+ defer d.statesMu.Unlock()
+ d.states[hash] = state
+}
+
+func (d *DFA) numStates() int {
+ d.statesMu.RLock()
+ defer d.statesMu.RUnlock()
+ return len(d.states)
+}
+
+type dfaStateList []*DFAState
+
+func (d dfaStateList) Len() int { return len(d) }
+func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
+func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
+
+// sortedStates returns the states in d sorted by their state number.
+func (d *DFA) sortedStates() []*DFAState {
+ vs := make([]*DFAState, 0, len(d.states))
+
+ for _, v := range d.states {
+ vs = append(vs, v)
+ }
+
+ sort.Sort(dfaStateList(vs))
+
+ return vs
+}
+
+func (d *DFA) String(literalNames []string, symbolicNames []string) string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewDFASerializer(d, literalNames, symbolicNames).String()
+}
+
+func (d *DFA) ToLexerString() string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewLexerDFASerializer(d).String()
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
new file mode 100644
index 000000000..bf2ccc06c
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// DFASerializer is a DFA walker that knows how to dump them to serialized
+// strings.
+type DFASerializer struct {
+ dfa *DFA
+ literalNames []string
+ symbolicNames []string
+}
+
+func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
+ if literalNames == nil {
+ literalNames = make([]string, 0)
+ }
+
+ if symbolicNames == nil {
+ symbolicNames = make([]string, 0)
+ }
+
+ return &DFASerializer{
+ dfa: dfa,
+ literalNames: literalNames,
+ symbolicNames: symbolicNames,
+ }
+}
+
+func (d *DFASerializer) String() string {
+ if d.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := d.dfa.sortedStates()
+
+ for _, s := range states {
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += d.GetStateString(s)
+ buf += "-"
+ buf += d.getEdgeLabel(j)
+ buf += "->"
+ buf += d.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
+
+func (d *DFASerializer) getEdgeLabel(i int) string {
+ if i == 0 {
+ return "EOF"
+ } else if d.literalNames != nil && i-1 < len(d.literalNames) {
+ return d.literalNames[i-1]
+ } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
+ return d.symbolicNames[i-1]
+ }
+
+ return strconv.Itoa(i - 1)
+}
+
+func (d *DFASerializer) GetStateString(s *DFAState) string {
+ var a, b string
+
+ if s.isAcceptState {
+ a = ":"
+ }
+
+ if s.requiresFullContext {
+ b = "^"
+ }
+
+ baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
+
+ if s.isAcceptState {
+ if s.predicates != nil {
+ return baseStateStr + "=>" + fmt.Sprint(s.predicates)
+ }
+
+ return baseStateStr + "=>" + fmt.Sprint(s.prediction)
+ }
+
+ return baseStateStr
+}
+
+type LexerDFASerializer struct {
+ *DFASerializer
+}
+
+func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
+ return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
+}
+
+func (l *LexerDFASerializer) getEdgeLabel(i int) string {
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(i))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
+func (l *LexerDFASerializer) String() string {
+ if l.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := l.dfa.sortedStates()
+
+ for i := 0; i < len(states); i++ {
+ s := states[i]
+
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += l.GetStateString(s)
+ buf += "-"
+ buf += l.getEdgeLabel(j)
+ buf += "->"
+ buf += l.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
new file mode 100644
index 000000000..3a81706ad
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
@@ -0,0 +1,183 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "sync"
+)
+
+// PredPrediction maps a predicate to a predicted alternative.
+type PredPrediction struct {
+ alt int
+ pred SemanticContext
+}
+
+func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
+ return &PredPrediction{alt: alt, pred: pred}
+}
+
+func (p *PredPrediction) String() string {
+ return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
+}
+
+// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
+// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
+// states the ATN can be in after reading each input symbol. That is to say,
+// after reading input a1a2..an, the DFA is in a state that represents the
+// subset T of the states of the ATN that are reachable from the ATN's start
+// state along some path labeled a1a2..an." In conventional NFA-to-DFA
+// conversion, therefore, the subset T would be a bitset representing the set of
+// states the ATN could be in. We need to track the alt predicted by each state
+// as well, however. More importantly, we need to maintain a stack of states,
+// tracking the closure operations as they jump from rule to rule, emulating
+// rule invocations (method calls). I have to add a stack to simulate the proper
+// lookahead sequences for the underlying LL grammar from which the ATN was
+// derived.
+//
+// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
+// state (ala normal conversion) and a RuleContext describing the chain of rules
+// (if any) followed to arrive at that state.
+//
+// A DFAState may have multiple references to a particular state, but with
+// different ATN contexts (with same or different alts) meaning that state was
+// reached via a different set of rule invocations.
+type DFAState struct {
+ stateNumber int
+ configs ATNConfigSet
+
+ // edges elements point to the target of the symbol. Shift up by 1 so (-1)
+ // Token.EOF maps to the first element.
+ edges []*DFAState
+ edgesMu sync.RWMutex
+
+ isAcceptState bool
+
+ // prediction is the ttype we match or alt we predict if the state is accept.
+ // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
+ // requiresFullContext.
+ prediction int
+
+ lexerActionExecutor *LexerActionExecutor
+
+ // requiresFullContext indicates it was created during an SLL prediction that
+ // discovered a conflict between the configurations in the state. Future
+ // ParserATNSimulator.execATN invocations immediately jump to performing
+ // full-context prediction if this is true.
+ requiresFullContext bool
+
+ // predicates is the predicates associated with the ATN configurations of the
+ // DFA state during SLL parsing. When we have predicates, requiresFullContext
+ // is false, since full context prediction evaluates predicates on-the-fly.
+ // If predicates is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
+ //
+ // We only use these for non-requiresFullContext but conflicting states. That
+ // means we know from the context (it's $ or we don't dip into outer context)
+ // that it's an ambiguity not a conflict.
+ //
+ // This list is computed by
+ // ParserATNSimulator.predicateDFAState.
+ predicates []*PredPrediction
+}
+
+func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
+ if configs == nil {
+ configs = NewBaseATNConfigSet(false)
+ }
+
+ return &DFAState{configs: configs, stateNumber: stateNumber}
+}
+
+// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
+func (d *DFAState) GetAltSet() Set {
+ alts := NewArray2DHashSet(nil, nil)
+
+ if d.configs != nil {
+ for _, c := range d.configs.GetItems() {
+ alts.Add(c.GetAlt())
+ }
+ }
+
+ if alts.Len() == 0 {
+ return nil
+ }
+
+ return alts
+}
+
+func (d *DFAState) getEdges() []*DFAState {
+ d.edgesMu.RLock()
+ defer d.edgesMu.RUnlock()
+ return d.edges
+}
+
+func (d *DFAState) numEdges() int {
+ d.edgesMu.RLock()
+ defer d.edgesMu.RUnlock()
+ return len(d.edges)
+}
+
+func (d *DFAState) getIthEdge(i int) *DFAState {
+ d.edgesMu.RLock()
+ defer d.edgesMu.RUnlock()
+ return d.edges[i]
+}
+
+func (d *DFAState) setEdges(newEdges []*DFAState) {
+ d.edgesMu.Lock()
+ defer d.edgesMu.Unlock()
+ d.edges = newEdges
+}
+
+func (d *DFAState) setIthEdge(i int, edge *DFAState) {
+ d.edgesMu.Lock()
+ defer d.edgesMu.Unlock()
+ d.edges[i] = edge
+}
+
+func (d *DFAState) setPrediction(v int) {
+ d.prediction = v
+}
+
+// equals returns whether d equals other. Two DFAStates are equal if their ATN
+// configuration sets are the same. This method is used to see if a state
+// already exists.
+//
+// Because the number of alternatives and number of ATN configurations are
+// finite, there is a finite number of DFA states that can be processed. This is
+// necessary to show that the algorithm terminates.
+//
+// Cannot test the DFA state numbers here because in
+// ParserATNSimulator.addDFAState we need to know if any other state exists that
+// has this exact set of ATN configurations. The stateNumber is irrelevant.
+func (d *DFAState) equals(other interface{}) bool {
+ if d == other {
+ return true
+ } else if _, ok := other.(*DFAState); !ok {
+ return false
+ }
+
+ return d.configs.Equals(other.(*DFAState).configs)
+}
+
+func (d *DFAState) String() string {
+ var s string
+ if d.isAcceptState {
+ if d.predicates != nil {
+ s = "=>" + fmt.Sprint(d.predicates)
+ } else {
+ s = "=>" + fmt.Sprint(d.prediction)
+ }
+ }
+
+ return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
+}
+
+func (d *DFAState) hash() int {
+ h := murmurInit(7)
+ h = murmurUpdate(h, d.configs.hash())
+ return murmurFinish(h, 1)
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
new file mode 100644
index 000000000..1fec43d9d
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
@@ -0,0 +1,111 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// DiagnosticErrorListener is an implementation of ErrorListener that can be
+// used to identify certain potential correctness and performance problems in
+// grammars. "Reports" are made by calling Parser.NotifyErrorListeners with the
+// appropriate message:
+//
+//   - Ambiguities: cases where more than one path through the grammar can
+//     Match the input.
+//   - Weak context sensitivity: cases where full-context prediction resolved
+//     an SLL conflict to a unique alternative which equaled the minimum
+//     alternative of the SLL conflict.
+//   - Strong (forced) context sensitivity: cases where the full-context
+//     prediction resolved an SLL conflict to a unique alternative, and the
+//     minimum alternative of the SLL conflict was found to not be a truly
+//     viable alternative. Two-stage parsing cannot be used for inputs where
+//     this situation occurs.
+
+type DiagnosticErrorListener struct {
+ *DefaultErrorListener
+
+ exactOnly bool
+}
+
+func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
+
+ n := new(DiagnosticErrorListener)
+
+ // whether all ambiguities or only exact ambiguities are Reported.
+ n.exactOnly = exactOnly
+ return n
+}
+
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ if d.exactOnly && !exact {
+ return
+ }
+ msg := "reportAmbiguity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ": ambigAlts=" +
+ d.getConflictingAlts(ambigAlts, configs).String() +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+
+ msg := "reportAttemptingFullContext d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+ msg := "reportContextSensitivity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
+ decision := dfa.decision
+ ruleIndex := dfa.atnStartState.GetRuleIndex()
+
+ ruleNames := recognizer.GetRuleNames()
+ if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
+ return strconv.Itoa(decision)
+ }
+ ruleName := ruleNames[ruleIndex]
+ if ruleName == "" {
+ return strconv.Itoa(decision)
+ }
+ return strconv.Itoa(decision) + " (" + ruleName + ")"
+}
+
+// getConflictingAlts computes the set of conflicting or ambiguous alternatives
+// from a configuration set, if that information was not already provided by
+// the parser. It returns ReportedAlts if it is not nil, otherwise the set of
+// alternatives represented in the given configuration set.
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
+ if ReportedAlts != nil {
+ return ReportedAlts
+ }
+ result := NewBitSet()
+ for _, c := range set.GetItems() {
+ result.add(c.GetAlt())
+ }
+
+ return result
+}
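A sketch of how this listener is typically wired up; the parser package and NewMyParser constructor are hypothetical generated names, while RemoveErrorListeners, AddErrorListener, GetInterpreter, and the prediction-mode constant come from the runtime being vendored.

package parsing

import (
	"github.com/antlr/antlr4/runtime/Go/antlr"

	"example.com/myproject/parser" // hypothetical package holding the generated parser
)

func enableAmbiguityDiagnostics(tokens antlr.TokenStream) *parser.MyParser {
	p := parser.NewMyParser(tokens) // hypothetical generated constructor

	// Replace the default console listener with the diagnostic one and ask the
	// simulator to detect exact ambiguities.
	p.RemoveErrorListeners()
	p.AddErrorListener(antlr.NewDiagnosticErrorListener(true))
	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeLLExactAmbigDetection)

	return p
}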
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
new file mode 100644
index 000000000..028e1a9d7
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Provides an empty default implementation of ErrorListener. The
+// default implementation of each method does nothing, but can be overridden as
+// necessary.
+
+type ErrorListener interface {
+ SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
+}
+
+type DefaultErrorListener struct {
+}
+
+func NewDefaultErrorListener() *DefaultErrorListener {
+ return new(DefaultErrorListener)
+}
+
+func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+}
+
+func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+}
+
+type ConsoleErrorListener struct {
+ *DefaultErrorListener
+}
+
+func NewConsoleErrorListener() *ConsoleErrorListener {
+ return new(ConsoleErrorListener)
+}
+
+// ConsoleErrorListenerINSTANCE provides a default instance of ConsoleErrorListener.
+var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
+
+// SyntaxError prints messages to os.Stderr containing the values of line,
+// column, and msg using the following format:
+//
+//	line <line>:<column> <msg>
+func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+}
+
+type ProxyErrorListener struct {
+ *DefaultErrorListener
+ delegates []ErrorListener
+}
+
+func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
+ if delegates == nil {
+ panic("delegates is not provided")
+ }
+ l := new(ProxyErrorListener)
+ l.delegates = delegates
+ return l
+}
+
+func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ for _, d := range p.delegates {
+ d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
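The usual way to consume this interface from application code is to embed DefaultErrorListener and override only the callbacks of interest. A minimal sketch that collects syntax errors instead of printing them:

package listeners

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// collectingErrorListener gathers syntax errors; the embedded
// DefaultErrorListener supplies no-op implementations for the other callbacks.
type collectingErrorListener struct {
	*antlr.DefaultErrorListener
	Errors []string
}

func newCollectingErrorListener() *collectingErrorListener {
	return &collectingErrorListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}
}

func (l *collectingErrorListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{}, line, column int, msg string, e antlr.RecognitionException) {
	l.Errors = append(l.Errors, fmt.Sprintf("line %d:%d %s", line, column, msg))
}

A lexer or parser would then be configured with RemoveErrorListeners followed by AddErrorListener(newCollectingErrorListener()).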
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
new file mode 100644
index 000000000..977a6e454
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
@@ -0,0 +1,758 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type ErrorStrategy interface {
+ reset(Parser)
+ RecoverInline(Parser) Token
+ Recover(Parser, RecognitionException)
+ Sync(Parser)
+ inErrorRecoveryMode(Parser) bool
+ ReportError(Parser, RecognitionException)
+ ReportMatch(Parser)
+}
+
+// DefaultErrorStrategy is the default implementation of ErrorStrategy used
+// for error reporting and recovery in ANTLR parsers.
+//
+type DefaultErrorStrategy struct {
+ errorRecoveryMode bool
+ lastErrorIndex int
+ lastErrorStates *IntervalSet
+}
+
+var _ ErrorStrategy = &DefaultErrorStrategy{}
+
+func NewDefaultErrorStrategy() *DefaultErrorStrategy {
+
+ d := new(DefaultErrorStrategy)
+
+ // Indicates whether the error strategy is currently "recovering from an
+ // error". This is used to suppress Reporting multiple error messages while
+ // attempting to recover from a detected syntax error.
+ //
+ // @see //inErrorRecoveryMode
+ //
+ d.errorRecoveryMode = false
+
+ // The index into the input stream where the last error occurred.
+ // This is used to prevent infinite loops where an error is found
+ // but no token is consumed during recovery...another error is found,
+ // ad nauseam. This is a failsafe mechanism to guarantee that at least
+ // one token/tree node is consumed for two errors.
+ //
+ d.lastErrorIndex = -1
+ d.lastErrorStates = nil
+ return d
+}
+
+// The default implementation simply calls {@link //endErrorCondition} to
+// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// beginErrorCondition is called to enter error recovery mode when a
+// recognition exception is reported.
+func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
+ d.errorRecoveryMode = true
+}
+
+func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool {
+ return d.errorRecoveryMode
+}
+
+// endErrorCondition is called to leave error recovery mode after recovering
+// from a recognition exception.
+func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
+ d.errorRecoveryMode = false
+ d.lastErrorStates = nil
+ d.lastErrorIndex = -1
+}
+
+// ReportMatch: the default implementation simply calls endErrorCondition.
+func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+// ReportError: the default implementation returns immediately if the handler
+// is already in error recovery mode. Otherwise, it calls beginErrorCondition
+// and dispatches the reporting task based on the runtime type of e:
+//
+//   - NoViableAltException: dispatches the call to ReportNoViableAlternative
+//   - InputMisMatchException: dispatches the call to ReportInputMisMatch
+//   - FailedPredicateException: dispatches the call to ReportFailedPredicate
+//   - all other types: calls Parser.NotifyErrorListeners to report the exception
+func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
+ // if we've already Reported an error and have not Matched a token
+ // yet successfully, don't Report any errors.
+ if d.inErrorRecoveryMode(recognizer) {
+ return // don't Report spurious errors
+ }
+ d.beginErrorCondition(recognizer)
+
+ switch t := e.(type) {
+ default:
+ fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
+ // fmt.Println(e.stack)
+ recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
+ case *NoViableAltException:
+ d.ReportNoViableAlternative(recognizer, t)
+ case *InputMisMatchException:
+ d.ReportInputMisMatch(recognizer, t)
+ case *FailedPredicateException:
+ d.ReportFailedPredicate(recognizer, t)
+ }
+}
+
+// The default implementation reSynchronizes the parser by consuming tokens
+// until we find one in the reSynchronization set--loosely the set of tokens
+// that can follow the current rule.
+//
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+
+ if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
+ d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
+ // uh oh, another error at same token index and previously-Visited
+ // state in ATN must be a case where LT(1) is in the recovery
+ // token set so nothing got consumed. Consume a single token
+ // at least to prevent an infinite loop; this is a failsafe.
+ recognizer.Consume()
+ }
+ d.lastErrorIndex = recognizer.GetInputStream().Index()
+ if d.lastErrorStates == nil {
+ d.lastErrorStates = NewIntervalSet()
+ }
+ d.lastErrorStates.addOne(recognizer.GetState())
+ followSet := d.getErrorRecoverySet(recognizer)
+ d.consumeUntil(recognizer, followSet)
+}
+
+// Sync makes sure that the current lookahead symbol is consistent with what
+// we were expecting at this point in the ATN. You can call this at any time
+// but ANTLR only
+// generates code to check before subrules/loops and each iteration.
+//
+// Implements Jim Idle's magic Sync mechanism in closures and optional
+// subrules. E.g.,
+//
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
+//
+// At the start of a sub rule upon error, {@link //Sync} performs single
+// token deletion, if possible. If it can't do that, it bails on the current
+// rule and uses the default error recovery, which consumes until the
+// reSynchronization set of the current rule.
+//
+// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
+// with an empty alternative), then the expected set includes what follows
+// the subrule.
+//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub rule or what follows loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+//
+// ORIGINS
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
+// A single mismatch token or missing token would force the parser to bail
+// out of the entire rules surrounding the loop. So, for rule
+//
+//
+// classfunc : 'class' ID '{' member* '}'
+//
+//
+// input with an extra token between members would force the parser to
+// consume until it found the next class definition rather than the next
+// member definition of the current class.
+//
+// This functionality costs a little bit of effort because the parser has to
+// compare the token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off this
+// functionality by simply overriding this method as a blank { }.
+//
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+ // If already recovering, don't try to Sync
+ if d.inErrorRecoveryMode(recognizer) {
+ return
+ }
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ la := recognizer.GetTokenStream().LA(1)
+
+ // try cheaper subset first might get lucky. seems to shave a wee bit off
+ nextTokens := recognizer.GetATN().NextTokens(s, nil)
+ if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+ return
+ }
+
+ switch s.GetStateType() {
+ case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+ // Report error and recover if possible
+ if d.SingleTokenDeletion(recognizer) != nil {
+ return
+ }
+ panic(NewInputMisMatchException(recognizer))
+ case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+ d.ReportUnwantedToken(recognizer)
+ expecting := NewIntervalSet()
+ expecting.addSet(recognizer.GetExpectedTokens())
+ whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
+ d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+ default:
+ // do nothing if we can't identify the exact kind of ATN state
+ }
+}
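As the comment above notes, this in-loop recovery can be disabled by overriding Sync with a blank body. A sketch of that, done from application code by embedding the default strategy; the parser value p and its SetErrorHandler call are the usual generated-parser/BaseParser API:

// noSyncStrategy keeps all of DefaultErrorStrategy's behavior except the
// in-loop re-synchronization performed by Sync.
type noSyncStrategy struct {
	*antlr.DefaultErrorStrategy
}

func (s *noSyncStrategy) Sync(recognizer antlr.Parser) {}

// Installed on a parser p with:
//	p.SetErrorHandler(&noSyncStrategy{antlr.NewDefaultErrorStrategy()})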
+
+// ReportNoViableAlternative is called by ReportError when the exception is a
+// NoViableAltException.
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+ tokens := recognizer.GetTokenStream()
+ var input string
+ if tokens != nil {
+ if e.startToken.GetTokenType() == TokenEOF {
+ input = "<EOF>"
+ } else {
+ input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+ }
+ } else {
+ input = "<unknown input>"
+ }
+ msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportInputMisMatch is called by ReportError when the exception is an
+// InputMisMatchException.
+func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+ msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
+ " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportFailedPredicate is called by ReportError when the exception is a
+// FailedPredicateException.
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+ ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+ msg := "rule " + ruleName + " " + e.message
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// ReportUnwantedToken is called to report a syntax error that requires the
+// removal of a token from the input stream. At the time this method is called,
+// the erroneous symbol is the current LT(1) symbol and has not yet been
+// removed from the input stream. When this method returns, recognizer is in
+// error recovery mode.
+//
+// This method is called when SingleTokenDeletion identifies single-token
+// deletion as a viable recovery strategy for a mismatched input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to enter error
+// recovery mode, followed by calling Parser.NotifyErrorListeners.
+//
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+ if d.inErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ tokenName := d.GetTokenErrorDisplay(t)
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "extraneous input " + tokenName + " expecting " +
+ expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// ReportMissingToken is called to report a syntax error that requires the
+// insertion of a missing token into the input stream. At the time this method
+// is called, the missing token has not yet been inserted. When this method
+// returns, recognizer is in error recovery mode.
+//
+// This method is called when SingleTokenInsertion identifies single-token
+// insertion as a viable recovery strategy for a mismatched input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls beginErrorCondition to enter error
+// recovery mode, followed by calling Parser.NotifyErrorListeners.
+//
+func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
+ if d.inErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
+ " at " + d.GetTokenErrorDisplay(t)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// The default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, this method panics with an
+// InputMisMatchException.
+//
+// EXTRA TOKEN (single token deletion)
+//
+// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
+// right token, however, then assume {@code LA(1)} is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the {@code LA(2)} token) as the successful result of the Match operation.
+//
+// This recovery strategy is implemented by {@link
+// //singleTokenDeletion}.
+//
+// MISSING TOKEN (single token insertion)
+//
+// If current token (at {@code LA(1)}) is consistent with what could come
+// after the expected {@code LA(1)} token, then assume the token is missing
+// and use the parser's {@link TokenFactory} to create it on the fly. The
+// "insertion" is performed by returning the created token as the successful
+// result of the Match operation.
+//
+// This recovery strategy is implemented by {@link
+// //singleTokenInsertion}.
+//
+// EXAMPLE
+//
+// For example, the input i=(3; is clearly missing the ')'. When
+// the parser returns from the nested call to expr, it will have
+// the call chain:
+//
+//	stat -> expr -> atom
+//
+// and it will be trying to Match the ')' at this point in the
+// derivation:
+//
+//	=> ID '=' '(' INT ')' ('+' atom)* ';'
+//	                      ^
+//
+// The attempt to Match ')' will fail when it sees ';' and
+// calls RecoverInline. To recover, it sees that LA(1)==';'
+// is in the set of tokens that can follow the ')' token reference
+// in rule atom. It can assume that you forgot the ')'.
+//
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+ // SINGLE TOKEN DELETION
+ MatchedSymbol := d.SingleTokenDeletion(recognizer)
+ if MatchedSymbol != nil {
+ // we have deleted the extra token.
+ // now, move past ttype token as if all were ok
+ recognizer.Consume()
+ return MatchedSymbol
+ }
+ // SINGLE TOKEN INSERTION
+ if d.SingleTokenInsertion(recognizer) {
+ return d.GetMissingSymbol(recognizer)
+ }
+ // even that didn't work must panic the exception
+ panic(NewInputMisMatchException(recognizer))
+}
+
+// SingleTokenInsertion implements the single-token insertion inline error
+// recovery strategy. It is called by RecoverInline if the single-token
+// deletion strategy fails to recover from the mismatched input. If this method
+// returns true, recognizer will be in error recovery mode.
+//
+// This method determines whether or not single-token insertion is viable by
+// checking if the LA(1) input symbol could be successfully Matched if it were
+// instead the LA(2) symbol. If this method returns true, the caller is
+// responsible for creating and inserting a token with the correct type to
+// produce this behavior. It returns true if single-token insertion is a viable
+// recovery strategy for the current mismatched input, otherwise false.
+//
+func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
+ currentSymbolType := recognizer.GetTokenStream().LA(1)
+ // if current token is consistent with what could come after current
+ // ATN state, then we know we're missing a token error recovery
+ // is free to conjure up and insert the missing token
+ atn := recognizer.GetInterpreter().atn
+ currentState := atn.states[recognizer.GetState()]
+ next := currentState.GetTransitions()[0].getTarget()
+ expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
+ if expectingAtLL2.contains(currentSymbolType) {
+ d.ReportMissingToken(recognizer)
+ return true
+ }
+
+ return false
+}
+
+// SingleTokenDeletion implements the single-token deletion inline error
+// recovery strategy. It is called by RecoverInline to attempt to recover from
+// mismatched input. If this method returns nil, the parser and error handler
+// state will not have changed. If this method returns non-nil, recognizer
+// will not be in error recovery mode since the returned token was a
+// successful Match.
+//
+// If the single-token deletion is successful, this method calls
+// ReportUnwantedToken to report the error, followed by Parser.Consume to
+// actually "delete" the extraneous token. Then, before returning, ReportMatch
+// is called to signal a successful Match. It returns the successfully Matched
+// Token instance if single-token deletion recovers from the mismatched input,
+// otherwise nil.
+//
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+ NextTokenType := recognizer.GetTokenStream().LA(2)
+ expecting := d.GetExpectedTokens(recognizer)
+ if expecting.contains(NextTokenType) {
+ d.ReportUnwantedToken(recognizer)
+ // print("recoverFromMisMatchedToken deleting " \
+ // + str(recognizer.GetTokenStream().LT(1)) \
+ // + " since " + str(recognizer.GetTokenStream().LT(2)) \
+ // + " is what we want", file=sys.stderr)
+ recognizer.Consume() // simply delete extra token
+ // we want to return the token we're actually Matching
+ MatchedSymbol := recognizer.GetCurrentToken()
+ d.ReportMatch(recognizer) // we know current token is correct
+ return MatchedSymbol
+ }
+
+ return nil
+}
+
+// Conjure up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example, x=ID {f($x)}. The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want, we assume that
+// this token is missing and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a CommonToken of the appropriate type. The text will be the token.
+// If you change what tokens must be created by the lexer,
+// override this method to create the appropriate tokens.
+//
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+ currentSymbol := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ expectedTokenType := expecting.first()
+ var tokenText string
+
+ if expectedTokenType == TokenEOF {
+ tokenText = "<missing EOF>"
+ } else {
+ ln := recognizer.GetLiteralNames()
+ if expectedTokenType > 0 && expectedTokenType < len(ln) {
+ tokenText = "<missing " + ln[expectedTokenType] + ">"
+ } else {
+ tokenText = "<missing undefined>" // TODO matches the JS impl
+ }
+ }
+ current := currentSymbol
+ lookback := recognizer.GetTokenStream().LT(-1)
+ if current.GetTokenType() == TokenEOF && lookback != nil {
+ current = lookback
+ }
+
+ tf := recognizer.GetTokenFactory()
+
+ return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
+
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+ return recognizer.GetExpectedTokens()
+}
+
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new token type.
+//
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return "<no token>"
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = "<EOF>"
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ return d.escapeWSAndQuote(s)
+}
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ return "'" + s + "'"
+}
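+
+// Illustrative note (not part of the upstream source): given the replacements
+// above, a raw token text containing whitespace is quoted and escaped before it
+// appears in an error message; the value below follows directly from the code:
+//
+//	s := d.escapeWSAndQuote("a\tb\n") // s == "'a\\tb\\n'", i.e. the characters 'a\tb\n'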
+
+// Compute the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+// This local follow set only includes tokens
+// from within the rule i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// EXAMPLE
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r, or any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider grammar:
+//
+// a : '[' b ']'
+// | '(' b ')'
+//
+// b : c '^' INT
+// c : ID
+// | INT
+//
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack. Here are the various
+// context-sensitive follow sets:
+//
+// FOLLOW(b1_in_a) = FIRST(']') = ']'
+// FOLLOW(b2_in_a) = FIRST(')') = ')'
+// FOLLOW(c_in_b) = FIRST('^') = '^'
+//
+// Upon erroneous input "[]", the call chain is
+//
+// a -> b -> c
+//
+// and, hence, the follow context stack is:
+//
+// depth follow set start of rule execution
+// 0 a (from main())
+// 1 ']' b
+// 2 '^' c
+//
+// Notice that ')' is not included, because b would have to have
+// been called from a different context in rule a for ')' to be
+// included.
+//
+// For error recovery, we cannot consider FOLLOW(c)
+// (context-sensitive or otherwise). We need the combined set of
+// all context-sensitive FOLLOW sets--the set of all tokens that
+// could follow any reference in the call chain. We need to
+// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
+// we reSync'd to that token, we'd consume until EOF. We need to
+// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+// In this case, for input "[]", LA(1) is ']' and in the set, so we would
+// not consume anything. After printing an error, rule c would
+// return normally. Rule b would not find the required '^' though.
+// At this point, it gets a mismatched token error and panics with an
+// exception (since LA(1) is not in the viable following token
+// set). The rule exception handler tries to recover, but finds
+// the same recovery set and doesn't consume anything. Rule b
+// exits normally returning to rule a. Now it finds the ']' (and
+// with the successful Match exits errorRecovery mode).
+//
+// So, you can see that the parser walks up the call chain looking
+// for the token that was a member of the recovery set.
+//
+// Errors are not generated in errorRecovery mode.
+//
+// ANTLR's error recovery mechanism is based upon original ideas:
+//
+// "Algorithms + Data Structures = Programs" by Niklaus Wirth
+//
+// and
+//
+// "A note on error recovery in recursive descent parsers":
+// http://portal.acm.org/citation.cfm?id=947902.947905
+//
+// Later, Josef Grosch had some good ideas:
+//
+// "Efficient and Comfortable Error Recovery in Recursive Descent
+// Parsers":
+// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+//
+// Like Grosch I implement context-sensitive FOLLOW sets that are combined
+// at run-time upon error to avoid overhead during parsing.
+//
+func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
+ atn := recognizer.GetInterpreter().atn
+ ctx := recognizer.GetParserRuleContext()
+ recoverSet := NewIntervalSet()
+ for ctx != nil && ctx.GetInvokingState() >= 0 {
+ // compute what follows who invoked us
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ recoverSet.addSet(follow)
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ recoverSet.removeOne(TokenEpsilon)
+ return recoverSet
+}
+
+// Consume tokens until one Matches the given token set.//
+func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
+ ttype := recognizer.GetTokenStream().LA(1)
+ for ttype != TokenEOF && !set.contains(ttype) {
+ recognizer.Consume()
+ ttype = recognizer.GetTokenStream().LA(1)
+ }
+}
+
+//
+// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
+// by immediately canceling the parse operation with a
+// {@link ParseCancellationException}. The implementation ensures that the
+// {@link ParserRuleContext//exception} field is set for all parse tree nodes
+// that were not completed prior to encountering the error.
+//
+//
+// This error strategy is useful in the following scenarios.
+//
+// - Two-stage parsing: This error strategy allows the first
+// stage of two-stage parsing to immediately terminate if an error is
+// encountered, and immediately fall back to the second stage. In addition to
+// avoiding wasted work by attempting to recover from errors here, the empty
+// implementation of {@link BailErrorStrategy//Sync} improves the performance of
+// the first stage.
+// - Silent validation: When syntax errors are not being
+// Reported or logged, and the parse result is simply ignored if errors occur,
+// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
+// when the result will be ignored either way.
+//
+// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
+//
+// @see Parser//setErrorHandler(ANTLRErrorStrategy)
+
+type BailErrorStrategy struct {
+ *DefaultErrorStrategy
+}
+
+var _ ErrorStrategy = &BailErrorStrategy{}
+
+func NewBailErrorStrategy() *BailErrorStrategy {
+
+ b := new(BailErrorStrategy)
+
+ b.DefaultErrorStrategy = NewDefaultErrorStrategy()
+
+ return b
+}
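+
+// Illustrative sketch (not part of the upstream source) of the two-stage parsing
+// pattern described above, written from application code. "parser.NewMyParser"
+// and "p.TopRule()" stand in for a generated parser and its entry rule; imports
+// are omitted. Only SetErrorHandler, NewBailErrorStrategy and
+// ParseCancellationException come from this runtime.
+//
+//	func parseWithBail(tokens antlr.TokenStream) (err error) {
+//		p := parser.NewMyParser(tokens) // hypothetical generated parser
+//		p.SetErrorHandler(antlr.NewBailErrorStrategy())
+//		defer func() {
+//			if r := recover(); r != nil {
+//				if _, ok := r.(*antlr.ParseCancellationException); ok {
+//					err = errors.New("syntax error") // stage one failed; caller can retry with defaults
+//					return
+//				}
+//				panic(r) // not a bail-out; re-panic
+//			}
+//		}()
+//		p.TopRule() // hypothetical entry rule
+//		return nil
+//	}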
+
+// Instead of recovering from exception {@code e}, re-panic it wrapped
+// in a {@link ParseCancellationException} so it is not caught by the
+// rule function catch clauses. Use {@link Exception//getCause()} to get the
+// original {@link RecognitionException}.
+//
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+ context := recognizer.GetParserRuleContext()
+ for context != nil {
+ context.SetException(e)
+ context = context.GetParent().(ParserRuleContext)
+ }
+ panic(NewParseCancellationException()) // TODO we don't emit e properly
+}
+
+// Make sure we don't attempt to recover inline; if the parser
+// successfully recovers, it won't panic an exception.
+//
+func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
+ b.Recover(recognizer, NewInputMisMatchException(recognizer))
+
+ return nil
+}
+
+// Make sure we don't attempt to recover from problems in subrules.//
+func (b *BailErrorStrategy) Sync(recognizer Parser) {
+ // pass
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
new file mode 100644
index 000000000..2ef74926e
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
@@ -0,0 +1,241 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
+// 3 kinds of errors: prediction errors, failed predicate errors, and
+// mismatched input errors. In each case, the parser knows where it is
+// in the input, where it is in the ATN, the rule invocation stack,
+// and what kind of problem occurred.
+
+type RecognitionException interface {
+ GetOffendingToken() Token
+ GetMessage() string
+ GetInputStream() IntStream
+}
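+
+// Illustrative sketch (not part of the upstream source): application code
+// usually observes a RecognitionException through an error listener rather than
+// by catching it directly. The sketch assumes the runtime's DefaultErrorListener
+// and the recognizer's AddErrorListener/RemoveErrorListeners methods (defined
+// elsewhere in this runtime); imports are omitted.
+//
+//	type collectingListener struct {
+//		*antlr.DefaultErrorListener
+//		errs []string
+//	}
+//
+//	func (c *collectingListener) SyntaxError(_ antlr.Recognizer, _ interface{},
+//		line, column int, msg string, _ antlr.RecognitionException) {
+//		c.errs = append(c.errs, fmt.Sprintf("%d:%d %s", line, column, msg))
+//	}
+//
+// Attach it to a generated lexer or parser with RemoveErrorListeners followed by
+// AddErrorListener(&collectingListener{DefaultErrorListener: antlr.NewDefaultErrorListener()}).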
+
+type BaseRecognitionException struct {
+ message string
+ recognizer Recognizer
+ offendingToken Token
+ offendingState int
+ ctx RuleContext
+ input IntStream
+}
+
+func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
+
+ // todo
+ // Error.call(this)
+ //
+ // if (!!Error.captureStackTrace) {
+ // Error.captureStackTrace(this, RecognitionException)
+ // } else {
+ // stack := NewError().stack
+ // }
+ // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
+
+ t := new(BaseRecognitionException)
+
+ t.message = message
+ t.recognizer = recognizer
+ t.input = input
+ t.ctx = ctx
+ // The current {@link Token} when an error occurred. Since not all streams
+ // support accessing symbols by index, we have to track the {@link Token}
+ // instance itself.
+ t.offendingToken = nil
+ // Get the ATN state number the parser was in at the time the error
+ // occurred. For {@link NoViableAltException} and
+ // {@link LexerNoViableAltException} exceptions, this is the
+ // {@link DecisionState} number. For others, it is the state whose outgoing
+ // edge we couldn't Match.
+ t.offendingState = -1
+ if t.recognizer != nil {
+ t.offendingState = t.recognizer.GetState()
+ }
+
+ return t
+}
+
+func (b *BaseRecognitionException) GetMessage() string {
+ return b.message
+}
+
+func (b *BaseRecognitionException) GetOffendingToken() Token {
+ return b.offendingToken
+}
+
+func (b *BaseRecognitionException) GetInputStream() IntStream {
+ return b.input
+}
+
+// If the state number is not known, this method returns -1.
+
+//
+// Gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was thrown.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns {@code nil}.
+//
+// @return The set of token types that could potentially follow the current
+// state in the ATN, or {@code nil} if the information is not available.
+// /
+func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
+ if b.recognizer != nil {
+ return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
+ }
+
+ return nil
+}
+
+func (b *BaseRecognitionException) String() string {
+ return b.message
+}
+
+type LexerNoViableAltException struct {
+ *BaseRecognitionException
+
+ startIndex int
+ deadEndConfigs ATNConfigSet
+}
+
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
+
+ l := new(LexerNoViableAltException)
+
+ l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
+
+ l.startIndex = startIndex
+ l.deadEndConfigs = deadEndConfigs
+
+ return l
+}
+
+func (l *LexerNoViableAltException) String() string {
+ symbol := ""
+ if l.startIndex >= 0 && l.startIndex < l.input.Size() {
+ symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
+ }
+ return "LexerNoViableAltException" + symbol
+}
+
+type NoViableAltException struct {
+ *BaseRecognitionException
+
+ startToken Token
+ offendingToken Token
+ ctx ParserRuleContext
+ deadEndConfigs ATNConfigSet
+}
+
+// Indicates that the parser could not decide which of two or more paths
+// to take based upon the remaining input. It tracks the starting token
+// of the offending input and also knows where the parser was
+// in the various paths when the error occurred. Reported by ReportNoViableAlternative()
+//
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+
+ if ctx == nil {
+ ctx = recognizer.GetParserRuleContext()
+ }
+
+ if offendingToken == nil {
+ offendingToken = recognizer.GetCurrentToken()
+ }
+
+ if startToken == nil {
+ startToken = recognizer.GetCurrentToken()
+ }
+
+ if input == nil {
+ input = recognizer.GetInputStream().(TokenStream)
+ }
+
+ n := new(NoViableAltException)
+ n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
+
+ // Which configurations did we try at input.Index() that couldn't Match
+ // input.LT(1)?//
+ n.deadEndConfigs = deadEndConfigs
+ // The token object at the start index; the input stream might
+ // not be buffering tokens so get a reference to it. (At the
+ // time the error occurred, of course the stream needs to keep a
+ // buffer of all of the tokens but later we might not have access to those.)
+ n.startToken = startToken
+ n.offendingToken = offendingToken
+
+ return n
+}
+
+type InputMisMatchException struct {
+ *BaseRecognitionException
+}
+
+// This signifies any kind of mismatched input exceptions such as
+// when the current input does not Match the expected token.
+//
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
+
+ i := new(InputMisMatchException)
+ i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ i.offendingToken = recognizer.GetCurrentToken()
+
+ return i
+
+}
+
+// A semantic predicate failed during validation. Validation of predicates
+// occurs when normally parsing the alternative just like Matching a token.
+// Disambiguating predicate evaluation occurs when we test a predicate during
+// prediction.
+
+type FailedPredicateException struct {
+ *BaseRecognitionException
+
+ ruleIndex int
+ predicateIndex int
+ predicate string
+}
+
+func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
+
+ f := new(FailedPredicateException)
+
+ f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ trans := s.GetTransitions()[0]
+ if trans2, ok := trans.(*PredicateTransition); ok {
+ f.ruleIndex = trans2.ruleIndex
+ f.predicateIndex = trans2.predIndex
+ } else {
+ f.ruleIndex = 0
+ f.predicateIndex = 0
+ }
+ f.predicate = predicate
+ f.offendingToken = recognizer.GetCurrentToken()
+
+ return f
+}
+
+func (f *FailedPredicateException) formatMessage(predicate, message string) string {
+ if message != "" {
+ return message
+ }
+
+ return "failed predicate: {" + predicate + "}?"
+}
+
+type ParseCancellationException struct {
+}
+
+func NewParseCancellationException() *ParseCancellationException {
+ // Error.call(this)
+ // Error.captureStackTrace(this, ParseCancellationException)
+ return new(ParseCancellationException)
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
new file mode 100644
index 000000000..842170c08
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "io"
+ "os"
+)
+
+// This is an InputStream that is loaded from a file all at once
+// when you construct the object.
+
+type FileStream struct {
+ *InputStream
+
+ filename string
+}
+
+func NewFileStream(fileName string) (*FileStream, error) {
+
+ buf := bytes.NewBuffer(nil)
+
+ f, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ _, err = io.Copy(buf, f)
+ if err != nil {
+ return nil, err
+ }
+
+ fs := new(FileStream)
+
+ fs.filename = fileName
+ s := string(buf.Bytes())
+
+ fs.InputStream = NewInputStream(s)
+
+ return fs, nil
+
+}
+
+func (f *FileStream) GetSourceName() string {
+ return f.filename
+}
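+
+// Illustrative sketch (not part of the upstream source): loading an entire file
+// as a character stream and handing it to a lexer. "parser.NewMyLexer" and the
+// file name are placeholders for a lexer generated by the ANTLR tool and a real
+// input file; imports are omitted.
+//
+//	fs, err := antlr.NewFileStream("input.expr")
+//	if err != nil {
+//		panic(err)
+//	}
+//	lex := parser.NewMyLexer(fs) // hypothetical generated lexer
+//	_ = lex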
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/go.mod b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/go.mod
new file mode 100644
index 000000000..004cf3465
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/go.mod
@@ -0,0 +1,3 @@
+module github.com/antlr/antlr4/runtime/Go/antlr
+
+go 1.16
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
new file mode 100644
index 000000000..5ff270f53
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type InputStream struct {
+ name string
+ index int
+ data []rune
+ size int
+}
+
+func NewInputStream(data string) *InputStream {
+
+ is := new(InputStream)
+
+ is.name = ""
+ is.index = 0
+ is.data = []rune(data)
+ is.size = len(is.data) // number of runes
+
+ return is
+}
+
+func (is *InputStream) reset() {
+ is.index = 0
+}
+
+func (is *InputStream) Consume() {
+ if is.index >= is.size {
+ // assert is.LA(1) == TokenEOF
+ panic("cannot consume EOF")
+ }
+ is.index++
+}
+
+func (is *InputStream) LA(offset int) int {
+
+ if offset == 0 {
+ return 0 // nil
+ }
+ if offset < 0 {
+ offset++ // e.g., translate LA(-1) to use offset=0
+ }
+ pos := is.index + offset - 1
+
+ if pos < 0 || pos >= is.size { // invalid
+ return TokenEOF
+ }
+
+ return int(is.data[pos])
+}
+
+func (is *InputStream) LT(offset int) int {
+ return is.LA(offset)
+}
+
+func (is *InputStream) Index() int {
+ return is.index
+}
+
+func (is *InputStream) Size() int {
+ return is.size
+}
+
+// mark/release do nothing; we have the entire buffer
+func (is *InputStream) Mark() int {
+ return -1
+}
+
+func (is *InputStream) Release(marker int) {
+}
+
+func (is *InputStream) Seek(index int) {
+ if index <= is.index {
+ is.index = index // just jump don't update stream state (line,...)
+ return
+ }
+ // seek forward
+ is.index = intMin(index, is.size)
+}
+
+func (is *InputStream) GetText(start int, stop int) string {
+ if stop >= is.size {
+ stop = is.size - 1
+ }
+ if start >= is.size {
+ return ""
+ }
+
+ return string(is.data[start : stop+1])
+}
+
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+ if start != nil && stop != nil {
+ return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+ }
+
+ return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i *Interval) string {
+ return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+ return "Obtained from string"
+}
+
+func (is *InputStream) String() string {
+ return string(is.data)
+}
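+
+// Illustrative sketch (not part of the upstream source): basic use of the
+// rune-based stream defined above; the commented values follow directly from
+// the implementation.
+//
+//	is := antlr.NewInputStream("ab")
+//	_ = is.LA(1)         // 'a' (97); LA is 1-based lookahead
+//	is.Consume()         // step past 'a'
+//	_ = is.LA(1)         // 'b' (98)
+//	_ = is.LA(2)         // antlr.TokenEOF: lookahead past the end
+//	_ = is.GetText(0, 1) // "ab"; both bounds are inclusive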
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
new file mode 100644
index 000000000..438e0ea6e
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type IntStream interface {
+ Consume()
+ LA(int) int
+ Mark() int
+ Release(marker int)
+ Index() int
+ Seek(index int)
+ Size() int
+ GetSourceName() string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
new file mode 100644
index 000000000..1e9393adb
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
@@ -0,0 +1,308 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type Interval struct {
+ Start int
+ Stop int
+}
+
+/* stop is not included! */
+func NewInterval(start, stop int) *Interval {
+ i := new(Interval)
+
+ i.Start = start
+ i.Stop = stop
+ return i
+}
+
+func (i *Interval) Contains(item int) bool {
+ return item >= i.Start && item < i.Stop
+}
+
+func (i *Interval) String() string {
+ if i.Start == i.Stop-1 {
+ return strconv.Itoa(i.Start)
+ }
+
+ return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
+}
+
+func (i *Interval) length() int {
+ return i.Stop - i.Start
+}
+
+type IntervalSet struct {
+ intervals []*Interval
+ readOnly bool
+}
+
+func NewIntervalSet() *IntervalSet {
+
+ i := new(IntervalSet)
+
+ i.intervals = nil
+ i.readOnly = false
+
+ return i
+}
+
+func (i *IntervalSet) first() int {
+ if len(i.intervals) == 0 {
+ return TokenInvalidType
+ }
+
+ return i.intervals[0].Start
+}
+
+func (i *IntervalSet) addOne(v int) {
+ i.addInterval(NewInterval(v, v+1))
+}
+
+func (i *IntervalSet) addRange(l, h int) {
+ i.addInterval(NewInterval(l, h+1))
+}
+
+func (i *IntervalSet) addInterval(v *Interval) {
+ if i.intervals == nil {
+ i.intervals = make([]*Interval, 0)
+ i.intervals = append(i.intervals, v)
+ } else {
+ // find insert pos
+ for k, interval := range i.intervals {
+ // distinct range -> insert
+ if v.Stop < interval.Start {
+ i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
+ return
+ } else if v.Stop == interval.Start {
+ i.intervals[k].Start = v.Start
+ return
+ } else if v.Start <= interval.Stop {
+ i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))
+
+ // if not applying to end, merge potential overlaps
+ if k < len(i.intervals)-1 {
+ l := i.intervals[k]
+ r := i.intervals[k+1]
+ // if r contained in l
+ if l.Stop >= r.Stop {
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ } else if l.Stop >= r.Start { // partial overlap
+ i.intervals[k] = NewInterval(l.Start, r.Stop)
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ }
+ }
+ return
+ }
+ }
+ // greater than any existing
+ i.intervals = append(i.intervals, v)
+ }
+}
+
+func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
+ if other.intervals != nil {
+ for k := 0; k < len(other.intervals); k++ {
+ i2 := other.intervals[k]
+ i.addInterval(NewInterval(i2.Start, i2.Stop))
+ }
+ }
+ return i
+}
+
+func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
+ result := NewIntervalSet()
+ result.addInterval(NewInterval(start, stop+1))
+ for j := 0; j < len(i.intervals); j++ {
+ result.removeRange(i.intervals[j])
+ }
+ return result
+}
+
+func (i *IntervalSet) contains(item int) bool {
+ if i.intervals == nil {
+ return false
+ }
+ for k := 0; k < len(i.intervals); k++ {
+ if i.intervals[k].Contains(item) {
+ return true
+ }
+ }
+ return false
+}
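+
+// Illustrative in-package sketch (not part of the upstream source; these methods
+// are unexported, so it only applies within this package): addRange stores the
+// half-open interval [l, h+1), so both endpoints passed to addRange are reported
+// by contains.
+//
+//	s := NewIntervalSet()
+//	s.addOne(5)
+//	s.addRange(10, 12)
+//	_ = s.contains(5)  // true
+//	_ = s.contains(12) // true
+//	_ = s.contains(13) // false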
+
+func (i *IntervalSet) length() int {
+ len := 0
+
+ for _, v := range i.intervals {
+ len += v.length()
+ }
+
+ return len
+}
+
+func (i *IntervalSet) removeRange(v *Interval) {
+ if v.Start == v.Stop-1 {
+ i.removeOne(v.Start)
+ } else if i.intervals != nil {
+ k := 0
+ for n := 0; n < len(i.intervals); n++ {
+ ni := i.intervals[k]
+ // intervals are ordered
+ if v.Stop <= ni.Start {
+ return
+ } else if v.Start > ni.Start && v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ x := NewInterval(v.Stop, ni.Stop)
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+ return
+ } else if v.Start <= ni.Start && v.Stop >= ni.Stop {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ k = k - 1 // need another pass
+ } else if v.Start < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ } else if v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(v.Stop, ni.Stop)
+ }
+ k++
+ }
+ }
+}
+
+func (i *IntervalSet) removeOne(v int) {
+ if i.intervals != nil {
+ for k := 0; k < len(i.intervals); k++ {
+ ki := i.intervals[k]
+ // intervals are ordered
+ if v < ki.Start {
+ return
+ } else if v == ki.Start && v == ki.Stop-1 {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ return
+ } else if v == ki.Start {
+ i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
+ return
+ } else if v == ki.Stop-1 {
+ i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
+ return
+ } else if v < ki.Stop-1 {
+ x := NewInterval(ki.Start, v)
+ ki.Start = v + 1
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+ return
+ }
+ }
+ }
+}
+
+func (i *IntervalSet) String() string {
+ return i.StringVerbose(nil, nil, false)
+}
+
+func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
+
+ if i.intervals == nil {
+ return "{}"
+ } else if literalNames != nil || symbolicNames != nil {
+ return i.toTokenString(literalNames, symbolicNames)
+ } else if elemsAreChar {
+ return i.toCharString()
+ }
+
+ return i.toIndexString()
+}
+
+func (i *IntervalSet) toCharString() string {
+ names := make([]string, 0, len(i.intervals))
+
+ var sb strings.Builder
+
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "<EOF>")
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(v.Stop - 1))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toIndexString() string {
+
+ names := make([]string, 0)
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "<EOF>")
+ } else {
+ names = append(names, strconv.Itoa(v.Start))
+ }
+ } else {
+ names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
+ names := make([]string, 0)
+ for _, v := range i.intervals {
+ for j := v.Start; j < v.Stop; j++ {
+ names = append(names, i.elementName(literalNames, symbolicNames, j))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
+ if a == TokenEOF {
+ return "<EOF>"
+ } else if a == TokenEpsilon {
+ return "<EPSILON>"
+ } else {
+ if a < len(literalNames) && literalNames[a] != "" {
+ return literalNames[a]
+ }
+
+ return symbolicNames[a]
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
new file mode 100644
index 000000000..b04f04572
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
@@ -0,0 +1,418 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// A lexer is a recognizer that draws input symbols from a character stream.
+// lexer grammars result in a subclass of this object. A Lexer object
+// uses simplified Match() and error recovery mechanisms in the interest
+// of speed.
+///
+
+type Lexer interface {
+ TokenSource
+ Recognizer
+
+ Emit() Token
+
+ SetChannel(int)
+ PushMode(int)
+ PopMode() int
+ SetType(int)
+ SetMode(int)
+}
+
+type BaseLexer struct {
+ *BaseRecognizer
+
+ Interpreter ILexerATNSimulator
+ TokenStartCharIndex int
+ TokenStartLine int
+ TokenStartColumn int
+ ActionType int
+ Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
+
+ input CharStream
+ factory TokenFactory
+ tokenFactorySourcePair *TokenSourceCharStreamPair
+ token Token
+ hitEOF bool
+ channel int
+ thetype int
+ modeStack IntStack
+ mode int
+ text string
+}
+
+func NewBaseLexer(input CharStream) *BaseLexer {
+
+ lexer := new(BaseLexer)
+
+ lexer.BaseRecognizer = NewBaseRecognizer()
+
+ lexer.input = input
+ lexer.factory = CommonTokenFactoryDEFAULT
+ lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
+
+ lexer.Virt = lexer
+
+ lexer.Interpreter = nil // child classes must populate it
+
+ // The goal of all lexer rules/methods is to create a token object.
+ // This is an instance variable as multiple rules may collaborate to
+ // create a single token. NextToken will return this object after
+ // Matching lexer rule(s). If you subclass to allow multiple token
+ // emissions, then set this to the last token to be Matched or
+ // something non-nil so that the auto token emit mechanism will not
+ // emit another token.
+ lexer.token = nil
+
+ // What character index in the stream did the current token start at?
+ // Needed, for example, to get the text for current token. Set at
+ // the start of NextToken.
+ lexer.TokenStartCharIndex = -1
+
+ // The line on which the first character of the token resides///
+ lexer.TokenStartLine = -1
+
+ // The character position of first character within the line///
+ lexer.TokenStartColumn = -1
+
+ // Once we see EOF on char stream, next token will be EOF.
+ // If you have DONE : EOF then you see DONE EOF.
+ lexer.hitEOF = false
+
+ // The channel number for the current token///
+ lexer.channel = TokenDefaultChannel
+
+ // The token type for the current token///
+ lexer.thetype = TokenInvalidType
+
+ lexer.modeStack = make([]int, 0)
+ lexer.mode = LexerDefaultMode
+
+ // You can set the text for the current token to override what is in
+ // the input char buffer. Use setText() or set this instance var.
+ // /
+ lexer.text = ""
+
+ return lexer
+}
+
+const (
+ LexerDefaultMode = 0
+ LexerMore = -2
+ LexerSkip = -3
+)
+
+const (
+ LexerDefaultTokenChannel = TokenDefaultChannel
+ LexerHidden = TokenHiddenChannel
+ LexerMinCharValue = 0x0000
+ LexerMaxCharValue = 0x10FFFF
+)
+
+func (b *BaseLexer) reset() {
+ // reset Lexer state variables
+ if b.input != nil {
+ b.input.Seek(0) // rewind the input
+ }
+ b.token = nil
+ b.thetype = TokenInvalidType
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = -1
+ b.TokenStartColumn = -1
+ b.TokenStartLine = -1
+ b.text = ""
+
+ b.hitEOF = false
+ b.mode = LexerDefaultMode
+ b.modeStack = make([]int, 0)
+
+ b.Interpreter.reset()
+}
+
+func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
+ return b.Interpreter
+}
+
+func (b *BaseLexer) GetInputStream() CharStream {
+ return b.input
+}
+
+func (b *BaseLexer) GetSourceName() string {
+ return b.GrammarFileName
+}
+
+func (b *BaseLexer) SetChannel(v int) {
+ b.channel = v
+}
+
+func (b *BaseLexer) GetTokenFactory() TokenFactory {
+ return b.factory
+}
+
+func (b *BaseLexer) setTokenFactory(f TokenFactory) {
+ b.factory = f
+}
+
+func (b *BaseLexer) safeMatch() (ret int) {
+ defer func() {
+ if e := recover(); e != nil {
+ if re, ok := e.(RecognitionException); ok {
+ b.notifyListeners(re) // Report error
+ b.Recover(re)
+ ret = LexerSkip // default
+ }
+ }
+ }()
+
+ return b.Interpreter.Match(b.input, b.mode)
+}
+
+// Return a token from this source, i.e., Match a token on the char stream.
+func (b *BaseLexer) NextToken() Token {
+ if b.input == nil {
+ panic("NextToken requires a non-nil input stream.")
+ }
+
+ tokenStartMarker := b.input.Mark()
+
+ // previously in finally block
+ defer func() {
+ // make sure we release marker after Match or
+ // unbuffered char stream will keep buffering
+ b.input.Release(tokenStartMarker)
+ }()
+
+ for {
+ if b.hitEOF {
+ b.EmitEOF()
+ return b.token
+ }
+ b.token = nil
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = b.input.Index()
+ b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
+ b.TokenStartLine = b.Interpreter.GetLine()
+ b.text = ""
+ continueOuter := false
+ for {
+ b.thetype = TokenInvalidType
+ ttype := LexerSkip
+
+ ttype = b.safeMatch()
+
+ if b.input.LA(1) == TokenEOF {
+ b.hitEOF = true
+ }
+ if b.thetype == TokenInvalidType {
+ b.thetype = ttype
+ }
+ if b.thetype == LexerSkip {
+ continueOuter = true
+ break
+ }
+ if b.thetype != LexerMore {
+ break
+ }
+ }
+
+ if continueOuter {
+ continue
+ }
+ if b.token == nil {
+ b.Virt.Emit()
+ }
+ return b.token
+ }
+
+ return nil
+}
+
+// Instruct the lexer to Skip creating a token for the current lexer rule
+// and look for another token. NextToken() knows to keep looking when
+// a lexer rule finishes with token set to SKIPTOKEN. Recall that
+// if token==nil at end of any token rule, it creates one for you
+// and emits it.
+// /
+func (b *BaseLexer) Skip() {
+ b.thetype = LexerSkip
+}
+
+func (b *BaseLexer) More() {
+ b.thetype = LexerMore
+}
+
+func (b *BaseLexer) SetMode(m int) {
+ b.mode = m
+}
+
+func (b *BaseLexer) PushMode(m int) {
+ if LexerATNSimulatorDebug {
+ fmt.Println("pushMode " + strconv.Itoa(m))
+ }
+ b.modeStack.Push(b.mode)
+ b.mode = m
+}
+
+func (b *BaseLexer) PopMode() int {
+ if len(b.modeStack) == 0 {
+ panic("Empty Stack")
+ }
+ if LexerATNSimulatorDebug {
+ fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
+ }
+ i, _ := b.modeStack.Pop()
+ b.mode = i
+ return b.mode
+}
+
+func (b *BaseLexer) inputStream() CharStream {
+ return b.input
+}
+
+// SetInputStream resets the lexer input stream and associated lexer state.
+func (b *BaseLexer) SetInputStream(input CharStream) {
+ b.input = nil
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+ b.reset()
+ b.input = input
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+}
+
+func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
+ return b.tokenFactorySourcePair
+}
+
+// By default does not support multiple emits per NextToken invocation
+// for efficiency reasons. Subclass and override this method, NextToken,
+// and GetToken (to push tokens into a list and pull from that list
+// rather than a single variable as this implementation does).
+// /
+func (b *BaseLexer) EmitToken(token Token) {
+ b.token = token
+}
+
+// The standard method called to automatically emit a token at the
+// outermost lexical rule. The token object should point into the
+// char buffer start..stop. If there is a text override in 'text',
+// use that to set the token's text. Override this method to emit
+// custom Token objects or provide a new factory.
+// /
+func (b *BaseLexer) Emit() Token {
+ t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
+ b.EmitToken(t)
+ return t
+}
+
+func (b *BaseLexer) EmitEOF() Token {
+ cpos := b.GetCharPositionInLine()
+ lpos := b.GetLine()
+ eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
+ b.EmitToken(eof)
+ return eof
+}
+
+func (b *BaseLexer) GetCharPositionInLine() int {
+ return b.Interpreter.GetCharPositionInLine()
+}
+
+func (b *BaseLexer) GetLine() int {
+ return b.Interpreter.GetLine()
+}
+
+func (b *BaseLexer) GetType() int {
+ return b.thetype
+}
+
+func (b *BaseLexer) SetType(t int) {
+ b.thetype = t
+}
+
+// What is the index of the current character of lookahead?///
+func (b *BaseLexer) GetCharIndex() int {
+ return b.input.Index()
+}
+
+// Return the text Matched so far for the current token or any text override.
+// Set the complete text of this token; it wipes any previous changes to the text.
+func (b *BaseLexer) GetText() string {
+ if b.text != "" {
+ return b.text
+ }
+
+ return b.Interpreter.GetText(b.input)
+}
+
+func (b *BaseLexer) SetText(text string) {
+ b.text = text
+}
+
+func (b *BaseLexer) GetATN() *ATN {
+ return b.Interpreter.ATN()
+}
+
+// Return a list of all Token objects in input char stream.
+// Forces load of all tokens. Does not include EOF token.
+// /
+func (b *BaseLexer) GetAllTokens() []Token {
+ vl := b.Virt
+ tokens := make([]Token, 0)
+ t := vl.NextToken()
+ for t.GetTokenType() != TokenEOF {
+ tokens = append(tokens, t)
+ t = vl.NextToken()
+ }
+ return tokens
+}
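+
+// Illustrative sketch (not part of the upstream source): draining a lexer with
+// GetAllTokens. "parser.NewMyLexer" is a placeholder for a lexer generated by
+// the ANTLR tool; imports are omitted.
+//
+//	lex := parser.NewMyLexer(antlr.NewInputStream("1 + 2")) // hypothetical generated lexer
+//	for _, tok := range lex.GetAllTokens() {
+//		fmt.Println(tok.GetTokenType(), tok.GetText())
+//	}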
+
+func (b *BaseLexer) notifyListeners(e RecognitionException) {
+ start := b.TokenStartCharIndex
+ stop := b.input.Index()
+ text := b.input.GetTextFromInterval(NewInterval(start, stop))
+ msg := "token recognition error at: '" + text + "'"
+ listener := b.GetErrorListenerDispatch()
+ listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
+}
+
+func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
+ if c == TokenEOF {
+ return "<EOF>"
+ } else if c == '\n' {
+ return "\\n"
+ } else if c == '\t' {
+ return "\\t"
+ } else if c == '\r' {
+ return "\\r"
+ } else {
+ return string(c)
+ }
+}
+
+func (b *BaseLexer) getCharErrorDisplay(c rune) string {
+ return "'" + b.getErrorDisplayForChar(c) + "'"
+}
+
+// Lexers can normally Match any char in its vocabulary after Matching
+// a token, so do the easy thing and just kill a character and hope
+// it all works out. You can instead use the rule invocation stack
+// to do sophisticated error recovery if you are in a fragment rule.
+// /
+func (b *BaseLexer) Recover(re RecognitionException) {
+ if b.input.LA(1) != TokenEOF {
+ if _, ok := re.(*LexerNoViableAltException); ok {
+ // Skip a char and try again
+ b.Interpreter.Consume(b.input)
+ } else {
+ // TODO: Do we lose character or line position information?
+ b.input.Consume()
+ }
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
new file mode 100644
index 000000000..5a325be13
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
@@ -0,0 +1,430 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
+const (
+ LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
+ LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
+ LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
+ LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
+ LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
+ LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
+ LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
+ LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
+)
+
+type LexerAction interface {
+ getActionType() int
+ getIsPositionDependent() bool
+ execute(lexer Lexer)
+ hash() int
+ equals(other LexerAction) bool
+}
+
+type BaseLexerAction struct {
+ actionType int
+ isPositionDependent bool
+}
+
+func NewBaseLexerAction(action int) *BaseLexerAction {
+ la := new(BaseLexerAction)
+
+ la.actionType = action
+ la.isPositionDependent = false
+
+ return la
+}
+
+func (b *BaseLexerAction) execute(lexer Lexer) {
+ panic("Not implemented")
+}
+
+func (b *BaseLexerAction) getActionType() int {
+ return b.actionType
+}
+
+func (b *BaseLexerAction) getIsPositionDependent() bool {
+ return b.isPositionDependent
+}
+
+func (b *BaseLexerAction) hash() int {
+ return b.actionType
+}
+
+func (b *BaseLexerAction) equals(other LexerAction) bool {
+ return b == other
+}
+
+//
+// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
+//
+// The {@code Skip} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerSkipAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerSkipAction() *LexerSkipAction {
+ la := new(LexerSkipAction)
+ la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
+ return la
+}
+
+// Provides a singleton instance of this parameterless lexer action.
+var LexerSkipActionINSTANCE = NewLexerSkipAction()
+
+func (l *LexerSkipAction) execute(lexer Lexer) {
+ lexer.Skip()
+}
+
+func (l *LexerSkipAction) String() string {
+ return "skip"
+}
+
+// Implements the {@code type} lexer action by calling {@link Lexer//setType}
+// with the assigned type.
+type LexerTypeAction struct {
+ *BaseLexerAction
+
+ thetype int
+}
+
+func NewLexerTypeAction(thetype int) *LexerTypeAction {
+ l := new(LexerTypeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
+ l.thetype = thetype
+ return l
+}
+
+func (l *LexerTypeAction) execute(lexer Lexer) {
+ lexer.SetType(l.thetype)
+}
+
+func (l *LexerTypeAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.thetype)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerTypeAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerTypeAction); !ok {
+ return false
+ } else {
+ return l.thetype == other.(*LexerTypeAction).thetype
+ }
+}
+
+func (l *LexerTypeAction) String() string {
+ return "actionType(" + strconv.Itoa(l.thetype) + ")"
+}
+
+// Implements the {@code pushMode} lexer action by calling
+// {@link Lexer//pushMode} with the assigned mode.
+type LexerPushModeAction struct {
+ *BaseLexerAction
+
+ mode int
+}
+
+func NewLexerPushModeAction(mode int) *LexerPushModeAction {
+
+ l := new(LexerPushModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
+
+ l.mode = mode
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//pushMode} with the
+// value provided by {@link //getMode}.
+func (l *LexerPushModeAction) execute(lexer Lexer) {
+ lexer.PushMode(l.mode)
+}
+
+func (l *LexerPushModeAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerPushModeAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerPushModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerPushModeAction).mode
+ }
+}
+
+func (l *LexerPushModeAction) String() string {
+ return "pushMode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
+//
+// The {@code popMode} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerPopModeAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerPopModeAction() *LexerPopModeAction {
+
+ l := new(LexerPopModeAction)
+
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
+
+ return l
+}
+
+var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
+
+// This action is implemented by calling {@link Lexer//popMode}.
+func (l *LexerPopModeAction) execute(lexer Lexer) {
+ lexer.PopMode()
+}
+
+func (l *LexerPopModeAction) String() string {
+ return "popMode"
+}
+
+// Implements the {@code more} lexer action by calling {@link Lexer//more}.
+//
+// The {@code more} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+
+type LexerMoreAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerMoreAction() *LexerMoreAction {
+ l := new(LexerMoreAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
+
+ return l
+}
+
+var LexerMoreActionINSTANCE = NewLexerMoreAction()
+
+// This action is implemented by calling {@link Lexer//more}.
+func (l *LexerMoreAction) execute(lexer Lexer) {
+ lexer.More()
+}
+
+func (l *LexerMoreAction) String() string {
+ return "more"
+}
+
+// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
+// the assigned mode.
+type LexerModeAction struct {
+ *BaseLexerAction
+
+ mode int
+}
+
+func NewLexerModeAction(mode int) *LexerModeAction {
+ l := new(LexerModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
+ l.mode = mode
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//mode} with the
+// value provided by {@link //getMode}.
+func (l *LexerModeAction) execute(lexer Lexer) {
+ lexer.SetMode(l.mode)
+}
+
+func (l *LexerModeAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerModeAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerModeAction).mode
+ }
+}
+
+func (l *LexerModeAction) String() string {
+ return "mode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// Executes a custom lexer action by calling {@link Recognizer//action} with the
+// rule and action indexes assigned to the custom action. The implementation of
+// a custom action is added to the generated code for the lexer in an override
+// of {@link Recognizer//action} when the grammar is compiled.
+//
+// This class may represent embedded actions created with the {...}
+// syntax in ANTLR 4, as well as actions created for lexer commands where the
+// command argument could not be evaluated when the grammar was compiled.
+
+// Constructs a custom lexer action with the specified rule and action
+// indexes.
+//
+// @param ruleIndex The rule index to use for calls to
+// {@link Recognizer//action}.
+// @param actionIndex The action index to use for calls to
+// {@link Recognizer//action}.
+
+type LexerCustomAction struct {
+ *BaseLexerAction
+ ruleIndex, actionIndex int
+}
+
+func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
+ l := new(LexerCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
+ l.ruleIndex = ruleIndex
+ l.actionIndex = actionIndex
+ l.isPositionDependent = true
+ return l
+}
+
+// Custom actions are implemented by calling {@link Lexer//action} with the
+// appropriate rule and action indexes.
+func (l *LexerCustomAction) execute(lexer Lexer) {
+ lexer.Action(nil, l.ruleIndex, l.actionIndex)
+}
+
+func (l *LexerCustomAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.ruleIndex)
+ h = murmurUpdate(h, l.actionIndex)
+ return murmurFinish(h, 3)
+}
+
+func (l *LexerCustomAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerCustomAction); !ok {
+ return false
+ } else {
+ return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
+ }
+}
+
+// Implements the {@code channel} lexer action by calling
+// {@link Lexer//setChannel} with the assigned channel.
+// Constructs a new {@code channel} action with the specified channel value.
+// @param channel The channel value to pass to {@link Lexer//setChannel}.
+type LexerChannelAction struct {
+ *BaseLexerAction
+
+ channel int
+}
+
+func NewLexerChannelAction(channel int) *LexerChannelAction {
+ l := new(LexerChannelAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
+ l.channel = channel
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//setChannel} with the
+// value provided by {@link //getChannel}.
+func (l *LexerChannelAction) execute(lexer Lexer) {
+ lexer.SetChannel(l.channel)
+}
+
+func (l *LexerChannelAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.channel)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerChannelAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerChannelAction); !ok {
+ return false
+ } else {
+ return l.channel == other.(*LexerChannelAction).channel
+ }
+}
+
+func (l *LexerChannelAction) String() string {
+ return "channel(" + strconv.Itoa(l.channel) + ")"
+}
+
+// This implementation of {@link LexerAction} is used for tracking input offsets
+// for position-dependent actions within a {@link LexerActionExecutor}.
+//
+// This action is not serialized as part of the ATN, and is only required for
+// position-dependent lexer actions which appear at a location other than the
+// end of a rule. For more information about DFA optimizations employed for
+// lexer actions, see {@link LexerActionExecutor//append} and
+// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
+
+// Constructs a new indexed custom action by associating a character offset
+// with a {@link LexerAction}.
+//
+// Note: This class is only required for lexer actions for which
+// {@link LexerAction//isPositionDependent} returns {@code true}.
+//
+// @param offset The offset into the input {@link CharStream}, relative to
+// the token start index, at which the specified lexer action should be
+// executed.
+// @param action The lexer action to execute at a particular offset in the
+// input {@link CharStream}.
+type LexerIndexedCustomAction struct {
+ *BaseLexerAction
+
+ offset int
+ lexerAction LexerAction
+ isPositionDependent bool
+}
+
+func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
+
+ l := new(LexerIndexedCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
+
+ l.offset = offset
+ l.lexerAction = lexerAction
+ l.isPositionDependent = true
+
+ return l
+}
+
+// This method calls {@link //execute} on the result of {@link //getAction}
+// using the provided {@code lexer}.
+func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
+ // assume the input stream position was properly set by the calling code
+ l.lexerAction.execute(lexer)
+}
+
+func (l *LexerIndexedCustomAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.offset)
+ h = murmurUpdate(h, l.lexerAction.hash())
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerIndexedCustomAction); !ok {
+ return false
+ } else {
+ return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
new file mode 100644
index 000000000..056941dd6
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
@@ -0,0 +1,173 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// Represents an executor for a sequence of lexer actions which are traversed during
+// the Matching operation of a lexer rule (token).
+//
+// The executor tracks position information for position-dependent lexer actions
+// efficiently, ensuring that actions appearing only at the end of the rule do
+// not cause bloating of the {@link DFA} created for the lexer.
+
+type LexerActionExecutor struct {
+ lexerActions []LexerAction
+ cachedHash int
+}
+
+func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
+
+ if lexerActions == nil {
+ lexerActions = make([]LexerAction, 0)
+ }
+
+ l := new(LexerActionExecutor)
+
+ l.lexerActions = lexerActions
+
+ // Caches the result of {@link //hashCode} since the hash code is an element
+ // of the performance-critical {@link LexerATNConfig//hashCode} operation.
+ l.cachedHash = murmurInit(57)
+ for _, a := range lexerActions {
+ l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
+ }
+
+ return l
+}
+
+// Creates a {@link LexerActionExecutor} which executes the actions for
+// the input {@code lexerActionExecutor} followed by a specified
+// {@code lexerAction}.
+//
+// @param lexerActionExecutor The executor for actions already traversed by
+// the lexer while Matching a token within a particular
+// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
+// though it were an empty executor.
+// @param lexerAction The lexer action to execute after the actions
+// specified in {@code lexerActionExecutor}.
+//
+// @return A {@link LexerActionExecutor} for executing the combined actions
+// of {@code lexerActionExecutor} and {@code lexerAction}.
+func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
+ if lexerActionExecutor == nil {
+ return NewLexerActionExecutor([]LexerAction{lexerAction})
+ }
+
+ return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
+}
+
+// Creates a {@link LexerActionExecutor} which encodes the current offset
+// for position-dependent lexer actions.
+//
+// Normally, when the executor encounters lexer actions where
+// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
+// {@link IntStream//seek} on the input {@link CharStream} to set the input
+// position to the end of the current token. This behavior provides
+// for efficient DFA representation of lexer actions which appear at the end
+// of a lexer rule, even when the lexer rule Matches a variable number of
+// characters.
+//
+// Prior to traversing a Match transition in the ATN, the current offset
+// from the token start index is assigned to all position-dependent lexer
+// actions which have not already been assigned a fixed offset. By storing
+// the offsets relative to the token start index, the DFA representation of
+// lexer actions which appear in the middle of tokens remains efficient due
+// to sharing among tokens of the same length, regardless of their absolute
+// position in the input stream.
+//
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns {@code this}.
+//
+// @param offset The current offset to assign to all position-dependent
+// lexer actions which do not already have offsets assigned.
+//
+// @return A {@link LexerActionExecutor} which stores input stream offsets
+// for all position-dependent lexer actions.
+// /
+func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
+ var updatedLexerActions []LexerAction
+ for i := 0; i < len(l.lexerActions); i++ {
+ _, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
+ if l.lexerActions[i].getIsPositionDependent() && !ok {
+ if updatedLexerActions == nil {
+ updatedLexerActions = make([]LexerAction, 0)
+
+ for _, a := range l.lexerActions {
+ updatedLexerActions = append(updatedLexerActions, a)
+ }
+ }
+
+ updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
+ }
+ }
+ if updatedLexerActions == nil {
+ return l
+ }
+
+ return NewLexerActionExecutor(updatedLexerActions)
+}
+
+// Execute the actions encapsulated by l executor within the context of a
+// particular {@link Lexer}.
+//
+// This method calls {@link IntStream//seek} to set the position of the
+// {@code input} {@link CharStream} prior to calling
+// {@link LexerAction//execute} on a position-dependent action. Before the
+// method returns, the input position will be restored to the same position
+// it was in when the method was invoked.
+//
+// @param lexer The lexer instance.
+// @param input The input stream which is the source for the current token.
+// When l method is called, the current {@link IntStream//index} for
+// {@code input} should be the start of the following token, i.e. 1
+// character past the end of the current token.
+// @param startIndex The token start index. This value may be passed to
+// {@link IntStream//seek} to set the {@code input} position to the beginning
+// of the token.
+// /
+func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
+ requiresSeek := false
+ stopIndex := input.Index()
+
+ defer func() {
+ if requiresSeek {
+ input.Seek(stopIndex)
+ }
+ }()
+
+ for i := 0; i < len(l.lexerActions); i++ {
+ lexerAction := l.lexerActions[i]
+ if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
+ offset := la.offset
+ input.Seek(startIndex + offset)
+ lexerAction = la.lexerAction
+ requiresSeek = (startIndex + offset) != stopIndex
+ } else if lexerAction.getIsPositionDependent() {
+ input.Seek(stopIndex)
+ requiresSeek = false
+ }
+ lexerAction.execute(lexer)
+ }
+}
+
+func (l *LexerActionExecutor) hash() int {
+ if l == nil {
+ return 61
+ }
+ return l.cachedHash
+}
+
+func (l *LexerActionExecutor) equals(other interface{}) bool {
+ if l == other {
+ return true
+ }
+ othert, ok := other.(*LexerActionExecutor)
+ if !ok {
+ return false
+ }
+ if othert == nil {
+ return false
+ }
+ return l.cachedHash == othert.cachedHash && &l.lexerActions == &othert.lexerActions
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
new file mode 100644
index 000000000..730bea47a
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
@@ -0,0 +1,665 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ LexerATNSimulatorDebug = false
+ LexerATNSimulatorDFADebug = false
+
+ LexerATNSimulatorMinDFAEdge = 0
+ LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
+
+ LexerATNSimulatorMatchCalls = 0
+)
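
Editor's note: the switches above are plain package-level variables, so a consumer can flip them at run time to trace lexing decisions. A minimal, hedged sketch, assuming the vendored package is imported from the path shown in this diff:

    import "github.com/antlr/antlr4/runtime/Go/antlr"

    // enableLexerTracing flips the debug switch consulted by the simulator's
    // Println calls (see MatchATN/execATN below).
    func enableLexerTracing() {
        antlr.LexerATNSimulatorDebug = true
    }
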
+
+type ILexerATNSimulator interface {
+ IATNSimulator
+
+ reset()
+ Match(input CharStream, mode int) int
+ GetCharPositionInLine() int
+ GetLine() int
+ GetText(input CharStream) string
+ Consume(input CharStream)
+}
+
+type LexerATNSimulator struct {
+ *BaseATNSimulator
+
+ recog Lexer
+ predictionMode int
+ mergeCache DoubleDict
+ startIndex int
+ Line int
+ CharPositionInLine int
+ mode int
+ prevAccept *SimState
+ MatchCalls int
+}
+
+func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
+ l := new(LexerATNSimulator)
+
+ l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
+
+ l.decisionToDFA = decisionToDFA
+ l.recog = recog
+ // The current token's starting index into the character stream.
+ // Shared across DFA to ATN simulation in case the ATN fails and the
+	// DFA did not have a previous accept state. In this case, we use the
+ // ATN-generated exception object.
+ l.startIndex = -1
+ // line number 1..n within the input///
+ l.Line = 1
+ // The index of the character relative to the beginning of the line
+ // 0..n-1///
+ l.CharPositionInLine = 0
+ l.mode = LexerDefaultMode
+ // Used during DFA/ATN exec to record the most recent accept configuration
+ // info
+ l.prevAccept = NewSimState()
+ // done
+ return l
+}
+
+func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
+ l.CharPositionInLine = simulator.CharPositionInLine
+ l.Line = simulator.Line
+ l.mode = simulator.mode
+ l.startIndex = simulator.startIndex
+}
+
+func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
+ l.MatchCalls++
+ l.mode = mode
+ mark := input.Mark()
+
+ defer func() {
+ input.Release(mark)
+ }()
+
+ l.startIndex = input.Index()
+ l.prevAccept.reset()
+
+ dfa := l.decisionToDFA[mode]
+
+ if dfa.getS0() == nil {
+ return l.MatchATN(input)
+ }
+
+ return l.execATN(input, dfa.getS0())
+}
+
+func (l *LexerATNSimulator) reset() {
+ l.prevAccept.reset()
+ l.startIndex = -1
+ l.Line = 1
+ l.CharPositionInLine = 0
+ l.mode = LexerDefaultMode
+}
+
+func (l *LexerATNSimulator) MatchATN(input CharStream) int {
+ startState := l.atn.modeToStartState[l.mode]
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
+ }
+ oldMode := l.mode
+ s0Closure := l.computeStartState(input, startState)
+ suppressEdge := s0Closure.hasSemanticContext
+ s0Closure.hasSemanticContext = false
+
+ next := l.addDFAState(s0Closure)
+
+ if !suppressEdge {
+ l.decisionToDFA[l.mode].setS0(next)
+ }
+
+ predict := l.execATN(input, next)
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
+ }
+ return predict
+}
+
+func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("start state closure=" + ds0.configs.String())
+ }
+ if ds0.isAcceptState {
+ // allow zero-length tokens
+ l.captureSimState(l.prevAccept, input, ds0)
+ }
+ t := input.LA(1)
+ s := ds0 // s is current/from DFA state
+
+ for { // while more work
+ if LexerATNSimulatorDebug {
+ fmt.Println("execATN loop starting closure: " + s.configs.String())
+ }
+
+ // As we move src->trg, src->trg, we keep track of the previous trg to
+ // avoid looking up the DFA state again, which is expensive.
+ // If the previous target was already part of the DFA, we might
+ // be able to avoid doing a reach operation upon t. If s!=nil,
+ // it means that semantic predicates didn't prevent us from
+ // creating a DFA state. Once we know s!=nil, we check to see if
+ // the DFA state has an edge already for t. If so, we can just reuse
+		// its configuration set; there's no point in re-computing it.
+ // This is kind of like doing DFA simulation within the ATN
+ // simulation because DFA simulation is really just a way to avoid
+ // computing reach/closure sets. Technically, once we know that
+ // we have a previously added DFA state, we could jump over to
+ // the DFA simulator. But, that would mean popping back and forth
+ // a lot and making things more complicated algorithmically.
+ // This optimization makes a lot of sense for loops within DFA.
+ // A character will take us back to an existing DFA state
+ // that already has lots of edges out of it. e.g., .* in comments.
+ target := l.getExistingTargetState(s, t)
+ if target == nil {
+ target = l.computeTargetState(input, s, t)
+ // print("Computed:" + str(target))
+ }
+ if target == ATNSimulatorError {
+ break
+ }
+		// If this is a consumable input element, make sure to consume before
+ // capturing the accept state so the input index, line, and char
+ // position accurately reflect the state of the interpreter at the
+ // end of the token.
+ if t != TokenEOF {
+ l.Consume(input)
+ }
+ if target.isAcceptState {
+ l.captureSimState(l.prevAccept, input, target)
+ if t == TokenEOF {
+ break
+ }
+ }
+ t = input.LA(1)
+		s = target // flip; current DFA target becomes the new src/from state
+ }
+
+ return l.failOrAccept(l.prevAccept, input, s.configs, t)
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// this method returns {@code nil}.
+//
+// @param s The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for this edge is not
+// already cached
+func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
+ if s.getEdges() == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
+ return nil
+ }
+
+ target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
+ if LexerATNSimulatorDebug && target != nil {
+ fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
+ }
+ return target
+}
+
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
+//
+// @param input The input stream
+// @param s The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, this method
+// returns {@link //ERROR}.
+func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
+ reach := NewOrderedATNConfigSet()
+
+ // if we don't find an existing DFA state
+ // Fill reach starting from closure, following t transitions
+ l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
+
+ if len(reach.configs) == 0 { // we got nowhere on t from s
+ if !reach.hasSemanticContext {
+			// we got nowhere on t; don't throw out this knowledge, as it'd
+			// cause a failover from DFA later.
+ l.addDFAEdge(s, t, ATNSimulatorError, nil)
+ }
+ // stop when we can't Match any more char
+ return ATNSimulatorError
+ }
+ // Add an edge from s to target DFA found/created for reach
+ return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
+}
+
+func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
+ if l.prevAccept.dfaState != nil {
+ lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
+ l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
+ return prevAccept.dfaState.prediction
+ }
+
+ // if no accept and EOF is first char, return EOF
+ if t == TokenEOF && input.Index() == l.startIndex {
+ return TokenEOF
+ }
+
+ panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
+}
+
+// Given a starting configuration set, figure out all ATN configurations
+// we can reach upon input {@code t}. Parameter {@code reach} is a return
+// parameter.
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
+	// This is used to skip processing for configs which have a lower priority
+ // than a config that already reached an accept state for the same rule
+ SkipAlt := ATNInvalidAltNumber
+
+ for _, cfg := range closure.GetItems() {
+ currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
+ if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
+ continue
+ }
+
+ if LexerATNSimulatorDebug {
+
+ fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
+ }
+
+ for _, trans := range cfg.GetState().GetTransitions() {
+ target := l.getReachableTarget(trans, t)
+ if target != nil {
+ lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
+ if lexerActionExecutor != nil {
+ lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
+ }
+ treatEOFAsEpsilon := (t == TokenEOF)
+ config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
+ if l.closure(input, config, reach,
+ currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
+					// any remaining configs for this alt have a lower priority
+ // than the one that just reached an accept state.
+ SkipAlt = cfg.GetAlt()
+ }
+ }
+ }
+ }
+}
+
+func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
+ if LexerATNSimulatorDebug {
+ fmt.Printf("ACTION %v\n", lexerActionExecutor)
+ }
+ // seek to after last char in token
+ input.Seek(index)
+ l.Line = line
+ l.CharPositionInLine = charPos
+ if lexerActionExecutor != nil && l.recog != nil {
+ lexerActionExecutor.execute(l.recog, input, startIndex)
+ }
+}
+
+func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
+ if trans.Matches(t, 0, LexerMaxCharValue) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
+ configs := NewOrderedATNConfigSet()
+ for i := 0; i < len(p.GetTransitions()); i++ {
+ target := p.GetTransitions()[i].getTarget()
+ cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
+ l.closure(input, cfg, configs, false, false, false)
+ }
+
+ return configs
+}
+
+// Since the alternatives within any lexer decision are ordered by
+// preference, this method stops pursuing the closure as soon as an accept
+// state is reached. After the first accept state is reached by depth-first
+// search from {@code config}, all other (potentially reachable) states for
+// this rule would have a lower priority.
+//
+// @return {@code true} if an accept state is reached, otherwise
+// {@code false}.
+func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
+ }
+
+ _, ok := config.state.(*RuleStopState)
+ if ok {
+
+ if LexerATNSimulatorDebug {
+ if l.recog != nil {
+ fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
+ } else {
+ fmt.Printf("closure at rule stop %s\n", config)
+ }
+ }
+
+ if config.context == nil || config.context.hasEmptyPath() {
+ if config.context == nil || config.context.isEmpty() {
+ configs.Add(config, nil)
+ return true
+ }
+
+ configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
+ currentAltReachedAcceptState = true
+ }
+ if config.context != nil && !config.context.isEmpty() {
+ for i := 0; i < config.context.length(); i++ {
+ if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
+ newContext := config.context.GetParent(i) // "pop" return state
+ returnState := l.atn.states[config.context.getReturnState(i)]
+ cfg := NewLexerATNConfig2(config, returnState, newContext)
+ currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ }
+ return currentAltReachedAcceptState
+ }
+ // optimization
+ if !config.state.GetEpsilonOnlyTransitions() {
+ if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
+ configs.Add(config, nil)
+ }
+ }
+ for j := 0; j < len(config.state.GetTransitions()); j++ {
+ trans := config.state.GetTransitions()[j]
+ cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
+ if cfg != nil {
+ currentAltReachedAcceptState = l.closure(input, cfg, configs,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ return currentAltReachedAcceptState
+}
+
+// side-effect: can alter configs.hasSemanticContext
+func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
+ configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
+
+ var cfg *LexerATNConfig
+
+ if trans.getSerializationType() == TransitionRULE {
+
+ rt := trans.(*RuleTransition)
+ newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
+ cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
+
+ } else if trans.getSerializationType() == TransitionPRECEDENCE {
+ panic("Precedence predicates are not supported in lexers.")
+ } else if trans.getSerializationType() == TransitionPREDICATE {
+ // Track traversing semantic predicates. If we traverse,
+		// we cannot add a DFA state for this "reach" computation
+ // because the DFA would not test the predicate again in the
+ // future. Rather than creating collections of semantic predicates
+ // like v3 and testing them on prediction, v4 will test them on the
+ // fly all the time using the ATN not the DFA. This is slower but
+ // semantically it's not used that often. One of the key elements to
+		// this predicate mechanism is not adding DFA states that see
+ // predicates immediately afterwards in the ATN. For example,
+
+ // a : ID {p1}? | ID {p2}?
+
+ // should create the start state for rule 'a' (to save start state
+ // competition), but should not create target of ID state. The
+ // collection of ATN states the following ID references includes
+		// states reached by traversing predicates. Since this is when we
+		// test them, we cannot cache the DFA state target of ID.
+
+ pt := trans.(*PredicateTransition)
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
+ }
+ configs.SetHasSemanticContext(true)
+ if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionACTION {
+ if config.context == nil || config.context.hasEmptyPath() {
+ // execute actions anywhere in the start rule for a token.
+ //
+ // TODO: if the entry rule is invoked recursively, some
+ // actions may be executed during the recursive call. The
+ // problem can appear when hasEmptyPath() is true but
+			// isEmpty() is false. In this case, the config needs to be
+ // split into two contexts - one with just the empty path
+ // and another with everything but the empty path.
+ // Unfortunately, the current algorithm does not allow
+ // getEpsilonTarget to return two configurations, so
+ // additional modifications are needed before we can support
+ // the split operation.
+ lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
+ cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
+ } else {
+ // ignore actions in referenced rules
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionEPSILON {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ } else if trans.getSerializationType() == TransitionATOM ||
+ trans.getSerializationType() == TransitionRANGE ||
+ trans.getSerializationType() == TransitionSET {
+ if treatEOFAsEpsilon {
+ if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ }
+ }
+ return cfg
+}
+
+// Evaluate a predicate specified in the lexer.
+//
+// If {@code speculative} is {@code true}, this method was called before
+// {@link //consume} for the Matched character. This method should call
+// {@link //consume} before evaluating the predicate to ensure position
+// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
+// and {@link Lexer//GetCharPositionInLine}, properly reflect the current
+// lexer state. This method should restore {@code input} and the simulator
+// to the original state before returning (i.e. undo the actions made by the
+// call to {@link //consume}).
+//
+// @param input The input stream.
+// @param ruleIndex The rule containing the predicate.
+// @param predIndex The index of the predicate within the rule.
+// @param speculative {@code true} if the current index in {@code input} is
+// one character before the predicate's location.
+//
+// @return {@code true} if the specified predicate evaluates to
+// {@code true}.
+// /
+func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
+ // assume true if no recognizer was provided
+ if l.recog == nil {
+ return true
+ }
+ if !speculative {
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+ }
+ savedcolumn := l.CharPositionInLine
+ savedLine := l.Line
+ index := input.Index()
+ marker := input.Mark()
+
+ defer func() {
+ l.CharPositionInLine = savedcolumn
+ l.Line = savedLine
+ input.Seek(index)
+ input.Release(marker)
+ }()
+
+ l.Consume(input)
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+}
+
+func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
+ settings.index = input.Index()
+ settings.line = l.Line
+ settings.column = l.CharPositionInLine
+ settings.dfaState = dfaState
+}
+
+func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
+ if to == nil && cfgs != nil {
+		// leading to this call, ATNConfigSet.hasSemanticContext is used as a
+		// marker indicating dynamic predicate evaluation makes this edge
+ // dependent on the specific input sequence, so the static edge in the
+ // DFA should be omitted. The target DFAState is still created since
+ // execATN has the ability to reSynchronize with the DFA state cache
+ // following the predicate evaluation step.
+ //
+ // TJP notes: next time through the DFA, we see a pred again and eval.
+ // If that gets us to a previously created (but dangling) DFA
+ // state, we can continue in pure DFA mode from there.
+ // /
+ suppressEdge := cfgs.HasSemanticContext()
+ cfgs.SetHasSemanticContext(false)
+
+ to = l.addDFAState(cfgs)
+
+ if suppressEdge {
+ return to
+ }
+ }
+ // add the edge
+ if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
+ // Only track edges within the DFA bounds
+ return to
+ }
+ if LexerATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
+ }
+ if from.getEdges() == nil {
+ // make room for tokens 1..n and -1 masquerading as index 0
+ from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))
+ }
+ from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect
+
+ return to
+}
+
+// Add a new DFA state if there isn't one with this set of
+// configurations already. This method also detects the first
+// configuration containing an ATN rule stop state. Later, when
+// traversing the DFA, we will know which rule to accept.
+func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState {
+
+ proposed := NewDFAState(-1, configs)
+ var firstConfigWithRuleStopState ATNConfig
+
+ for _, cfg := range configs.GetItems() {
+
+ _, ok := cfg.GetState().(*RuleStopState)
+
+ if ok {
+ firstConfigWithRuleStopState = cfg
+ break
+ }
+ }
+ if firstConfigWithRuleStopState != nil {
+ proposed.isAcceptState = true
+ proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
+ proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
+ }
+ hash := proposed.hash()
+ dfa := l.decisionToDFA[l.mode]
+ existing, ok := dfa.getState(hash)
+ if ok {
+ return existing
+ }
+ newState := proposed
+ newState.stateNumber = dfa.numStates()
+ configs.SetReadOnly(true)
+ newState.configs = configs
+ dfa.setState(hash, newState)
+ return newState
+}
+
+func (l *LexerATNSimulator) getDFA(mode int) *DFA {
+ return l.decisionToDFA[mode]
+}
+
+// Get the text Matched so far for the current token.
+func (l *LexerATNSimulator) GetText(input CharStream) string {
+ // index is first lookahead char, don't include.
+ return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
+}
+
+func (l *LexerATNSimulator) Consume(input CharStream) {
+ curChar := input.LA(1)
+ if curChar == int('\n') {
+ l.Line++
+ l.CharPositionInLine = 0
+ } else {
+ l.CharPositionInLine++
+ }
+ input.Consume()
+}
+
+func (l *LexerATNSimulator) GetCharPositionInLine() int {
+ return l.CharPositionInLine
+}
+
+func (l *LexerATNSimulator) GetLine() int {
+ return l.Line
+}
+
+func (l *LexerATNSimulator) GetTokenName(tt int) string {
+ if tt == -1 {
+ return "EOF"
+ }
+
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(tt))
+ sb.WriteByte('\'')
+
+ return sb.String()
+}
+
+func resetSimState(sim *SimState) {
+ sim.index = -1
+ sim.line = 0
+ sim.column = -1
+ sim.dfaState = nil
+}
+
+type SimState struct {
+ index int
+ line int
+ column int
+ dfaState *DFAState
+}
+
+func NewSimState() *SimState {
+ s := new(SimState)
+ resetSimState(s)
+ return s
+}
+
+func (s *SimState) reset() {
+ resetSimState(s)
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
new file mode 100644
index 000000000..ebd43e456
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
@@ -0,0 +1,212 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type LL1Analyzer struct {
+ atn *ATN
+}
+
+func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
+ la := new(LL1Analyzer)
+ la.atn = atn
+ return la
+}
+
+//* Special value added to the lookahead sets to indicate that we hit
+// a predicate during analysis if {@code seeThruPreds==false}.
+///
+const (
+ LL1AnalyzerHitPred = TokenInvalidType
+)
+
+//*
+// Calculates the SLL(1) expected lookahead set for each outgoing transition
+// of an {@link ATNState}. The returned array has one element for each
+// outgoing transition in {@code s}. If the closure from transition
+// i leads to a semantic predicate before Matching a symbol, the
+// element at index i of the result will be {@code nil}.
+//
+// @param s the ATN state
+// @return the expected symbols for each outgoing transition of {@code s}.
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
+ if s == nil {
+ return nil
+ }
+ count := len(s.GetTransitions())
+ look := make([]*IntervalSet, count)
+ for alt := 0; alt < count; alt++ {
+ look[alt] = NewIntervalSet()
+ lookBusy := NewArray2DHashSet(nil, nil)
+ seeThruPreds := false // fail to get lookahead upon pred
+ la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
+		// Wipe out lookahead for this alternative if we found nothing
+ // or we had a predicate when we !seeThruPreds
+ if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
+ look[alt] = nil
+ }
+ }
+ return look
+}
+
+//*
+// Compute set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+//
+// If {@code ctx} is {@code nil} and the end of the rule containing
+// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
+// If {@code ctx} is not {@code nil} and the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
+//
+// @param s the ATN state
+// @param stopState the ATN state to stop at. This can be a
+// {@link BlockEndState} to detect epsilon paths through a closure.
+// @param ctx the complete parser context, or {@code nil} if the context
+// should be ignored
+//
+// @return The set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+///
+func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
+ r := NewIntervalSet()
+ seeThruPreds := true // ignore preds get all lookahead
+ var lookContext PredictionContext
+ if ctx != nil {
+ lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
+ }
+ la.look1(s, stopState, lookContext, r, NewArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true)
+ return r
+}
+
+//*
+// Compute set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+//
+// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
+// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
+// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
+// {@code true} and {@code stopState} or the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
+//
+// @param s the ATN state.
+// @param stopState the ATN state to stop at. This can be a
+// {@link BlockEndState} to detect epsilon paths through a closure.
+// @param ctx The outer context, or {@code nil} if the outer context should
+// not be used.
+// @param look The result lookahead set.
+// @param lookBusy A set used for preventing epsilon closures in the ATN
+// from causing a stack overflow. Outside code should pass
+// {@code NewSet} for this argument.
+// @param calledRuleStack A set used for preventing left recursion in the
+// ATN from causing a stack overflow. Outside code should pass
+// {@code NewBitSet()} for this argument.
+// @param seeThruPreds {@code true} to treat semantic predicates as
+// implicitly {@code true} and "see through them", otherwise {@code false}
+// to treat semantic predicates as opaque and add {@link //HitPred} to the
+// result if one is encountered.
+// @param addEOF Add {@link Token//EOF} to the result if the end of the
+// outermost context is reached. This parameter has no effect if {@code ctx}
+// is {@code nil}.
+
+func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
+
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+
+ c := NewBaseATNConfig6(s, 0, ctx)
+
+ if lookBusy.Contains(c) {
+ return
+ }
+
+ lookBusy.Add(c)
+
+ if s == stopState {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+ }
+
+ _, ok := s.(*RuleStopState)
+
+ if ok {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+
+ if ctx != BasePredictionContextEMPTY {
+ removed := calledRuleStack.contains(s.GetRuleIndex())
+ defer func() {
+ if removed {
+ calledRuleStack.add(s.GetRuleIndex())
+ }
+ }()
+ calledRuleStack.remove(s.GetRuleIndex())
+ // run thru all possible stack tops in ctx
+ for i := 0; i < ctx.length(); i++ {
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
+ }
+ return
+ }
+ }
+
+ n := len(s.GetTransitions())
+
+ for i := 0; i < n; i++ {
+ t := s.GetTransitions()[i]
+
+ if t1, ok := t.(*RuleTransition); ok {
+ if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
+ continue
+ }
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+ la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
+ } else if t2, ok := t.(AbstractPredicateTransition); ok {
+ if seeThruPreds {
+ la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else {
+ look.addOne(LL1AnalyzerHitPred)
+ }
+ } else if t.getIsEpsilon() {
+ la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else if _, ok := t.(*WildcardTransition); ok {
+ look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
+ } else {
+ set := t.getLabel()
+ if set != nil {
+ if _, ok := t.(*NotSetTransition); ok {
+ set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
+ }
+ look.addSet(set)
+ }
+ }
+ }
+}
+
+func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+
+ defer func() {
+ calledRuleStack.remove(t1.getTarget().GetRuleIndex())
+ }()
+
+ calledRuleStack.add(t1.getTarget().GetRuleIndex())
+ la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
new file mode 100644
index 000000000..fb60258e3
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
@@ -0,0 +1,718 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type Parser interface {
+ Recognizer
+
+ GetInterpreter() *ParserATNSimulator
+
+ GetTokenStream() TokenStream
+ GetTokenFactory() TokenFactory
+ GetParserRuleContext() ParserRuleContext
+ SetParserRuleContext(ParserRuleContext)
+ Consume() Token
+ GetParseListeners() []ParseTreeListener
+
+ GetErrorHandler() ErrorStrategy
+ SetErrorHandler(ErrorStrategy)
+ GetInputStream() IntStream
+ GetCurrentToken() Token
+ GetExpectedTokens() *IntervalSet
+ NotifyErrorListeners(string, Token, RecognitionException)
+ IsExpectedToken(int) bool
+ GetPrecedence() int
+ GetRuleInvocationStack(ParserRuleContext) []string
+}
+
+type BaseParser struct {
+ *BaseRecognizer
+
+ Interpreter *ParserATNSimulator
+ BuildParseTrees bool
+
+ input TokenStream
+ errHandler ErrorStrategy
+ precedenceStack IntStack
+ ctx ParserRuleContext
+
+ tracer *TraceListener
+ parseListeners []ParseTreeListener
+ _SyntaxErrors int
+}
+
+// This is all the parsing support code; essentially most of it is error
+// recovery stuff.//
+func NewBaseParser(input TokenStream) *BaseParser {
+
+ p := new(BaseParser)
+
+ p.BaseRecognizer = NewBaseRecognizer()
+
+ // The input stream.
+ p.input = nil
+ // The error handling strategy for the parser. The default value is a new
+ // instance of {@link DefaultErrorStrategy}.
+ p.errHandler = NewDefaultErrorStrategy()
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ // The {@link ParserRuleContext} object for the currently executing rule.
+	// This is always non-nil during the parsing process.
+ p.ctx = nil
+ // Specifies whether or not the parser should construct a parse tree during
+ // the parsing process. The default value is {@code true}.
+ p.BuildParseTrees = true
+ // When {@link //setTrace}{@code (true)} is called, a reference to the
+ // {@link TraceListener} is stored here so it can be easily removed in a
+ // later call to {@link //setTrace}{@code (false)}. The listener itself is
+	// implemented as a parser listener so this field is not directly used by
+ // other parser methods.
+ p.tracer = nil
+ // The list of {@link ParseTreeListener} listeners registered to receive
+ // events during the parse.
+ p.parseListeners = nil
+	// The number of syntax errors Reported during parsing. This value is
+ // incremented each time {@link //NotifyErrorListeners} is called.
+ p._SyntaxErrors = 0
+ p.SetInputStream(input)
+
+ return p
+}
+
+// This field maps from the serialized ATN string to the deserialized
+// {@link ATN} with bypass alternatives.
+//
+// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
+//
+var bypassAltsAtnCache = make(map[string]int)
+
+// reset the parser's state//
+func (p *BaseParser) reset() {
+ if p.input != nil {
+ p.input.Seek(0)
+ }
+ p.errHandler.reset(p)
+ p.ctx = nil
+ p._SyntaxErrors = 0
+ p.SetTrace(nil)
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ if p.Interpreter != nil {
+ p.Interpreter.reset()
+ }
+}
+
+func (p *BaseParser) GetErrorHandler() ErrorStrategy {
+ return p.errHandler
+}
+
+func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
+ p.errHandler = e
+}
+
+// Match current input symbol against {@code ttype}. If the symbol type
+// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
+// called to complete the Match process.
+//
+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+//
+// @param ttype the token type to Match
+// @return the Matched symbol
+// @panics RecognitionException if the current input symbol did not Match
+// {@code ttype} and the error strategy could not recover from the
+// mismatched symbol
+
+func (p *BaseParser) Match(ttype int) Token {
+
+ t := p.GetCurrentToken()
+
+ if t.GetTokenType() == ttype {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+			// we must have conjured up a new token during single token
+ // insertion
+ // if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+
+ return t
+}
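
Review note: the Match/recover contract described above is what generated rule bodies rely on. A schematic, hedged sketch follows; the token-type constants are hypothetical stand-ins for the values a real generated parser would define:

    // hypotheticalID and hypotheticalSEMI are illustrative only, not real generated constants.
    const (
        hypotheticalID   = 1
        hypotheticalSEMI = 2
    )

    // matchIDSemi consumes an ID then a ';', or lets the error strategy recover
    // inline (possibly adding an error node to the parse tree).
    func matchIDSemi(p *antlr.BaseParser) antlr.Token {
        id := p.Match(hypotheticalID)
        p.Match(hypotheticalSEMI)
        return id
    }
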
+
+// Match current input symbol as a wildcard. If the symbol type Matches
+// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
+// and {@link //consume} are called to complete the Match process.
+//
+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+//
+// @return the Matched symbol
+// @panics RecognitionException if the current input symbol did not Match
+// a wildcard and the error strategy could not recover from the mismatched
+// symbol
+
+func (p *BaseParser) MatchWildcard() Token {
+ t := p.GetCurrentToken()
+ if t.GetTokenType() > 0 {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+			// we must have conjured up a new token during single token
+ // insertion
+ // if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+ return t
+}
+
+func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
+ return p.ctx
+}
+
+func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
+ p.ctx = v
+}
+
+func (p *BaseParser) GetParseListeners() []ParseTreeListener {
+ if p.parseListeners == nil {
+ return make([]ParseTreeListener, 0)
+ }
+ return p.parseListeners
+}
+
+// Registers {@code listener} to receive events during the parsing process.
+//
+// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parser. In addition, calls to certain
+// rule entry methods may be omitted.
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+//
+//
+// - Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
+//
+//
+// @param listener the listener to add
+//
+// @panics nilPointerException if {@code listener} is {@code nil}
+//
+func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
+ if listener == nil {
+ panic("listener")
+ }
+ if p.parseListeners == nil {
+ p.parseListeners = make([]ParseTreeListener, 0)
+ }
+ p.parseListeners = append(p.parseListeners, listener)
+}
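
For reviewers unfamiliar with the listener API: the method set expected of a ParseTreeListener is inferred here from the dispatch calls made later in this file (EnterEveryRule, ExitEveryRule, VisitTerminal, VisitErrorNode). A hedged consumer-side sketch:

    import (
        "fmt"

        "github.com/antlr/antlr4/runtime/Go/antlr"
    )

    // ruleLogger prints rule entry/exit and terminals as the parse proceeds.
    type ruleLogger struct{}

    func (r *ruleLogger) EnterEveryRule(ctx antlr.ParserRuleContext) { fmt.Println("enter", ctx.GetRuleIndex()) }
    func (r *ruleLogger) ExitEveryRule(ctx antlr.ParserRuleContext)  { fmt.Println("exit", ctx.GetRuleIndex()) }
    func (r *ruleLogger) VisitTerminal(node antlr.TerminalNode)      { fmt.Println("token", node.GetText()) }
    func (r *ruleLogger) VisitErrorNode(node antlr.ErrorNode)        { fmt.Println("error node", node.GetText()) }

    // attach registers the logger on any parser that embeds BaseParser.
    func attach(p *antlr.BaseParser) {
        p.AddParseListener(&ruleLogger{})
    }
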
+
+//
+// Remove {@code listener} from the list of parse listeners.
+//
+// If {@code listener} is {@code nil} or has not been added as a parse
+// listener, this method does nothing.
+// @param listener the listener to remove
+//
+func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
+
+ if p.parseListeners != nil {
+
+ idx := -1
+ for i, v := range p.parseListeners {
+ if v == listener {
+ idx = i
+ break
+ }
+ }
+
+ if idx == -1 {
+ return
+ }
+
+ // remove the listener from the slice
+ p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
+
+ if len(p.parseListeners) == 0 {
+ p.parseListeners = nil
+ }
+ }
+}
+
+// Remove all parse listeners.
+func (p *BaseParser) removeParseListeners() {
+ p.parseListeners = nil
+}
+
+// Notify any parse listeners of an enter rule event.
+func (p *BaseParser) TriggerEnterRuleEvent() {
+ if p.parseListeners != nil {
+ ctx := p.ctx
+ for _, listener := range p.parseListeners {
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+ }
+ }
+}
+
+//
+// Notify any parse listeners of an exit rule event.
+//
+// @see //addParseListener
+//
+func (p *BaseParser) TriggerExitRuleEvent() {
+ if p.parseListeners != nil {
+ // reverse order walk of listeners
+ ctx := p.ctx
+ l := len(p.parseListeners) - 1
+
+ for i := range p.parseListeners {
+ listener := p.parseListeners[l-i]
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+ }
+ }
+}
+
+func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
+ return p.Interpreter
+}
+
+func (p *BaseParser) GetATN() *ATN {
+ return p.Interpreter.atn
+}
+
+func (p *BaseParser) GetTokenFactory() TokenFactory {
+ return p.input.GetTokenSource().GetTokenFactory()
+}
+
+// Tell our token source and error strategy about a new way to create tokens.//
+func (p *BaseParser) setTokenFactory(factory TokenFactory) {
+ p.input.GetTokenSource().setTokenFactory(factory)
+}
+
+// The ATN with bypass alternatives is expensive to create so we create it
+// lazily.
+//
+// @panics UnsupportedOperationException if the current parser does not
+// implement the {@link //getSerializedATN()} method.
+//
+func (p *BaseParser) GetATNWithBypassAlts() {
+
+ // TODO
+ panic("Not implemented!")
+
+ // serializedAtn := p.getSerializedATN()
+ // if (serializedAtn == nil) {
+ // panic("The current parser does not support an ATN with bypass alternatives.")
+ // }
+ // result := p.bypassAltsAtnCache[serializedAtn]
+ // if (result == nil) {
+ // deserializationOptions := NewATNDeserializationOptions(nil)
+ // deserializationOptions.generateRuleBypassTransitions = true
+ // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
+ // p.bypassAltsAtnCache[serializedAtn] = result
+ // }
+ // return result
+}
+
+// The preferred method of getting a tree pattern. For example, here's a
+// sample use:
+//
+//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+//
+
+func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
+
+ panic("NewParseTreePatternMatcher not implemented!")
+ //
+ // if (lexer == nil) {
+ // if (p.GetTokenStream() != nil) {
+ // tokenSource := p.GetTokenStream().GetTokenSource()
+ // if _, ok := tokenSource.(ILexer); ok {
+ // lexer = tokenSource
+ // }
+ // }
+ // }
+ // if (lexer == nil) {
+ // panic("Parser can't discover a lexer to use")
+ // }
+
+ // m := NewParseTreePatternMatcher(lexer, p)
+ // return m.compile(pattern, patternRuleIndex)
+}
+
+func (p *BaseParser) GetInputStream() IntStream {
+ return p.GetTokenStream()
+}
+
+func (p *BaseParser) SetInputStream(input TokenStream) {
+ p.SetTokenStream(input)
+}
+
+func (p *BaseParser) GetTokenStream() TokenStream {
+ return p.input
+}
+
+// Set the token stream and reset the parser.//
+func (p *BaseParser) SetTokenStream(input TokenStream) {
+ p.input = nil
+ p.reset()
+ p.input = input
+}
+
+// Match needs to return the current input symbol, which gets put
+// into the label for the associated token ref e.g., x=ID.
+//
+func (p *BaseParser) GetCurrentToken() Token {
+ return p.input.LT(1)
+}
+
+func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
+ if offendingToken == nil {
+ offendingToken = p.GetCurrentToken()
+ }
+ p._SyntaxErrors++
+ line := offendingToken.GetLine()
+ column := offendingToken.GetColumn()
+ listener := p.GetErrorListenerDispatch()
+ listener.SyntaxError(p, offendingToken, line, column, msg, err)
+}
+
+func (p *BaseParser) Consume() Token {
+ o := p.GetCurrentToken()
+ if o.GetTokenType() != TokenEOF {
+ p.GetInputStream().Consume()
+ }
+ hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
+ if p.BuildParseTrees || hasListener {
+ if p.errHandler.inErrorRecoveryMode(p) {
+ node := p.ctx.AddErrorNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitErrorNode(node)
+ }
+ }
+
+ } else {
+ node := p.ctx.AddTokenNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitTerminal(node)
+ }
+ }
+ }
+ // node.invokingState = p.state
+ }
+
+ return o
+}
+
+func (p *BaseParser) addContextToParseTree() {
+ // add current context to parent if we have a parent
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
+ }
+}
+
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
+ p.SetState(state)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.BuildParseTrees {
+ p.addContextToParseTree()
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent()
+ }
+}
+
+func (p *BaseParser) ExitRule() {
+ p.ctx.SetStop(p.input.LT(-1))
+ // trigger event on ctx, before it reverts to parent
+ if p.parseListeners != nil {
+ p.TriggerExitRuleEvent()
+ }
+ p.SetState(p.ctx.GetInvokingState())
+ if p.ctx.GetParent() != nil {
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ } else {
+ p.ctx = nil
+ }
+}
+
+func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
+ localctx.SetAltNumber(altNum)
+	// if we have a new localctx, make sure we replace the existing ctx
+ // that is previous child of parse tree
+ if p.BuildParseTrees && p.ctx != localctx {
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
+ p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
+ }
+ }
+ p.ctx = localctx
+}
+
+// Get the precedence level for the top-most precedence rule.
+//
+// @return The precedence level for the top-most precedence rule, or -1 if
+// the parser context is not nested within a precedence rule.
+
+func (p *BaseParser) GetPrecedence() int {
+ if len(p.precedenceStack) == 0 {
+ return -1
+ }
+
+ return p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
+ p.SetState(state)
+ p.precedenceStack.Push(precedence)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+//
+// Like {@link //EnterRule} but for recursive rules.
+
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
+ previous := p.ctx
+ previous.SetParent(localctx)
+ previous.SetInvokingState(state)
+ previous.SetStop(p.input.LT(-1))
+
+ p.ctx = localctx
+ p.ctx.SetStart(previous.GetStart())
+ if p.BuildParseTrees {
+ p.ctx.AddChild(previous)
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
+ p.precedenceStack.Pop()
+ p.ctx.SetStop(p.input.LT(-1))
+ retCtx := p.ctx // save current ctx (return value)
+ // unroll so ctx is as it was before call to recursive method
+ if p.parseListeners != nil {
+ for p.ctx != parentCtx {
+ p.TriggerExitRuleEvent()
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ }
+ } else {
+ p.ctx = parentCtx
+ }
+ // hook into tree
+ retCtx.SetParent(parentCtx)
+ if p.BuildParseTrees && parentCtx != nil {
+ // add return ctx into invoking rule's tree
+ parentCtx.AddChild(retCtx)
+ }
+}
+
+func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
+ ctx := p.ctx
+ for ctx != nil {
+ if ctx.GetRuleIndex() == ruleIndex {
+ return ctx
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ return nil
+}
+
+func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
+ return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) inContext(context ParserRuleContext) bool {
+ // TODO: useful in parser?
+ return false
+}
+
+//
+// Checks whether or not {@code symbol} can follow the current state in the
+// ATN. The behavior of this method is equivalent to the following, but is
+// implemented such that the complete context-sensitive follow set does not
+// need to be explicitly constructed.
+//
+//
+// return getExpectedTokens().contains(symbol)
+//
+//
+// @param symbol the symbol type to check
+// @return {@code true} if {@code symbol} can follow the current state in
+// the ATN, otherwise {@code false}.
+
+func (p *BaseParser) IsExpectedToken(symbol int) bool {
+ atn := p.Interpreter.atn
+ ctx := p.ctx
+ s := atn.states[p.state]
+ following := atn.NextTokens(s, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ if !following.contains(TokenEpsilon) {
+ return false
+ }
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ if following.contains(TokenEpsilon) && symbol == TokenEOF {
+ return true
+ }
+
+ return false
+}
+
+// Computes the set of input symbols which could follow the current parser
+// state and context, as given by {@link //GetState} and {@link //GetContext},
+// respectively.
+//
+// @see ATN//getExpectedTokens(int, RuleContext)
+//
+func (p *BaseParser) GetExpectedTokens() *IntervalSet {
+ return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
+}
+
+func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
+ atn := p.Interpreter.atn
+ s := atn.states[p.state]
+ return atn.NextTokens(s, nil)
+}
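
Usage note (hedged): the set returned here is what error messages typically surface. A minimal sketch, assuming the vendored antlr import and IntervalSet's String method defined elsewhere in the runtime:

    // expectedTokensMessage renders the follow set for the parser's current state and context.
    func expectedTokensMessage(p *antlr.BaseParser) string {
        return "expecting one of " + p.GetExpectedTokens().String()
    }
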
+
+// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
+func (p *BaseParser) GetRuleIndex(ruleName string) int {
+ var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
+ if ok {
+ return ruleIndex
+ }
+
+ return -1
+}
+
+// Return List<String> of the rule names in your parser instance
+// leading up to a call to the current rule. You could override if
+// you want more details such as the file/line info of where
+// in the ATN a rule is invoked.
+//
+// This is very useful for error messages.
+
+func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
+ if c == nil {
+ c = p.ctx
+ }
+ stack := make([]string, 0)
+ for c != nil {
+ // compute what follows who invoked us
+ ruleIndex := c.GetRuleIndex()
+ if ruleIndex < 0 {
+ stack = append(stack, "n/a")
+ } else {
+ stack = append(stack, p.GetRuleNames()[ruleIndex])
+ }
+
+ vp := c.GetParent()
+
+ if vp == nil {
+ break
+ }
+
+ c = vp.(ParserRuleContext)
+ }
+ return stack
+}
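
As the comment above says, the invocation stack is most useful in error messages. Below is a hedged sketch of a custom error listener that prints it; the SyntaxError signature mirrors the dispatch in NotifyErrorListeners above, while DefaultErrorListener is assumed from elsewhere in the runtime:

    type verboseErrorListener struct {
        *antlr.DefaultErrorListener // inherits no-op implementations of the other callbacks
    }

    func (v *verboseErrorListener) SyntaxError(recognizer antlr.Recognizer, offendingSymbol interface{},
        line, column int, msg string, e antlr.RecognitionException) {
        if p, ok := recognizer.(antlr.Parser); ok {
            fmt.Printf("rule stack: %v\n", p.GetRuleInvocationStack(nil)) // nil means "start from the current context"
        }
        fmt.Printf("line %d:%d %s\n", line, column, msg)
    }
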
+
+// For debugging and other purposes.//
+func (p *BaseParser) GetDFAStrings() string {
+ return fmt.Sprint(p.Interpreter.decisionToDFA)
+}
+
+// For debugging and other purposes.//
+func (p *BaseParser) DumpDFA() {
+ seenOne := false
+ for _, dfa := range p.Interpreter.decisionToDFA {
+ if dfa.numStates() > 0 {
+ if seenOne {
+ fmt.Println()
+ }
+ fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
+ fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
+ seenOne = true
+ }
+ }
+}
+
+func (p *BaseParser) GetSourceName() string {
+ return p.GrammarFileName
+}
+
+// During a parse it is sometimes useful to listen in on rule entry and exit
+// events as well as token Matches. This is for quick and dirty debugging.
+//
+func (p *BaseParser) SetTrace(trace *TraceListener) {
+ if trace == nil {
+ p.RemoveParseListener(p.tracer)
+ p.tracer = nil
+ } else {
+ if p.tracer != nil {
+ p.RemoveParseListener(p.tracer)
+ }
+ p.tracer = NewTraceListener(p)
+ p.AddParseListener(p.tracer)
+ }
+}
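
Review note: SetTrace only checks its argument for nil and builds its own TraceListener internally, so any non-nil value switches tracing on. A hedged sketch, assuming the vendored antlr import:

    // withTracing echoes rule entry/exit and token matches for the duration of fn.
    func withTracing(p *antlr.BaseParser, fn func()) {
        p.SetTrace(antlr.NewTraceListener(p)) // any non-nil value enables tracing
        defer p.SetTrace(nil)                 // detach the trace listener again
        fn()
    }
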
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
new file mode 100644
index 000000000..d7a3665be
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
@@ -0,0 +1,1538 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ ParserATNSimulatorDebug = false
+ ParserATNSimulatorListATNDecisions = false
+ ParserATNSimulatorDFADebug = false
+ ParserATNSimulatorRetryDebug = false
+ TurnOffLRLoopEntryBranchOpt = false
+)
+
+type ParserATNSimulator struct {
+ *BaseATNSimulator
+
+ parser Parser
+ predictionMode int
+ input TokenStream
+ startIndex int
+ dfa *DFA
+ mergeCache *DoubleDict
+ outerContext ParserRuleContext
+}
+
+func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
+
+ p := new(ParserATNSimulator)
+
+ p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
+
+ p.parser = parser
+ p.decisionToDFA = decisionToDFA
+ // SLL, LL, or LL + exact ambig detection?//
+ p.predictionMode = PredictionModeLL
+ // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
+ p.input = nil
+ p.startIndex = 0
+ p.outerContext = nil
+ p.dfa = nil
+ // Each prediction operation uses a cache for merge of prediction contexts.
+ // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
+ // isn't Synchronized but we're ok since two threads shouldn't reuse same
+ // parser/atnsim object because it can only handle one input at a time.
+	// This maps graphs a and b to merged result c. (a,b)→c. We can avoid
+	// the merge if we ever see a and b again. Note that (b,a)→c should
+	// also be examined during cache lookup.
+ //
+ p.mergeCache = nil
+
+ return p
+}
+
+func (p *ParserATNSimulator) GetPredictionMode() int {
+ return p.predictionMode
+}
+
+func (p *ParserATNSimulator) SetPredictionMode(v int) {
+ p.predictionMode = v
+}
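
Hedged usage note: a common pattern is to run the cheaper SLL prediction first and retry with full LL only on failure; switching modes is just a setter call on the interpreter (PredictionModeSLL and PredictionModeLL are runtime constants referenced later in this file):

    // useSLL puts the parser's ATN simulator into SLL prediction mode.
    func useSLL(p *antlr.BaseParser) {
        p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
    }
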
+
+func (p *ParserATNSimulator) reset() {
+}
+
+func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
+ if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
+ strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ p.input = input
+ p.startIndex = input.Index()
+ p.outerContext = outerContext
+
+ dfa := p.decisionToDFA[decision]
+ p.dfa = dfa
+ m := input.Mark()
+ index := input.Index()
+
+ defer func() {
+ p.dfa = nil
+ p.mergeCache = nil // wack cache after each prediction
+ input.Seek(index)
+ input.Release(m)
+ }()
+
+ // Now we are certain to have a specific decision's DFA
+ // But, do we still need an initial state?
+ var s0 *DFAState
+ if dfa.getPrecedenceDfa() {
+ // the start state for a precedence DFA depends on the current
+ // parser precedence, and is provided by a DFA method.
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
+ } else {
+ // the start state for a "regular" DFA is just s0
+ s0 = dfa.getS0()
+ }
+
+ if s0 == nil {
+ if outerContext == nil {
+ outerContext = RuleContextEmpty
+ }
+ if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
+ }
+		// If this is not a precedence DFA, we check the ATN start state
+		// to determine if this ATN start state is the decision for the
+ // closure block that determines whether a precedence rule
+ // should continue or complete.
+
+ t2 := dfa.atnStartState
+ t, ok := t2.(*StarLoopEntryState)
+ if !dfa.getPrecedenceDfa() && ok {
+ if t.precedenceRuleDecision {
+ dfa.setPrecedenceDfa(true)
+ }
+ }
+ fullCtx := false
+ s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx)
+
+ if dfa.getPrecedenceDfa() {
+			// If this is a precedence DFA, we use applyPrecedenceFilter
+ // to convert the computed start state to a precedence start
+ // state. We then use DFA.setPrecedenceStartState to set the
+ // appropriate start state for the precedence level rather
+ // than simply setting DFA.s0.
+ //
+ dfa.s0.configs = s0Closure
+ s0Closure = p.applyPrecedenceFilter(s0Closure)
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
+ } else {
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ dfa.setS0(s0)
+ }
+ }
+ alt := p.execATN(dfa, s0, input, index, outerContext)
+ if ParserATNSimulatorDebug {
+ fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
+ }
+ return alt
+
+}
+
+// Performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+
+// There are some key conditions we're looking for after computing a new
+// set of ATN configs (proposed DFA state):
+// - if the set is empty, there is no viable alternative for the current symbol
+// - does the state uniquely predict an alternative?
+// - does the state have a conflict that would prevent us from
+//   putting it on the work list?
+
+// We also have some key operations to do:
+// - add an edge from the previous DFA state to the potentially new DFA state, D,
+//   upon the current symbol, but only if adding it to the work list, which means
+//   in all cases except no viable alternative (and possibly non-greedy decisions?)
+// - collecting predicates and adding semantic context to DFA accept states
+// - adding rule context to context-sensitive DFA accept states
+// - consuming an input symbol
+// - Reporting a conflict
+// - Reporting an ambiguity
+// - Reporting a context sensitivity
+// - Reporting insufficient predicates
+
+// We also need to cover these cases:
+// - dead end
+// - single alt
+// - single alt + preds
+// - conflict
+// - conflict + preds
+//
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
+
+ if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ previousD := s0
+
+ if ParserATNSimulatorDebug {
+ fmt.Println("s0 = " + s0.String())
+ }
+ t := input.LA(1)
+ for { // for more work
+ D := p.getExistingTargetState(previousD, t)
+ if D == nil {
+ D = p.computeTargetState(dfa, previousD, t)
+ }
+ if D == ATNSimulatorError {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for SLL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+			// We will get an error no matter what, so delay it until after the
+			// decision for a better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt
+ }
+
+ panic(e)
+ }
+ if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
+ // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
+ conflictingAlts := D.configs.GetConflictingAlts()
+ if D.predicates != nil {
+ if ParserATNSimulatorDebug {
+ fmt.Println("DFA state has preds in DFA sim LL failover")
+ }
+ conflictIndex := input.Index()
+ if conflictIndex != startIndex {
+ input.Seek(startIndex)
+ }
+ conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
+ if conflictingAlts.length() == 1 {
+ if ParserATNSimulatorDebug {
+ fmt.Println("Full LL avoided")
+ }
+ return conflictingAlts.minValue()
+ }
+ if conflictIndex != startIndex {
+ // restore the index so Reporting the fallback to full
+ // context occurs with the index at the correct spot
+ input.Seek(conflictIndex)
+ }
+ }
+ if ParserATNSimulatorDFADebug {
+ fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
+ }
+ fullCtx := true
+ s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
+ p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
+ alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+ return alt
+ }
+ if D.isAcceptState {
+ if D.predicates == nil {
+ return D.prediction
+ }
+ stopIndex := input.Index()
+ input.Seek(startIndex)
+ alts := p.evalSemanticContext(D.predicates, outerContext, true)
+
+ switch alts.length() {
+ case 0:
+ panic(p.noViableAlt(input, outerContext, D.configs, startIndex))
+ case 1:
+ return alts.minValue()
+ default:
+ // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
+ p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
+ return alts.minValue()
+ }
+ }
+ previousD = D
+
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+
+	panic("Should not have reached this state")
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// this method returns {@code nil}.
+//
+// @param previousD The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for this edge is not
+// already cached
+
+func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
+ edges := previousD.getEdges()
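+	// Edges are indexed by t+1 so that EOF (token type -1) can be stored in
+	// slot 0; addDFAEdge applies the same shift when the edge is written.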
+ if edges == nil || t+1 < 0 || t+1 >= len(edges) {
+ return nil
+ }
+
+ return previousD.getIthEdge(t + 1)
+}
+
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
+//
+// @param dfa The DFA
+// @param previousD The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, this method
+// returns {@link //ERROR}.
+
+func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
+ reach := p.computeReachSet(previousD.configs, t, false)
+
+ if reach == nil {
+ p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
+ return ATNSimulatorError
+ }
+	// create new target state; we'll add it to the DFA after it's complete
+ D := NewDFAState(-1, reach)
+
+ predictedAlt := p.getUniqueAlt(reach)
+
+ if ParserATNSimulatorDebug {
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
+ ", previous=" + previousD.configs.String() +
+ ", configs=" + reach.String() +
+ ", predict=" + strconv.Itoa(predictedAlt) +
+ ", allSubsetsConflict=" +
+ fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
+ ", conflictingAlts=" + p.getConflictingAlts(reach).String())
+ }
+ if predictedAlt != ATNInvalidAltNumber {
+ // NO CONFLICT, UNIQUELY PREDICTED ALT
+ D.isAcceptState = true
+ D.configs.SetUniqueAlt(predictedAlt)
+ D.setPrediction(predictedAlt)
+ } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
+ // MORE THAN ONE VIABLE ALTERNATIVE
+ D.configs.SetConflictingAlts(p.getConflictingAlts(reach))
+ D.requiresFullContext = true
+		// in SLL-only mode, we will stop at this state and return the minimum alt
+ D.isAcceptState = true
+ D.setPrediction(D.configs.GetConflictingAlts().minValue())
+ }
+ if D.isAcceptState && D.configs.HasSemanticContext() {
+ p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
+ if D.predicates != nil {
+ D.setPrediction(ATNInvalidAltNumber)
+ }
+ }
+ // all adds to dfa are done after we've created full D state
+ D = p.addDFAEdge(dfa, previousD, t, D)
+ return D
+}
+
+func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
+ // We need to test all predicates, even in DFA states that
+ // uniquely predict alternative.
+ nalts := len(decisionState.GetTransitions())
+ // Update DFA so reach becomes accept state with (predicate,alt)
+ // pairs if preds found for conflicting alts
+ altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
+ altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
+ if altToPred != nil {
+ dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
+ dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
+ } else {
+ // There are preds in configs but they might go away
+ // when OR'd together like {p}? || NONE == NONE. If neither
+ // alt has preds, resolve to min alt
+ dfaState.setPrediction(altsToCollectPredsFrom.minValue())
+ }
+}
+
+// comes back with reach.uniqueAlt set to a valid alt
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
+
+ if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ fmt.Println("execATNWithFullContext " + s0.String())
+ }
+
+ fullCtx := true
+ foundExactAmbig := false
+ var reach ATNConfigSet
+ previous := s0
+ input.Seek(startIndex)
+ t := input.LA(1)
+ predictedAlt := -1
+
+ for { // for more work
+ reach = p.computeReachSet(previous, t, fullCtx)
+ if reach == nil {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for LL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+			// We will get an error no matter what, so delay it until after the
+			// decision for a better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previous, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt
+ }
+
+ panic(e)
+ }
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ if ParserATNSimulatorDebug {
+ fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
+ strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
+ fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
+ }
+ reach.SetUniqueAlt(p.getUniqueAlt(reach))
+ // unique prediction?
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber {
+ predictedAlt = reach.GetUniqueAlt()
+ break
+ }
+ if p.predictionMode != PredictionModeLLExactAmbigDetection {
+ predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
+ if predictedAlt != ATNInvalidAltNumber {
+ break
+ }
+ } else {
+ // In exact ambiguity mode, we never try to terminate early.
+ // Just keeps scarfing until we know what the conflict is
+ if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
+ foundExactAmbig = true
+ predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
+ break
+ }
+ // else there are multiple non-conflicting subsets or
+ // we're not sure what the ambiguity is yet.
+ // So, keep going.
+ }
+ previous = reach
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+ // If the configuration set uniquely predicts an alternative,
+ // without conflict, then we know that it's a full LL decision
+ // not SLL.
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber {
+ p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
+ return predictedAlt
+ }
+ // We do not check predicates here because we have checked them
+ // on-the-fly when doing full context prediction.
+
+ //
+ // In non-exact ambiguity detection mode, we might actually be able to
+ // detect an exact ambiguity, but I'm not going to spend the cycles
+ // needed to check. We only emit ambiguity warnings in exact ambiguity
+ // mode.
+ //
+ // For example, we might know that we have conflicting configurations.
+ // But, that does not mean that there is no way forward without a
+ // conflict. It's possible to have nonconflicting alt subsets as in:
+
+ // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
+
+ // from
+ //
+ // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
+ // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
+ //
+	// In this case, (17,1,[5 $]) indicates there is some next sequence that
+	// would resolve this without conflict to alternative 1. Any other viable
+ // next sequence, however, is associated with a conflict. We stop
+ // looking for input because no amount of further lookahead will alter
+ // the fact that we should predict alternative 1. We just can't say for
+ // sure that there is an ambiguity without looking further.
+
+ p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
+
+ return predictedAlt
+}
+
+func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
+ if ParserATNSimulatorDebug {
+ fmt.Println("in computeReachSet, starting closure: " + closure.String())
+ }
+ if p.mergeCache == nil {
+ p.mergeCache = NewDoubleDict()
+ }
+ intermediate := NewBaseATNConfigSet(fullCtx)
+
+ // Configurations already in a rule stop state indicate reaching the end
+ // of the decision rule (local context) or end of the start rule (full
+ // context). Once reached, these configurations are never updated by a
+ // closure operation, so they are handled separately for the performance
+ // advantage of having a smaller intermediate set when calling closure.
+ //
+ // For full-context reach operations, separate handling is required to
+ // ensure that the alternative Matching the longest overall sequence is
+ // chosen when multiple such configurations can Match the input.
+
+ var skippedStopStates []*BaseATNConfig
+
+ // First figure out where we can reach on input t
+ for _, c := range closure.GetItems() {
+ if ParserATNSimulatorDebug {
+ fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
+ }
+
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ if fullCtx || t == TokenEOF {
+ skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig))
+ if ParserATNSimulatorDebug {
+ fmt.Println("added " + c.String() + " to SkippedStopStates")
+ }
+ }
+ continue
+ }
+
+ for _, trans := range c.GetState().GetTransitions() {
+ target := p.getReachableTarget(trans, t)
+ if target != nil {
+ cfg := NewBaseATNConfig4(c, target)
+ intermediate.Add(cfg, p.mergeCache)
+ if ParserATNSimulatorDebug {
+ fmt.Println("added " + cfg.String() + " to intermediate")
+ }
+ }
+ }
+ }
+
+ // Now figure out where the reach operation can take us...
+ var reach ATNConfigSet
+
+ // This block optimizes the reach operation for intermediate sets which
+ // trivially indicate a termination state for the overall
+ // AdaptivePredict operation.
+ //
+ // The conditions assume that intermediate
+	// contains all configurations relevant to the reach set, but this
+	// condition is not true when one or more configurations have been
+ // withheld in SkippedStopStates, or when the current symbol is EOF.
+ //
+ if skippedStopStates == nil && t != TokenEOF {
+ if len(intermediate.configs) == 1 {
+ // Don't pursue the closure if there is just one state.
+			// It can only have one alternative; just add it to the result.
+ reach = intermediate
+ } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
+ // Also don't pursue the closure if there is unique alternative
+ // among the configurations.
+ reach = intermediate
+ }
+ }
+ // If the reach set could not be trivially determined, perform a closure
+ // operation on the intermediate set to compute its initial value.
+ //
+ if reach == nil {
+ reach = NewBaseATNConfigSet(fullCtx)
+ closureBusy := NewArray2DHashSet(nil, nil)
+ treatEOFAsEpsilon := t == TokenEOF
+ amount := len(intermediate.configs)
+ for k := 0; k < amount; k++ {
+ p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
+ }
+ }
+ if t == TokenEOF {
+ // After consuming EOF no additional input is possible, so we are
+ // only interested in configurations which reached the end of the
+ // decision rule (local context) or end of the start rule (full
+ // context). Update reach to contain only these configurations. This
+ // handles both explicit EOF transitions in the grammar and implicit
+ // EOF transitions following the end of the decision or start rule.
+ //
+ // When reach==intermediate, no closure operation was performed. In
+		// this case, removeAllConfigsNotInRuleStopState needs to check for
+ // reachable rule stop states as well as configurations already in
+ // a rule stop state.
+ //
+ // This is handled before the configurations in SkippedStopStates,
+ // because any configurations potentially added from that list are
+		// already guaranteed to meet this condition whether or not it's
+ // required.
+ //
+ reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate)
+ }
+ // If SkippedStopStates!=nil, then it contains at least one
+ // configuration. For full-context reach operations, these
+ // configurations reached the end of the start rule, in which case we
+ // only add them back to reach if no configuration during the current
+ // closure operation reached such a state. This ensures AdaptivePredict
+ // chooses an alternative Matching the longest overall sequence when
+ // multiple alternatives are viable.
+ //
+ if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
+ for l := 0; l < len(skippedStopStates); l++ {
+ reach.Add(skippedStopStates[l], p.mergeCache)
+ }
+ }
+ if len(reach.GetItems()) == 0 {
+ return nil
+ }
+
+ return reach
+}
+
+//
+// Return a configuration set containing only the configurations from
+// {@code configs} which are in a {@link RuleStopState}. If all
+// configurations in {@code configs} are already in a rule stop state, this
+// method simply returns {@code configs}.
+//
+// When {@code lookToEndOfRule} is true, this method uses
+// {@link ATN//NextTokens} for each configuration in {@code configs} which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.
+//
+// @param configs the configuration set to update
+// @param lookToEndOfRule when true, this method checks for rule stop states
+// reachable by epsilon-only transitions from each configuration in
+// {@code configs}.
+//
+// @return {@code configs} if all configurations in {@code configs} are in a
+// rule stop state, otherwise return a new configuration set containing only
+// the configurations from {@code configs} which are in a rule stop state
+//
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return configs
+ }
+ result := NewBaseATNConfigSet(configs.FullContext())
+ for _, config := range configs.GetItems() {
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ result.Add(config, p.mergeCache)
+ continue
+ }
+ if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
+ NextTokens := p.atn.NextTokens(config.GetState(), nil)
+ if NextTokens.contains(TokenEpsilon) {
+ endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
+ result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache)
+ }
+ }
+ }
+ return result
+}
+
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet {
+ // always at least the implicit call to start rule
+ initialContext := predictionContextFromRuleContext(p.atn, ctx)
+ configs := NewBaseATNConfigSet(fullCtx)
+ for i := 0; i < len(a.GetTransitions()); i++ {
+ target := a.GetTransitions()[i].getTarget()
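+		// Alternatives are numbered from 1, so transition i of the decision
+		// state seeds a configuration for alternative i+1.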
+ c := NewBaseATNConfig6(target, i+1, initialContext)
+ closureBusy := NewArray2DHashSet(nil, nil)
+ p.closure(c, configs, closureBusy, true, fullCtx, false)
+ }
+ return configs
+}
+
+//
+// This method transforms the start state computed by
+// {@link //computeStartState} to the special start state used by a
+// precedence DFA for a particular precedence value. The transformation
+// process applies the following changes to the start state's configuration
+// set.
+//
+//
+// - Evaluate the precedence predicates for each configuration using
+// {@link SemanticContext//evalPrecedence}.
+// - Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context. This transformation is
+// valid for the following reasons:
+//
+// - The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.
+// - The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.
+//
+//
+// The prediction context must be considered by this filter to address
+// situations like the following.
+//
+//
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+//
+//
+// In the above grammar, the ATN state immediately before the token
+// reference {@code 'a'} in {@code letterA} is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// {@code statement}. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to {@code prog} (and then back in to {@code statement})
+// from being eliminated by the filter.
+//
+//
+// @param configs The configuration set computed by
+// {@link //computeStartState} as the start state for the DFA.
+// @return The transformed configuration set representing the start state
+// for a precedence DFA at a particular precedence level (determined by
+// calling {@link Parser//getPrecedence}).
+//
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
+
+ statesFromAlt1 := make(map[int]PredictionContext)
+ configSet := NewBaseATNConfigSet(configs.FullContext())
+
+ for _, config := range configs.GetItems() {
+ // handle alt 1 first
+ if config.GetAlt() != 1 {
+ continue
+ }
+ updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
+ if updatedContext == nil {
+ // the configuration was eliminated
+ continue
+ }
+ statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
+ if updatedContext != config.GetSemanticContext() {
+ configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache)
+ } else {
+ configSet.Add(config, p.mergeCache)
+ }
+ }
+ for _, config := range configs.GetItems() {
+
+ if config.GetAlt() == 1 {
+ // already handled
+ continue
+ }
+		// In the future, this elimination step could be updated to also
+ // filter the prediction context for alternatives predicting alt>1
+ // (basically a graph subtraction algorithm).
+ if !config.getPrecedenceFilterSuppressed() {
+ context := statesFromAlt1[config.GetState().GetStateNumber()]
+ if context != nil && context.equals(config.GetContext()) {
+ // eliminated
+ continue
+ }
+ }
+ configSet.Add(config, p.mergeCache)
+ }
+ return configSet
+}
+
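+// getReachableTarget returns the target state of trans if the transition
+// matches the input symbol ttype, or nil if it does not.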
+func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
+ if trans.Matches(ttype, 0, p.atn.maxTokenType) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext {
+
+ altToPred := make([]SemanticContext, nalts+1)
+ for _, c := range configs.GetItems() {
+ if ambigAlts.contains(c.GetAlt()) {
+ altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
+ }
+ }
+ nPredAlts := 0
+ for i := 1; i <= nalts; i++ {
+ pred := altToPred[i]
+ if pred == nil {
+ altToPred[i] = SemanticContextNone
+ } else if pred != SemanticContextNone {
+ nPredAlts++
+ }
+ }
+ // nonambig alts are nil in altToPred
+ if nPredAlts == 0 {
+ altToPred = nil
+ }
+ if ParserATNSimulatorDebug {
+ fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
+ }
+ return altToPred
+}
+
+func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
+ pairs := make([]*PredPrediction, 0)
+ containsPredicate := false
+ for i := 1; i < len(altToPred); i++ {
+ pred := altToPred[i]
+ // unpredicated is indicated by SemanticContextNONE
+ if ambigAlts != nil && ambigAlts.contains(i) {
+ pairs = append(pairs, NewPredPrediction(pred, i))
+ }
+ if pred != SemanticContextNone {
+ containsPredicate = true
+ }
+ }
+ if !containsPredicate {
+ return nil
+ }
+ return pairs
+}
+
+//
+// This method is used to improve the localization of error messages by
+// choosing an alternative rather than panicking with a
+// {@link NoViableAltException} in particular prediction scenarios where the
+// {@link //ERROR} state was reached during ATN simulation.
+//
+//
+// The default implementation of this method uses the following
+// algorithm to identify an ATN configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// {@link ParserRuleContext} returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.
+//
+//
+// - If a syntactically valid path or paths reach the end of the decision rule, and
+// they are semantically valid if predicated, return the min associated alt.
+// - Else, if a semantically invalid but syntactically valid path or paths
+// exist, return the minimum associated alt.
+//
+// - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
+//
+//
+//
+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a {@link FailedPredicateException} in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// {@link //AdaptivePredict} instead of panicking with a
+// {@link NoViableAltException}, the resulting
+// {@link FailedPredicateException} in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+//
+//
+// @param configs The ATN configurations which were valid immediately before
+// the {@link //ERROR} state was reached
+// @param outerContext The \gamma_0 initial parser context from the paper,
+// or the parser stack at the instant before prediction commences.
+//
+// @return The value to return from {@link //AdaptivePredict}, or
+// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
+// identified and {@link //AdaptivePredict} should Report an error instead.
+//
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
+ cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
+ semValidConfigs := cfgs[0]
+ semInvalidConfigs := cfgs[1]
+ alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
+ if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
+ return alt
+ }
+ // Is there a syntactically valid path with a failed pred?
+ if len(semInvalidConfigs.GetItems()) > 0 {
+ alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
+ if alt != ATNInvalidAltNumber { // syntactically viable path exists
+ return alt
+ }
+ }
+ return ATNInvalidAltNumber
+}
+
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int {
+ alts := NewIntervalSet()
+
+ for _, c := range configs.GetItems() {
+ _, ok := c.GetState().(*RuleStopState)
+
+ if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
+ alts.addOne(c.GetAlt())
+ }
+ }
+ if alts.length() == 0 {
+ return ATNInvalidAltNumber
+ }
+
+ return alts.first()
+}
+
+// Walk the list of configurations and split them according to
+// those that have preds evaluating to true/false. If no pred, assume
+// true pred and include in succeeded set. Returns Pair of sets.
+//
+// Create a new set so as not to alter the incoming parameter.
+//
+// Assumption: the input stream has been restored to the starting point
+// prediction, which is where predicates need to evaluate.
+
+type ATNConfigSetPair struct {
+ item0, item1 ATNConfigSet
+}
+
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet {
+ succeeded := NewBaseATNConfigSet(configs.FullContext())
+ failed := NewBaseATNConfigSet(configs.FullContext())
+
+ for _, c := range configs.GetItems() {
+ if c.GetSemanticContext() != SemanticContextNone {
+ predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
+ if predicateEvaluationResult {
+ succeeded.Add(c, nil)
+ } else {
+ failed.Add(c, nil)
+ }
+ } else {
+ succeeded.Add(c, nil)
+ }
+ }
+ return []ATNConfigSet{succeeded, failed}
+}
+
+// Look through a list of predicate/alt pairs, returning alts for the
+// pairs that win. A {@code NONE} predicate indicates an alt containing an
+// unpredicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
+//
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
+ predictions := NewBitSet()
+ for i := 0; i < len(predPredictions); i++ {
+ pair := predPredictions[i]
+ if pair.pred == SemanticContextNone {
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ continue
+ }
+
+ predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
+ if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
+ fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
+ }
+ if predicateEvaluationResult {
+ if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
+ fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
+ }
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ }
+ }
+ return predictions
+}
+
+func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+ initialDepth := 0
+ p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
+ fullCtx, initialDepth, treatEOFAsEpsilon)
+}
+
+func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if ParserATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")")
+ fmt.Println("configs(" + configs.String() + ")")
+ if config.GetReachesIntoOuterContext() > 50 {
+ panic("problem")
+ }
+ }
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !config.GetContext().isEmpty() {
+ for i := 0; i < config.GetContext().length(); i++ {
+ if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if ParserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ newContext := config.GetContext().GetParent(i) // "pop" return state
+
+ c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+ // While we have context to pop back from, we may have
+				// gotten that context AFTER having fallen off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
+ p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
+ }
+ return
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(config, p.mergeCache)
+ return
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if ParserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ }
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+}
+
+// Do the actual work of walking epsilon edges//
+func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ state := config.GetState()
+ // optimization
+ if !state.GetEpsilonOnlyTransitions() {
+ configs.Add(config, p.mergeCache)
+ // make sure to not return here, because EOF transitions can act as
+ // both epsilon transitions and non-epsilon transitions.
+ }
+ for i := 0; i < len(state.GetTransitions()); i++ {
+ if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
+ continue
+ }
+
+ t := state.GetTransitions()[i]
+ _, ok := t.(*ActionTransition)
+ continueCollecting := collectPredicates && !ok
+ c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
+ if ci, ok := c.(*BaseATNConfig); ok && ci != nil {
+ newDepth := depth
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+				// target fell off end of rule; mark resulting c as having dipped into outer context
+				// We can't get here if incoming config was rule stop and we had context
+				// track how far we dip into outer context. Might
+				// come in handy and we avoid evaluating context dependent
+				// preds if this count is > 0.
+
+ if p.dfa != nil && p.dfa.getPrecedenceDfa() {
+ if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
+ c.setPrecedenceFilterSuppressed(true)
+ }
+ }
+
+ c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
+
+ if closureBusy.Add(c) != c {
+ // avoid infinite recursion for right-recursive rules
+ continue
+ }
+
+				configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of this method
+ newDepth--
+ if ParserATNSimulatorDebug {
+ fmt.Println("dips into outer ctx: " + c.String())
+ }
+ } else {
+ if !t.getIsEpsilon() && closureBusy.Add(c) != c {
+ // avoid infinite recursion for EOF* and EOF+
+ continue
+ }
+ if _, ok := t.(*RuleTransition); ok {
+ // latch when newDepth goes negative - once we step out of the entry context we can't return
+ if newDepth >= 0 {
+ newDepth++
+ }
+ }
+ }
+ p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
+ }
+ }
+}
+
+func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool {
+ if TurnOffLRLoopEntryBranchOpt {
+ return false
+ }
+
+ _p := config.GetState()
+
+ // First check to see if we are in StarLoopEntryState generated during
+ // left-recursion elimination. For efficiency, also check if
+ // the context has an empty stack case. If so, it would mean
+ // global FOLLOW so we can't perform optimization
+ if startLoop, ok := _p.(StarLoopEntryState); !ok || !startLoop.precedenceRuleDecision || config.GetContext().isEmpty() || config.GetContext().hasEmptyPath() {
+ return false
+ }
+
+ // Require all return states to return back to the same rule
+	// that _p is in.
+ numCtxs := config.GetContext().length()
+ for i := 0; i < numCtxs; i++ {
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ if returnState.GetRuleIndex() != _p.GetRuleIndex() {
+ return false
+ }
+ }
+
+ decisionStartState := _p.(BlockStartState).GetTransitions()[0].getTarget().(BlockStartState)
+ blockEndStateNum := decisionStartState.getEndState().stateNumber
+ blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
+
+ // Verify that the top of each stack context leads to loop entry/exit
+ // state through epsilon edges and w/o leaving rule.
+
+ for i := 0; i < numCtxs; i++ { // for each stack context
+ returnStateNumber := config.GetContext().getReturnState(i)
+ returnState := p.atn.states[returnStateNumber]
+
+ // all states must have single outgoing epsilon edge
+ if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
+ return false
+ }
+
+ // Look for prefix op case like 'not expr', (' type ')' expr
+ returnStateTarget := returnState.GetTransitions()[0].getTarget()
+ if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
+ continue
+ }
+
+ // Look for 'expr op expr' or case where expr's return state is block end
+ // of (...)* internal block; the block end points to loop back
+		// which points back to _p, but we don't need to check that
+ if returnState == blockEndState {
+ continue
+ }
+
+ // Look for ternary expr ? expr : expr. The return state points at block end,
+ // which points at loop entry state
+ if returnStateTarget == blockEndState {
+ continue
+ }
+
+ // Look for complex prefix 'between expr and expr' case where 2nd expr's
+ // return state points at block end state of (...)* internal block
+ if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
+ len(returnStateTarget.GetTransitions()) == 1 &&
+ returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
+ returnStateTarget.GetTransitions()[0].getTarget() == _p {
+ continue
+ }
+
+ // anything else ain't conforming
+ return false
+ }
+
+ return true
+}
+
+func (p *ParserATNSimulator) getRuleName(index int) string {
+ if p.parser != nil && index >= 0 {
+ return p.parser.GetRuleNames()[index]
+ }
+ var sb strings.Builder
+ sb.Grow(32)
+
+	sb.WriteString("<rule-")
+	sb.WriteString(strconv.Itoa(index))
+	sb.WriteString(">")
+ return sb.String()
+}
+
+func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig {
+
+ switch t.getSerializationType() {
+ case TransitionRULE:
+ return p.ruleTransition(config, t.(*RuleTransition))
+ case TransitionPRECEDENCE:
+ return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionPREDICATE:
+ return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionACTION:
+ return p.actionTransition(config, t.(*ActionTransition))
+ case TransitionEPSILON:
+ return NewBaseATNConfig4(config, t.getTarget())
+ case TransitionATOM, TransitionRANGE, TransitionSET:
+ // EOF transitions act like epsilon transitions after the first EOF
+ // transition is traversed
+ if treatEOFAsEpsilon {
+ if t.Matches(TokenEOF, 0, 1) {
+ return NewBaseATNConfig4(config, t.getTarget())
+ }
+ }
+ return nil
+ default:
+ return nil
+ }
+}
+
+func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig {
+ if ParserATNSimulatorDebug {
+ fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
+ }
+ return NewBaseATNConfig4(config, t.getTarget())
+}
+
+func (p *ParserATNSimulator) precedenceTransition(config ATNConfig,
+ pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
+
+ if ParserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
+ strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *BaseATNConfig
+ if collectPredicates && inContext {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewBaseATNConfig4(config, pt.getTarget())
+ }
+ if ParserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
+
+ if ParserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
+ ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *BaseATNConfig
+ if collectPredicates && (!pt.isCtxDependent || inContext) {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewBaseATNConfig4(config, pt.getTarget())
+ }
+ if ParserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig {
+ if ParserATNSimulatorDebug {
+ fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
+ }
+ returnState := t.followState
+ newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
+ return NewBaseATNConfig1(config, t.getTarget(), newContext)
+}
+
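+// getConflictingAlts collects the complete set of alternatives represented in
+// the conflicting alt subsets of the given configuration set.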
+func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet {
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModeGetAlts(altsets)
+}
+
+// Sam pointed out a problem with the previous definition, v3, of
+// ambiguous states. If we have another state associated with conflicting
+// alternatives, we should keep going. For example, the following grammar
+//
+// s : (ID | ID ID?) ';' ;
+//
+// When the ATN simulation reaches the state before ';', it has a DFA
+// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
+// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
+// because alternative two has another way to continue, via [6|2|[]].
+// The key is that we have a single state that has configs associated only
+// with a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state -> config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd nonconflicting configuration for a different alternative:
+// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
+//
+// a : A | A | A B
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not
+// stop working on this state. In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
+//
+
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet {
+ var conflictingAlts *BitSet
+ if configs.GetUniqueAlt() != ATNInvalidAltNumber {
+ conflictingAlts = NewBitSet()
+ conflictingAlts.add(configs.GetUniqueAlt())
+ } else {
+ conflictingAlts = configs.GetConflictingAlts()
+ }
+ return conflictingAlts
+}
+
+func (p *ParserATNSimulator) GetTokenName(t int) string {
+ if t == TokenEOF {
+ return "EOF"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil {
+ if t >= len(p.parser.GetLiteralNames()) {
+ fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ","))
+ // fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect
+ } else {
+ return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+ }
+
+ return strconv.Itoa(t)
+}
+
+func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
+ return p.GetTokenName(input.LA(1))
+}
+
+// Used for debugging in AdaptivePredict around execATN but I cut
+// it out for clarity now that the algorithm works well. We can leave this
+// "dead" code for a bit.
+//
+func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
+
+ panic("Not implemented")
+
+ // fmt.Println("dead end configs: ")
+ // var decs = nvae.deadEndConfigs
+ //
+	// for i:=0; i<len(decs); i++ {
+	//	c := decs[i]
+	//	var trans = "no edges"
+	//	if (len(c.state.GetTransitions())>0) {
+ // var t = c.state.GetTransitions()[0]
+ // if t2, ok := t.(*AtomTransition); ok {
+ // trans = "Atom "+ p.GetTokenName(t2.label)
+ // } else if t3, ok := t.(SetTransition); ok {
+ // _, ok := t.(*NotSetTransition)
+ //
+ // var s string
+ // if (ok){
+ // s = "~"
+ // }
+ //
+ // trans = s + "Set " + t3.set
+ // }
+ // }
+ // fmt.Errorf(c.String(p.parser, true) + ":" + trans)
+ // }
+}
+
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException {
+ return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
+}
+
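+// getUniqueAlt returns the single alternative predicted by every configuration
+// in the set, or ATNInvalidAltNumber if more than one alternative is predicted.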
+func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
+ alt := ATNInvalidAltNumber
+ for _, c := range configs.GetItems() {
+ if alt == ATNInvalidAltNumber {
+ alt = c.GetAlt() // found first alt
+ } else if c.GetAlt() != alt {
+ return ATNInvalidAltNumber
+ }
+ }
+ return alt
+}
+
+//
+// Add an edge to the DFA, if possible. This method calls
+// {@link //addDFAState} to ensure the {@code to} state is present in the
+// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
+// range of edges that can be represented in the DFA tables, this method
+// returns without adding the edge to the DFA.
+//
+// If {@code to} is {@code nil}, this method returns {@code nil}.
+// Otherwise, this method returns the {@link DFAState} returned by calling
+// {@link //addDFAState} for the {@code to} state.
+//
+// @param dfa The DFA
+// @param from The source state for the edge
+// @param t The input symbol
+// @param to The target state for the edge
+//
+// @return If {@code to} is {@code nil}, this method returns {@code nil};
+// otherwise this method returns the result of calling {@link //addDFAState}
+// on {@code to}
+//
+func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
+ if ParserATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
+ }
+ if to == nil {
+ return nil
+ }
+ to = p.addDFAState(dfa, to) // used existing if possible not incoming
+ if from == nil || t < -1 || t > p.atn.maxTokenType {
+ return to
+ }
+ if from.getEdges() == nil {
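+		// The edge table has maxTokenType+2 slots: symbols are shifted by +1
+		// so that EOF (-1) lands in slot 0.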
+ from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
+ }
+ from.setIthEdge(t+1, to) // connect
+
+ if ParserATNSimulatorDebug {
+ var names []string
+ if p.parser != nil {
+ names = p.parser.GetLiteralNames()
+ }
+
+ fmt.Println("DFA=\n" + dfa.String(names, nil))
+ }
+ return to
+}
+
+//
+// Add state {@code D} to the DFA if it is not already present, and return
+// the actual instance stored in the DFA. If a state equivalent to {@code D}
+// is already in the DFA, the existing state is returned. Otherwise this
+// method returns {@code D} after adding it to the DFA.
+//
+// If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and
+// does not change the DFA.
+//
+// @param dfa The dfa
+// @param D The DFA state to add
+// @return The state stored in the DFA. This will be either the existing
+// state if {@code D} is already in the DFA, or {@code D} itself if the
+// state was not already present.
+//
+func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
+ if d == ATNSimulatorError {
+ return d
+ }
+ hash := d.hash()
+ existing, ok := dfa.getState(hash)
+ if ok {
+ return existing
+ }
+ d.stateNumber = dfa.numStates()
+ if !d.configs.ReadOnly() {
+ d.configs.OptimizeConfigs(p.BaseATNSimulator)
+ d.configs.SetReadOnly(true)
+ }
+ dfa.setState(hash, d)
+ if ParserATNSimulatorDebug {
+		fmt.Println("adding new DFA state: " + d.String())
+ }
+ return d
+}
+
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
+
+// If context sensitive parsing, we know it's ambiguity not conflict//
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int,
+ exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
new file mode 100644
index 000000000..49cd10c5f
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
@@ -0,0 +1,362 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "reflect"
+ "strconv"
+)
+
+type ParserRuleContext interface {
+ RuleContext
+
+ SetException(RecognitionException)
+
+ AddTokenNode(token Token) *TerminalNodeImpl
+ AddErrorNode(badToken Token) *ErrorNodeImpl
+
+ EnterRule(listener ParseTreeListener)
+ ExitRule(listener ParseTreeListener)
+
+ SetStart(Token)
+ GetStart() Token
+
+ SetStop(Token)
+ GetStop() Token
+
+ AddChild(child RuleContext) RuleContext
+ RemoveLastChild()
+}
+
+type BaseParserRuleContext struct {
+ *BaseRuleContext
+
+ start, stop Token
+ exception RecognitionException
+ children []Tree
+}
+
+func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
+ prc := new(BaseParserRuleContext)
+
+ prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
+
+ prc.RuleIndex = -1
+	// If we are debugging or building a parse tree for a Visitor,
+	// we need to track all of the tokens and rule invocations associated
+	// with this rule's context. This is empty for parsing w/o tree construction
+	// because we don't need to track the details about
+	// how we parse this rule.
+ prc.children = nil
+ prc.start = nil
+ prc.stop = nil
+	// The exception that forced this rule to return. If the rule successfully
+	// completed, this is {@code nil}.
+ prc.exception = nil
+
+ return prc
+}
+
+func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
+ prc.exception = e
+}
+
+func (prc *BaseParserRuleContext) GetChildren() []Tree {
+ return prc.children
+}
+
+func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
+ // from RuleContext
+ prc.parentCtx = ctx.parentCtx
+ prc.invokingState = ctx.invokingState
+ prc.children = nil
+ prc.start = ctx.start
+ prc.stop = ctx.stop
+}
+
+func (prc *BaseParserRuleContext) GetText() string {
+ if prc.GetChildCount() == 0 {
+ return ""
+ }
+
+ var s string
+ for _, child := range prc.children {
+ s += child.(ParseTree).GetText()
+ }
+
+ return s
+}
+
+// Double dispatch methods for listeners
+func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
+}
+
+func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
+}
+
+// Does not set the parent link; other add methods do that.
+func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+// Used by EnterOuterAlt to toss out a RuleContext previously added as
+// we entered a rule. If we have a label, we will need to remove the
+// generic ruleContext object.
+func (prc *BaseParserRuleContext) RemoveLastChild() {
+ if prc.children != nil && len(prc.children) > 0 {
+ prc.children = prc.children[0 : len(prc.children)-1]
+ }
+}
+
+func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
+
+ node := NewTerminalNodeImpl(token)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+
+}
+
+func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
+ node := NewErrorNodeImpl(badToken)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+}
+
+func (prc *BaseParserRuleContext) GetChild(i int) Tree {
+	if prc.children != nil && i >= 0 && i < len(prc.children) {
+ return prc.children[i]
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
+ if childType == nil {
+ return prc.GetChild(i).(RuleContext)
+ }
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if reflect.TypeOf(child) == childType {
+ if i == 0 {
+ return child.(RuleContext)
+ }
+
+ i--
+ }
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
+ return TreesStringTree(prc, ruleNames, recog)
+}
+
+func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
+ return visitor.VisitChildren(prc)
+}
+
+func (prc *BaseParserRuleContext) SetStart(t Token) {
+ prc.start = t
+}
+
+func (prc *BaseParserRuleContext) GetStart() Token {
+ return prc.start
+}
+
+func (prc *BaseParserRuleContext) SetStop(t Token) {
+ prc.stop = t
+}
+
+func (prc *BaseParserRuleContext) GetStop() Token {
+ return prc.stop
+}
+
+func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if c2, ok := child.(TerminalNode); ok {
+ if c2.GetSymbol().GetTokenType() == ttype {
+ if i == 0 {
+ return c2
+ }
+
+ i--
+ }
+ }
+ }
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
+ if prc.children == nil {
+ return make([]TerminalNode, 0)
+ }
+
+ tokens := make([]TerminalNode, 0)
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if tchild, ok := child.(TerminalNode); ok {
+ if tchild.GetSymbol().GetTokenType() == ttype {
+ tokens = append(tokens, tchild)
+ }
+ }
+ }
+
+ return tokens
+}
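+
+// As a usage sketch, generated contexts call GetTokens to collect all
+// terminals of one token type under a rule (MyLexerID and exprCtx are
+// assumed names from a hypothetical generated parser, not part of this
+// runtime):
+//
+//	for _, id := range exprCtx.GetTokens(MyLexerID) {
+//		fmt.Println(id.GetText())
+//	}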
+
+func (prc *BaseParserRuleContext) GetPayload() interface{} {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
+ if prc.children == nil || i < 0 || i >= len(prc.children) {
+ return nil
+ }
+
+ j := -1 // what element have we found with ctxType?
+ for _, o := range prc.children {
+
+ childType := reflect.TypeOf(o)
+
+ if childType.Implements(ctxType) {
+ j++
+ if j == i {
+ return o.(RuleContext)
+ }
+ }
+ }
+ return nil
+}
+
+// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do
+// check for convertibility
+
+func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
+ return prc.getChild(ctxType, i)
+}
+
+func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
+ if prc.children == nil {
+ return make([]RuleContext, 0)
+ }
+
+ contexts := make([]RuleContext, 0)
+
+ for _, child := range prc.children {
+ childType := reflect.TypeOf(child)
+
+ if childType.ConvertibleTo(ctxType) {
+ contexts = append(contexts, child.(RuleContext))
+ }
+ }
+ return contexts
+}
+
+func (prc *BaseParserRuleContext) GetChildCount() int {
+ if prc.children == nil {
+ return 0
+ }
+
+ return len(prc.children)
+}
+
+func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
+ if prc.start == nil || prc.stop == nil {
+ return TreeInvalidInterval
+ }
+
+ return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
+}
+
+// need to manage circular dependencies, so export now
+
+// String returns the rule invocation stack for this context as a bracketed
+// list, innermost rule first (for example "[factor term expr]"). Rule names
+// are used when supplied; otherwise the invoking state numbers are printed.
+
+func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
+
+ var p ParserRuleContext = prc
+ s := "["
+ for p != nil && p != stop {
+ if ruleNames == nil {
+ if !p.IsEmpty() {
+ s += strconv.Itoa(p.GetInvokingState())
+ }
+ } else {
+ ri := p.GetRuleIndex()
+ var ruleName string
+ if ri >= 0 && ri < len(ruleNames) {
+ ruleName = ruleNames[ri]
+ } else {
+ ruleName = strconv.Itoa(ri)
+ }
+ s += ruleName
+ }
+ if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
+ s += " "
+ }
+ pi := p.GetParent()
+ if pi != nil {
+ p = pi.(ParserRuleContext)
+ } else {
+ p = nil
+ }
+ }
+ s += "]"
+ return s
+}
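+
+// A minimal sketch of rendering the invocation stack (ctx and parser are
+// assumed to come from a hypothetical generated parser):
+//
+//	fmt.Println(ctx.String(parser.GetRuleNames(), nil))
+//	// prints something like: [factor term expr]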
+
+var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
+
+type InterpreterRuleContext interface {
+ ParserRuleContext
+}
+
+type BaseInterpreterRuleContext struct {
+ *BaseParserRuleContext
+}
+
+func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
+
+ prc := new(BaseInterpreterRuleContext)
+
+ prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
+
+ prc.RuleIndex = ruleIndex
+
+ return prc
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
new file mode 100644
index 000000000..9fdfd52b2
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
@@ -0,0 +1,751 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// Represents {@code $} in local context prediction, which means wildcard:
+// {@code * + x = *}.
+const (
+ BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// Represents {@code $} in an array in full context mode, when {@code $}
+// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
+// {@code $} = {@link //EmptyReturnState}.
+// /
+
+var (
+ BasePredictionContextglobalNodeCount = 1
+ BasePredictionContextid = BasePredictionContextglobalNodeCount
+)
+
+type PredictionContext interface {
+ hash() int
+ GetParent(int) PredictionContext
+ getReturnState(int) int
+ equals(PredictionContext) bool
+ length() int
+ isEmpty() bool
+ hasEmptyPath() bool
+ String() string
+}
+
+type BasePredictionContext struct {
+ cachedHash int
+}
+
+func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
+ pc := new(BasePredictionContext)
+ pc.cachedHash = cachedHash
+
+ return pc
+}
+
+func (b *BasePredictionContext) isEmpty() bool {
+ return false
+}
+
+func calculateHash(parent PredictionContext, returnState int) int {
+ h := murmurInit(1)
+ h = murmurUpdate(h, parent.hash())
+ h = murmurUpdate(h, returnState)
+ return murmurFinish(h, 2)
+}
+
+var _emptyPredictionContextHash int
+
+func init() {
+ _emptyPredictionContextHash = murmurInit(1)
+ _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+ return _emptyPredictionContextHash
+}
+
+// Used to cache {@link BasePredictionContext} objects. It is used for the
+// shared context cache associated with contexts in DFA states. This cache
+// can be used for both lexers and parsers.
+
+type PredictionContextCache struct {
+ cache map[PredictionContext]PredictionContext
+}
+
+func NewPredictionContextCache() *PredictionContextCache {
+ t := new(PredictionContextCache)
+ t.cache = make(map[PredictionContext]PredictionContext)
+ return t
+}
+
+// Add a context to the cache and return it. If the context already exists,
+// return that one instead and do not add a new context to the cache.
+// Protect the shared cache from unsafe thread access.
+//
+func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
+ if ctx == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY
+ }
+ existing := p.cache[ctx]
+ if existing != nil {
+ return existing
+ }
+ p.cache[ctx] = ctx
+ return ctx
+}
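+
+// A minimal usage sketch (returnState 7 is an arbitrary example value):
+//
+//	cache := NewPredictionContextCache()
+//	ctx := SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, 7)
+//	ctx = cache.add(ctx) // stores ctx and returns it
+//
+// Note that the map is keyed on the interface value itself, so a later add
+// only returns an existing entry for the identical object.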
+
+func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
+ return p.cache[ctx]
+}
+
+func (p *PredictionContextCache) length() int {
+ return len(p.cache)
+}
+
+type SingletonPredictionContext interface {
+ PredictionContext
+}
+
+type BaseSingletonPredictionContext struct {
+ *BasePredictionContext
+
+ parentCtx PredictionContext
+ returnState int
+}
+
+func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
+ var cachedHash int
+ if parent != nil {
+ cachedHash = calculateHash(parent, returnState)
+ } else {
+ cachedHash = calculateEmptyHash()
+ }
+
+ s := new(BaseSingletonPredictionContext)
+ s.BasePredictionContext = NewBasePredictionContext(cachedHash)
+
+ s.parentCtx = parent
+ s.returnState = returnState
+
+ return s
+}
+
+func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
+ if returnState == BasePredictionContextEmptyReturnState && parent == nil {
+ // someone can pass in the bits of an array ctx that mean $
+ return BasePredictionContextEMPTY
+ }
+
+ return NewBaseSingletonPredictionContext(parent, returnState)
+}
+
+func (b *BaseSingletonPredictionContext) length() int {
+ return 1
+}
+
+func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
+ return b.parentCtx
+}
+
+func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
+ return b.returnState
+}
+
+func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
+ return b.returnState == BasePredictionContextEmptyReturnState
+}
+
+func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
+ return false
+ } else if b.hash() != other.hash() {
+ return false // can't be same if hash is different
+ }
+
+ otherP := other.(*BaseSingletonPredictionContext)
+
+ if b.returnState != other.getReturnState(0) {
+ return false
+ } else if b.parentCtx == nil {
+ return otherP.parentCtx == nil
+ }
+
+ return b.parentCtx.equals(otherP.parentCtx)
+}
+
+func (b *BaseSingletonPredictionContext) hash() int {
+ return b.cachedHash
+}
+
+func (b *BaseSingletonPredictionContext) String() string {
+ var up string
+
+ if b.parentCtx == nil {
+ up = ""
+ } else {
+ up = b.parentCtx.String()
+ }
+
+ if len(up) == 0 {
+ if b.returnState == BasePredictionContextEmptyReturnState {
+ return "$"
+ }
+
+ return strconv.Itoa(b.returnState)
+ }
+
+ return strconv.Itoa(b.returnState) + " " + up
+}
+
+var BasePredictionContextEMPTY = NewEmptyPredictionContext()
+
+type EmptyPredictionContext struct {
+ *BaseSingletonPredictionContext
+}
+
+func NewEmptyPredictionContext() *EmptyPredictionContext {
+
+ p := new(EmptyPredictionContext)
+
+ p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
+
+ return p
+}
+
+func (e *EmptyPredictionContext) isEmpty() bool {
+ return true
+}
+
+func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
+ return nil
+}
+
+func (e *EmptyPredictionContext) getReturnState(index int) int {
+ return e.returnState
+}
+
+func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
+ return e == other
+}
+
+func (e *EmptyPredictionContext) String() string {
+ return "$"
+}
+
+type ArrayPredictionContext struct {
+ *BasePredictionContext
+
+ parents []PredictionContext
+ returnStates []int
+}
+
+func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
+ // Parent can be nil only if full ctx mode and we make an array
+ // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
+ // nil parent and
+ // returnState == {@link //EmptyReturnState}.
+ hash := murmurInit(1)
+
+ for _, parent := range parents {
+ hash = murmurUpdate(hash, parent.hash())
+ }
+
+ for _, returnState := range returnStates {
+ hash = murmurUpdate(hash, returnState)
+ }
+
+ hash = murmurFinish(hash, len(parents)<<1)
+
+ c := new(ArrayPredictionContext)
+ c.BasePredictionContext = NewBasePredictionContext(hash)
+
+ c.parents = parents
+ c.returnStates = returnStates
+
+ return c
+}
+
+func (a *ArrayPredictionContext) GetReturnStates() []int {
+ return a.returnStates
+}
+
+func (a *ArrayPredictionContext) hasEmptyPath() bool {
+ return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
+}
+
+func (a *ArrayPredictionContext) isEmpty() bool {
+ // since EmptyReturnState can only appear in the last position, we
+ // don't need to verify that size==1
+ return a.returnStates[0] == BasePredictionContextEmptyReturnState
+}
+
+func (a *ArrayPredictionContext) length() int {
+ return len(a.returnStates)
+}
+
+func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
+ return a.parents[index]
+}
+
+func (a *ArrayPredictionContext) getReturnState(index int) int {
+ return a.returnStates[index]
+}
+
+func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
+ if _, ok := other.(*ArrayPredictionContext); !ok {
+ return false
+ } else if a.cachedHash != other.hash() {
+ return false // can't be same if hash is different
+ } else {
+ otherP := other.(*ArrayPredictionContext)
+ return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents
+ }
+}
+
+func (a *ArrayPredictionContext) hash() int {
+ return a.BasePredictionContext.cachedHash
+}
+
+func (a *ArrayPredictionContext) String() string {
+ if a.isEmpty() {
+ return "[]"
+ }
+
+ s := "["
+ for i := 0; i < len(a.returnStates); i++ {
+ if i > 0 {
+ s = s + ", "
+ }
+ if a.returnStates[i] == BasePredictionContextEmptyReturnState {
+ s = s + "$"
+ continue
+ }
+ s = s + strconv.Itoa(a.returnStates[i])
+ if a.parents[i] != nil {
+ s = s + " " + a.parents[i].String()
+ } else {
+ s = s + "nil"
+ }
+ }
+
+ return s + "]"
+}
+
+// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
+// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
+// /
+func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
+ if outerContext == nil {
+ outerContext = RuleContextEmpty
+ }
+ // if we are in RuleContext of start rule, s, then BasePredictionContext
+ // is EMPTY. Nobody called us. (if we are empty, return empty)
+ if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
+ return BasePredictionContextEMPTY
+ }
+ // If we have a parent, convert it to a BasePredictionContext graph
+ parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
+ state := a.states[outerContext.GetInvokingState()]
+ transition := state.GetTransitions()[0]
+
+ return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
+}
+
+func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ // share same graph if both same
+ if a == b {
+ return a
+ }
+
+ ac, ok1 := a.(*BaseSingletonPredictionContext)
+ bc, ok2 := b.(*BaseSingletonPredictionContext)
+
+ if ok1 && ok2 {
+ return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
+ }
+	// At least one of a or b is an array.
+	// If one is $ and rootIsWildcard, return $ as the wildcard.
+ if rootIsWildcard {
+ if _, ok := a.(*EmptyPredictionContext); ok {
+ return a
+ }
+ if _, ok := b.(*EmptyPredictionContext); ok {
+ return b
+ }
+ }
+ // convert singleton so both are arrays to normalize
+ if _, ok := a.(*BaseSingletonPredictionContext); ok {
+ a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
+ }
+ if _, ok := b.(*BaseSingletonPredictionContext); ok {
+ b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
+ }
+ return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
+}
+
+//
+// Merge two {@link SingletonBasePredictionContext} instances.
+//
+// Stack tops equal and parents merge to the same graph: return the left
+// graph.
+//
+// Same stack top but parents differ: merge the parents, giving an array
+// node, then the remainders of those graphs. A new root node is created to
+// point to the merged parents.
+//
+// Different stack tops pointing to the same parent: make an array node for
+// the root where both elements point to the same (original) parent.
+//
+// Different stack tops pointing to different parents: make an array node
+// for the root where each element points to the corresponding original
+// parent.
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// @param mergeCache
+// /
+func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ if mergeCache != nil {
+ previous := mergeCache.Get(a.hash(), b.hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ previous = mergeCache.Get(b.hash(), a.hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ }
+
+ rootMerge := mergeRoot(a, b, rootIsWildcard)
+ if rootMerge != nil {
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), rootMerge)
+ }
+ return rootMerge
+ }
+ if a.returnState == b.returnState {
+ parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+ // if parent is same as existing a or b parent or reduced to a parent,
+ // return it
+ if parent == a.parentCtx {
+ return a // ax + bx = ax, if a=b
+ }
+ if parent == b.parentCtx {
+ return b // ax + bx = bx, if a=b
+ }
+ // else: ax + ay = a'[x,y]
+ // merge parents x and y, giving array node with x,y then remainders
+		// of those graphs. dup a; a' points at the merged array.
+		// New joined parent, so create a new singleton pointing to it, a'.
+ spc := SingletonBasePredictionContextCreate(parent, a.returnState)
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), spc)
+ }
+ return spc
+ }
+ // a != b payloads differ
+ // see if we can collapse parents due to $+x parents if local ctx
+ var singleParent PredictionContext
+ if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
+ // bx =
+ // [a,b]x
+ singleParent = a.parentCtx
+ }
+ if singleParent != nil { // parents are same
+ // sort payloads and use same parent
+ payloads := []int{a.returnState, b.returnState}
+ if a.returnState > b.returnState {
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ }
+ parents := []PredictionContext{singleParent, singleParent}
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), apc)
+ }
+ return apc
+ }
+	// Parents differ and we can't merge them, so just pack them together
+	// into an array:
+	// ax + by = [ax,by]
+ payloads := []int{a.returnState, b.returnState}
+ parents := []PredictionContext{a.parentCtx, b.parentCtx}
+ if a.returnState > b.returnState { // sort by payload
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ parents = []PredictionContext{b.parentCtx, a.parentCtx}
+ }
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), apc)
+ }
+ return apc
+}
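+
+// An illustrative sketch of the "same parent, different return states" case
+// (9 and 4 are arbitrary return states; the payloads come back sorted):
+//
+//	a := NewBaseSingletonPredictionContext(BasePredictionContextEMPTY, 9)
+//	b := NewBaseSingletonPredictionContext(BasePredictionContextEMPTY, 4)
+//	m := mergeSingletons(a, b, true, nil)
+//	// m is an *ArrayPredictionContext with returnStates [4, 9]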
+
+//
+// Handle case where at least one of {@code a} or {@code b} is
+// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
+// to represent {@link //EMPTY}.
+//
+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is a superset of any graph; return {@link //EMPTY}.
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so the merged parent is
+// {@code //EMPTY}; return the left graph.
+//
+// Special case of the last merge if local context.
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+// Must keep all contexts; {@link //EMPTY} in an array is a special value
+// (and has a nil parent).
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// /
+func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
+ if rootIsWildcard {
+ if a == BasePredictionContextEMPTY {
+			return BasePredictionContextEMPTY // * + b = *
+ }
+ if b == BasePredictionContextEMPTY {
+			return BasePredictionContextEMPTY // a + * = *
+ }
+ } else {
+ if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY // $ + $ = $
+ } else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
+ payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []PredictionContext{b.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
+ payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []PredictionContext{a.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ }
+ }
+ return nil
+}
+
+//
+// Merge two {@link ArrayBasePredictionContext} instances.
+//
+// Different tops, different parents.
+//
+//
+// Shared top, same parents.
+//
+//
+// Shared top, different parents.
+//
+//
+// Shared top, all shared parents.
+//
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+//
+// /
+func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ if mergeCache != nil {
+ previous := mergeCache.Get(a.hash(), b.hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ previous = mergeCache.Get(b.hash(), a.hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ }
+ // merge sorted payloads a + b => M
+ i := 0 // walks a
+ j := 0 // walks b
+ k := 0 // walks target M array
+
+ mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
+ mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
+ // walk and merge to yield mergedParents, mergedReturnStates
+ for i < len(a.returnStates) && j < len(b.returnStates) {
+ aParent := a.parents[i]
+ bParent := b.parents[j]
+ if a.returnStates[i] == b.returnStates[j] {
+ // same payload (stack tops are equal), must yield merged singleton
+ payload := a.returnStates[i]
+ // $+$ = $
+ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
+ axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax
+ // ->
+ // ax
+ if bothDollars || axAX {
+ mergedParents[k] = aParent // choose left
+ mergedReturnStates[k] = payload
+ } else { // ax+ay -> a'[x,y]
+ mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
+ mergedParents[k] = mergedParent
+ mergedReturnStates[k] = payload
+ }
+ i++ // hop over left one as usual
+ j++ // but also Skip one in right side since we merge
+ } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
+ mergedParents[k] = aParent
+ mergedReturnStates[k] = a.returnStates[i]
+ i++
+ } else { // b > a, copy b[j] to M
+ mergedParents[k] = bParent
+ mergedReturnStates[k] = b.returnStates[j]
+ j++
+ }
+ k++
+ }
+ // copy over any payloads remaining in either array
+ if i < len(a.returnStates) {
+ for p := i; p < len(a.returnStates); p++ {
+ mergedParents[k] = a.parents[p]
+ mergedReturnStates[k] = a.returnStates[p]
+ k++
+ }
+ } else {
+ for p := j; p < len(b.returnStates); p++ {
+ mergedParents[k] = b.parents[p]
+ mergedReturnStates[k] = b.returnStates[p]
+ k++
+ }
+ }
+ // trim merged if we combined a few that had same stack tops
+ if k < len(mergedParents) { // write index < last position trim
+ if k == 1 { // for just one merged element, return singleton top
+ pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), pc)
+ }
+ return pc
+ }
+ mergedParents = mergedParents[0:k]
+ mergedReturnStates = mergedReturnStates[0:k]
+ }
+
+ M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
+
+ // if we created same array as a or b, return that instead
+ // TODO: track whether this is possible above during merge sort for speed
+ if M == a {
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), a)
+ }
+ return a
+ }
+ if M == b {
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), b)
+ }
+ return b
+ }
+ combineCommonParents(mergedParents)
+
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), M)
+ }
+ return M
+}
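+
+// A minimal sketch of the sorted merge (all parents EMPTY; the return states
+// are arbitrary example values):
+//
+//	e := BasePredictionContextEMPTY
+//	x := NewArrayPredictionContext([]PredictionContext{e, e}, []int{1, 3})
+//	y := NewArrayPredictionContext([]PredictionContext{e, e}, []int{2, 3})
+//	m := mergeArrays(x, y, true, nil)
+//	// m is an *ArrayPredictionContext with returnStates [1, 2, 3]; the
+//	// shared top 3 collapses to a single entry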
+
+//
+// Make a pass over all M {@code parents} and merge any that are
+// {@code equals()}.
+//
+func combineCommonParents(parents []PredictionContext) {
+ uniqueParents := make(map[PredictionContext]PredictionContext)
+
+ for p := 0; p < len(parents); p++ {
+ parent := parents[p]
+ if uniqueParents[parent] == nil {
+ uniqueParents[parent] = parent
+ }
+ }
+ for q := 0; q < len(parents); q++ {
+ parents[q] = uniqueParents[parents[q]]
+ }
+}
+
+func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
+
+ if context.isEmpty() {
+ return context
+ }
+ existing := visited[context]
+ if existing != nil {
+ return existing
+ }
+ existing = contextCache.Get(context)
+ if existing != nil {
+ visited[context] = existing
+ return existing
+ }
+ changed := false
+ parents := make([]PredictionContext, context.length())
+ for i := 0; i < len(parents); i++ {
+ parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+ if changed || parent != context.GetParent(i) {
+ if !changed {
+ parents = make([]PredictionContext, context.length())
+ for j := 0; j < context.length(); j++ {
+ parents[j] = context.GetParent(j)
+ }
+ changed = true
+ }
+ parents[i] = parent
+ }
+ }
+ if !changed {
+ contextCache.add(context)
+ visited[context] = context
+ return context
+ }
+ var updated PredictionContext
+ if len(parents) == 0 {
+ updated = BasePredictionContextEMPTY
+ } else if len(parents) == 1 {
+ updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+ } else {
+ updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
+ }
+ contextCache.add(updated)
+ visited[updated] = updated
+ visited[context] = updated
+
+ return updated
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
new file mode 100644
index 000000000..15718f912
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
@@ -0,0 +1,553 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// This enumeration defines the prediction modes available in ANTLR 4 along with
+// utility methods for analyzing configuration sets for conflicts and/or
+// ambiguities.
+
+const (
+ //
+ // The SLL(*) prediction mode. This prediction mode ignores the current
+ // parser context when making predictions. This is the fastest prediction
+ // mode, and provides correct results for many grammars. This prediction
+ // mode is more powerful than the prediction mode provided by ANTLR 3, but
+ // may result in syntax errors for grammar and input combinations which are
+ // not SLL.
+ //
+ //
+ // When using this prediction mode, the parser will either return a correct
+ // parse tree (i.e. the same parse tree that would be returned with the
+ // {@link //LL} prediction mode), or it will Report a syntax error. If a
+ // syntax error is encountered when using the {@link //SLL} prediction mode,
+ // it may be due to either an actual syntax error in the input or indicate
+ // that the particular combination of grammar and input requires the more
+ // powerful {@link //LL} prediction abilities to complete successfully.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeSLL = 0
+ //
+ // The LL(*) prediction mode. This prediction mode allows the current parser
+ // context to be used for resolving SLL conflicts that occur during
+ // prediction. This is the fastest prediction mode that guarantees correct
+ // parse results for all combinations of grammars with syntactically correct
+ // inputs.
+ //
+ //
+ // When using this prediction mode, the parser will make correct decisions
+ // for all syntactically-correct grammar and input combinations. However, in
+ // cases where the grammar is truly ambiguous this prediction mode might not
+ // Report a precise answer for exactly which alternatives are
+ // ambiguous.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLL = 1
+ //
+ // The LL(*) prediction mode with exact ambiguity detection. In addition to
+ // the correctness guarantees provided by the {@link //LL} prediction mode,
+ // this prediction mode instructs the prediction algorithm to determine the
+ // complete and exact set of ambiguous alternatives for every ambiguous
+ // decision encountered while parsing.
+ //
+ //
+ // This prediction mode may be used for diagnosing ambiguities during
+ // grammar development. Due to the performance overhead of calculating sets
+ // of ambiguous alternatives, this prediction mode should be avoided when
+ // the exact results are not necessary.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLLExactAmbigDetection = 2
+)
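+
+// A typical way to select a mode (a sketch; it assumes the generated parser
+// embeds BaseParser, whose GetInterpreter returns the ParserATNSimulator):
+//
+//	p := NewMyGrammarParser(tokens) // hypothetical generated parser
+//	p.GetInterpreter().SetPredictionMode(PredictionModeSLL)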
+
+//
+// Computes the SLL prediction termination condition.
+//
+//
+// This method computes the SLL prediction termination condition for both of
+// the following cases.
+//
+//
+// - The usual SLL+LL fallback upon SLL conflict
+// - Pure SLL without LL fallback
+//
+//
+// COMBINED SLL+LL PARSING
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative (e.g.
+// {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// HEURISTIC
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }
+//
+// When the ATN simulation reaches the state before {@code ';'}, it has a
+// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
+// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
+// processing this node because alternative two has another way to continue,
+// via {@code [6|2|[]]}.
+//
+// It also lets us continue for this rule:
+//
+// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+// PURE SLL PARSING
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+// PREDICATES IN SLL+LL PARSING
+//
+// SLL decisions don't evaluate predicates until after they reach DFA stop
+// states because they need to create the DFA cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
+// semantic predicate contexts so we might see two configurations like the
+// following.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p})}
+//
+// Before testing these configurations against others, we have to merge
+// {@code x} and {@code x'} (without modifying the existing configurations).
+// For example, we test {@code (x+x')==x''} when looking for conflicts in
+// the following configurations.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
+//
+// If the configuration set has predicates (as indicated by
+// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
+// the configurations to strip out all of the predicates so that a standard
+// {@link ATNConfigSet} will merge everything ignoring predicates.
+//
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
+ // Configs in rule stop states indicate reaching the end of the decision
+ // rule (local context) or end of start rule (full context). If all
+ // configs meet this condition, then none of the configurations is able
+ // to Match additional input so we terminate prediction.
+ //
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return true
+ }
+ // pure SLL mode parsing
+ if mode == PredictionModeSLL {
+ // Don't bother with combining configs from different semantic
+ // contexts if we can fail over to full LL costs more time
+ // since we'll often fail over anyway.
+ if configs.HasSemanticContext() {
+ // dup configs, tossing out semantic predicates
+ dup := NewBaseATNConfigSet(false)
+ for _, c := range configs.GetItems() {
+
+ // NewBaseATNConfig({semanticContext:}, c)
+ c = NewBaseATNConfig2(c, SemanticContextNone)
+ dup.Add(c, nil)
+ }
+ configs = dup
+ }
+ // now we have combined contexts for configs with dissimilar preds
+ }
+ // pure SLL or combined SLL+LL mode parsing
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
+}
+
+// Checks if any configuration in {@code configs} is in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if any configuration in {@code configs} is in a
+// {@link RuleStopState}, otherwise {@code false}
+func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
+ for _, c := range configs.GetItems() {
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// Checks if all configurations in {@code configs} are in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if all configurations in {@code configs} are in a
+// {@link RuleStopState}, otherwise {@code false}
+func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
+
+ for _, c := range configs.GetItems() {
+ if _, ok := c.GetState().(*RuleStopState); !ok {
+ return false
+ }
+ }
+ return true
+}
+
+//
+// Full LL prediction termination.
+//
+// Can we stop looking ahead during ATN simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+// The basic idea is to split the set of configurations {@code C}, into
+// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
+// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
+// and {@code (s, j, ctx, _)} for {@code i!=j}.
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
+// {@code C} holding {@code s} and {@code ctx} fixed.
+//
+// Or in pseudo-code, for each configuration {@code c} in {@code C}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+//
+// The values in {@code map} are the set of {@code A_s,ctx} sets.
+//
+// If {@code |A_s,ctx|=1} then there is no conflict associated with
+// {@code s} and {@code ctx}.
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// more lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal; you will still have the conflict. It's just inefficient. It
+// might even keep looking until the end of the file.
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+// CONFLICTING CONFIGS
+//
+// Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
+// when {@code i!=j} but {@code x=x'}. Because we merge all
+// {@code (s, i, _)} configurations together, that means that there are at
+// most {@code n} configurations associated with state {@code s} for
+// {@code n} possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts {@code x} and
+// {@code x'}. Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either {@code x} or
+// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
+// is the superset, then {@code i} is the only possible prediction since the
+// others resolve to {@code min(i)} as well. However, if {@code x} is
+// associated with {@code j>i} then at least one stack configuration for
+// {@code j} is not in conflict with alternative {@code i}. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing an equality check between {@code x} and
+// {@code x'} that lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+// CONTINUE/STOP RULE
+//
+// Continue if union of resolved alternative sets from non-conflicting and
+// conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
+// alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+// CASES
+//
+//
+//
+// - no conflicts and more than 1 alternative in set => continue
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
+// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
+// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+// {@code {1,3}} => continue
+//
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
+// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+// {@code {1}} => stop and predict 1
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {1}} = {@code {1}} => stop and predict 1, can announce
+// ambiguity {@code {1,2}}
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
+// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {2}} = {@code {1,2}} => continue
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
+// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {3}} = {@code {1,3}} => continue
+//
+//
+//
+// EXACT AMBIGUITY DETECTION
+//
+// If all states Report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set.
+//
+// |A_i|>1 and
+// A_i = A_j for all i, j.
+//
+// In other words, we continue examining lookahead until all {@code A_i}
+// have more than one alternative and all {@code A_i} are the same. If
+// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
+// because the resolved set is {@code {1}}. To determine what the real
+// ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three so we keep going. We can only stop prediction when
+// we need exact ambiguity detection when the sets look like
+// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
+//
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
+ return PredictionModegetSingleViableAlt(altsets)
+}
+
+//
+// Determines if every alternative subset in {@code altsets} contains more
+// than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every {@link BitSet} in {@code altsets} has
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+//
+func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
+ return !PredictionModehasNonConflictingAltSet(altsets)
+}
+
+//
+// Determines if any single alternative subset in {@code altsets} contains
+// exactly one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
+//
+func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+//
+// Determines if any single alternative subset in {@code altsets} contains
+// more than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+//
+func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
+//
+// Determines if every alternative subset in {@code altsets} is equivalent.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every member of {@code altsets} is equal to the
+// others, otherwise {@code false}
+//
+func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
+ var first *BitSet
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if first == nil {
+ first = alts
+ } else if alts != first {
+ return false
+ }
+ }
+
+ return true
+}
+
+//
+// Returns the unique alternative predicted by all alternative subsets in
+// {@code altsets}. If no such alternative exists, this method returns
+// {@link ATN//INVALID_ALT_NUMBER}.
+//
+// @param altsets a collection of alternative subsets
+//
+func PredictionModegetUniqueAlt(altsets []*BitSet) int {
+ all := PredictionModeGetAlts(altsets)
+ if all.length() == 1 {
+ return all.minValue()
+ }
+
+ return ATNInvalidAltNumber
+}
+
+// Gets the complete set of represented alternatives for a collection of
+// alternative subsets. This method returns the union of each {@link BitSet}
+// in {@code altsets}.
+//
+// @param altsets a collection of alternative subsets
+// @return the set of represented alternatives in {@code altsets}
+//
+func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
+ all := NewBitSet()
+ for _, alts := range altsets {
+ all.or(alts)
+ }
+ return all
+}
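+
+// A small sketch of the union (the alternative numbers are arbitrary):
+//
+//	s1 := NewBitSet()
+//	s1.add(1)
+//	s1.add(2)
+//	s2 := NewBitSet()
+//	s2.add(1)
+//	s2.add(3)
+//	all := PredictionModeGetAlts([]*BitSet{s1, s2})
+//	// all now contains {1, 2, 3}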
+
+//
+// This func gets the conflicting alt subsets from a configuration set.
+// For each configuration {@code c} in {@code configs}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+//
+func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
+ configToAlts := make(map[int]*BitSet)
+
+ for _, c := range configs.GetItems() {
+ key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash()
+
+ alts, ok := configToAlts[key]
+ if !ok {
+ alts = NewBitSet()
+ configToAlts[key] = alts
+ }
+ alts.add(c.GetAlt())
+ }
+
+ values := make([]*BitSet, 0, 10)
+ for _, v := range configToAlts {
+ values = append(values, v)
+ }
+ return values
+}
+
+//
+// Get a map from state to alt subset from a configuration set. For each
+// configuration {@code c} in {@code configs}:
+//
+//
+// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
+//
+//
+func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
+ m := NewAltDict()
+
+ for _, c := range configs.GetItems() {
+ alts := m.Get(c.GetState().String())
+ if alts == nil {
+ alts = NewBitSet()
+ m.put(c.GetState().String(), alts)
+ }
+ alts.(*BitSet).add(c.GetAlt())
+ }
+ return m
+}
+
+func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
+ values := PredictionModeGetStateToAltMap(configs).values()
+ for i := 0; i < len(values); i++ {
+ if values[i].(*BitSet).length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
+ result := ATNInvalidAltNumber
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ minAlt := alts.minValue()
+ if result == ATNInvalidAltNumber {
+ result = minAlt
+ } else if result != minAlt { // more than 1 viable alt
+ return ATNInvalidAltNumber
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
new file mode 100644
index 000000000..12578cfb3
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
@@ -0,0 +1,217 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strings"
+
+ "strconv"
+)
+
+type Recognizer interface {
+ GetLiteralNames() []string
+ GetSymbolicNames() []string
+ GetRuleNames() []string
+
+ Sempred(RuleContext, int, int) bool
+ Precpred(RuleContext, int) bool
+
+ GetState() int
+ SetState(int)
+ Action(RuleContext, int, int)
+ AddErrorListener(ErrorListener)
+ RemoveErrorListeners()
+ GetATN() *ATN
+ GetErrorListenerDispatch() ErrorListener
+}
+
+type BaseRecognizer struct {
+ listeners []ErrorListener
+ state int
+
+ RuleNames []string
+ LiteralNames []string
+ SymbolicNames []string
+ GrammarFileName string
+}
+
+func NewBaseRecognizer() *BaseRecognizer {
+ rec := new(BaseRecognizer)
+ rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
+ rec.state = -1
+ return rec
+}
+
+var tokenTypeMapCache = make(map[string]int)
+var ruleIndexMapCache = make(map[string]int)
+
+func (b *BaseRecognizer) checkVersion(toolVersion string) {
+ runtimeVersion := "4.9.3"
+ if runtimeVersion != toolVersion {
+ fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
+ }
+}
+
+func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
+ panic("action not implemented on Recognizer!")
+}
+
+func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
+ b.listeners = append(b.listeners, listener)
+}
+
+func (b *BaseRecognizer) RemoveErrorListeners() {
+ b.listeners = make([]ErrorListener, 0)
+}
+
+func (b *BaseRecognizer) GetRuleNames() []string {
+ return b.RuleNames
+}
+
+func (b *BaseRecognizer) GetTokenNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetSymbolicNames() []string {
+ return b.SymbolicNames
+}
+
+func (b *BaseRecognizer) GetLiteralNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetState() int {
+ return b.state
+}
+
+func (b *BaseRecognizer) SetState(v int) {
+ b.state = v
+}
+
+//func (b *Recognizer) GetTokenTypeMap() {
+// var tokenNames = b.GetTokenNames()
+// if (tokenNames==nil) {
+// panic("The current recognizer does not provide a list of token names.")
+// }
+// var result = tokenTypeMapCache[tokenNames]
+// if(result==nil) {
+// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
+// result.EOF = TokenEOF
+// tokenTypeMapCache[tokenNames] = result
+// }
+// return result
+//}
+
+// Get a map from rule names to rule indexes.
+//
+// Used for XPath and tree pattern compilation.
+//
+func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
+
+ panic("Method not defined!")
+ // var ruleNames = b.GetRuleNames()
+ // if (ruleNames==nil) {
+ // panic("The current recognizer does not provide a list of rule names.")
+ // }
+ //
+ // var result = ruleIndexMapCache[ruleNames]
+ // if(result==nil) {
+ // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
+ // ruleIndexMapCache[ruleNames] = result
+ // }
+ // return result
+}
+
+func (b *BaseRecognizer) GetTokenType(tokenName string) int {
+ panic("Method not defined!")
+ // var ttype = b.GetTokenTypeMap()[tokenName]
+ // if (ttype !=nil) {
+ // return ttype
+ // } else {
+ // return TokenInvalidType
+ // }
+}
+
+//func (b *Recognizer) GetTokenTypeMap() map[string]int {
+// Vocabulary vocabulary = getVocabulary()
+//
+// Synchronized (tokenTypeMapCache) {
+// Map result = tokenTypeMapCache.Get(vocabulary)
+// if (result == null) {
+// result = new HashMap()
+// for (int i = 0; i < GetATN().maxTokenType; i++) {
+// String literalName = vocabulary.getLiteralName(i)
+// if (literalName != null) {
+// result.put(literalName, i)
+// }
+//
+// String symbolicName = vocabulary.GetSymbolicName(i)
+// if (symbolicName != null) {
+// result.put(symbolicName, i)
+// }
+// }
+//
+// result.put("EOF", Token.EOF)
+// result = Collections.unmodifiableMap(result)
+// tokenTypeMapCache.put(vocabulary, result)
+// }
+//
+// return result
+// }
+//}
+
+// What is the error header, normally line/character position information?
+func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
+ line := e.GetOffendingToken().GetLine()
+ column := e.GetOffendingToken().GetColumn()
+ return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
+}
+
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new Java type.
+//
+// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
+// implementations of {@link ANTLRErrorStrategy} may provide a similar
+// feature when necessary. For example, see
+// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
+//
+func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+		return "<no token>"
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+			s = "<EOF>"
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+
+ return "'" + s + "'"
+}
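+
+// For example, a token whose text is a single newline is displayed with the
+// escape made visible (recognizer and tok are assumed values):
+//
+//	recognizer.GetTokenErrorDisplay(tok)
+//	// returns the quoted, escaped text, e.g. '\n' when the token text is "\n"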
+
+func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
+ return NewProxyErrorListener(b.listeners)
+}
+
+// subclass needs to override these if there are sempreds or actions
+// that the ATN interp needs to execute
+func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
+ return true
+}
+
+func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
+ return true
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
new file mode 100644
index 000000000..600cf8c06
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// A rule context is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack. We actually carry no
+// information about the rule associated with this context (except
+// when parsing). We keep only the state number of the invoking state from
+// the ATN submachine that invoked this context. Contrast this with the s
+// pointer inside ParserRuleContext that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the subclass
+// ParserRuleContext.
+//
+// @see ParserRuleContext
+//
+
+type RuleContext interface {
+ RuleNode
+
+ GetInvokingState() int
+ SetInvokingState(int)
+
+ GetRuleIndex() int
+ IsEmpty() bool
+
+ GetAltNumber() int
+ SetAltNumber(altNumber int)
+
+ String([]string, RuleContext) string
+}
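+
+// For example, walking from the current invocation back to the root of the
+// invocation tree is a simple parent-chain loop (a sketch; ctx is any
+// RuleContext value):
+//
+//	var t Tree = ctx
+//	for t != nil {
+//		// inspect t here
+//		t = t.GetParent()
+//	}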
+
+type BaseRuleContext struct {
+ parentCtx RuleContext
+ invokingState int
+ RuleIndex int
+}
+
+func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
+
+ rn := new(BaseRuleContext)
+
+	// What context invoked this rule?
+	rn.parentCtx = parent
+
+	// What state invoked the rule associated with this context?
+	// The "return address" is the followState of invokingState.
+	// If parent is nil, this should be -1.
+ if parent == nil {
+ rn.invokingState = -1
+ } else {
+ rn.invokingState = invokingState
+ }
+
+ return rn
+}
+
+func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
+ return b
+}
+
+func (b *BaseRuleContext) SetParent(v Tree) {
+ if v == nil {
+ b.parentCtx = nil
+ } else {
+ b.parentCtx = v.(RuleContext)
+ }
+}
+
+func (b *BaseRuleContext) GetInvokingState() int {
+ return b.invokingState
+}
+
+func (b *BaseRuleContext) SetInvokingState(t int) {
+ b.invokingState = t
+}
+
+func (b *BaseRuleContext) GetRuleIndex() int {
+ return b.RuleIndex
+}
+
+func (b *BaseRuleContext) GetAltNumber() int {
+ return ATNInvalidAltNumber
+}
+
+func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
+
+// A context is empty if there is no invoking state, meaning nobody called
+// the current context.
+func (b *BaseRuleContext) IsEmpty() bool {
+ return b.invokingState == -1
+}
+
+// Return the combined text of all child nodes. This method only considers
+// tokens which have been added to the parse tree.
+//
+// Since tokens on hidden channels (e.g. whitespace or comments) are not
+// added to the parse trees, they will not appear in the output of this
+// method.
+//
+
+func (b *BaseRuleContext) GetParent() Tree {
+ return b.parentCtx
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
new file mode 100644
index 000000000..bdbaabdc2
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
@@ -0,0 +1,466 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// A tree structure used to record the semantic context in which
+// an ATN configuration is valid. It's either a single predicate,
+// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
+//
+//
+// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
+// {@link SemanticContext} within the scope of this outer class.
+//
+
+type SemanticContext interface {
+ comparable
+
+ evaluate(parser Recognizer, outerContext RuleContext) bool
+ evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
+
+ hash() int
+ String() string
+}
+
+func SemanticContextandContext(a, b SemanticContext) SemanticContext {
+ if a == nil || a == SemanticContextNone {
+ return b
+ }
+ if b == nil || b == SemanticContextNone {
+ return a
+ }
+ result := NewAND(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
+
+func SemanticContextorContext(a, b SemanticContext) SemanticContext {
+ if a == nil {
+ return b
+ }
+ if b == nil {
+ return a
+ }
+ if a == SemanticContextNone || b == SemanticContextNone {
+ return SemanticContextNone
+ }
+ result := NewOR(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
+
+type Predicate struct {
+ ruleIndex int
+ predIndex int
+ isCtxDependent bool
+}
+
+func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
+ p := new(Predicate)
+
+ p.ruleIndex = ruleIndex
+ p.predIndex = predIndex
+ p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ return p
+}
+
+//The default {@link SemanticContext}, which is semantically equivalent to
+//a predicate of the form {@code {true}?}.
+
+var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
+
+func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ return p
+}
+
+func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+
+ var localctx RuleContext
+
+ if p.isCtxDependent {
+ localctx = outerContext
+ }
+
+ return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
+}
+
+func (p *Predicate) equals(other interface{}) bool {
+ if p == other {
+ return true
+ } else if _, ok := other.(*Predicate); !ok {
+ return false
+ } else {
+ return p.ruleIndex == other.(*Predicate).ruleIndex &&
+ p.predIndex == other.(*Predicate).predIndex &&
+ p.isCtxDependent == other.(*Predicate).isCtxDependent
+ }
+}
+
+func (p *Predicate) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, p.ruleIndex)
+ h = murmurUpdate(h, p.predIndex)
+ if p.isCtxDependent {
+ h = murmurUpdate(h, 1)
+ } else {
+ h = murmurUpdate(h, 0)
+ }
+ return murmurFinish(h, 3)
+}
+
+func (p *Predicate) String() string {
+ return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
+}
+
+type PrecedencePredicate struct {
+ precedence int
+}
+
+func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
+
+ p := new(PrecedencePredicate)
+ p.precedence = precedence
+
+ return p
+}
+
+func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ return parser.Precpred(outerContext, p.precedence)
+}
+
+func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ if parser.Precpred(outerContext, p.precedence) {
+ return SemanticContextNone
+ }
+
+ return nil
+}
+
+func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
+ return p.precedence - other.precedence
+}
+
+func (p *PrecedencePredicate) equals(other interface{}) bool {
+ if p == other {
+ return true
+ } else if _, ok := other.(*PrecedencePredicate); !ok {
+ return false
+ } else {
+ return p.precedence == other.(*PrecedencePredicate).precedence
+ }
+}
+
+func (p *PrecedencePredicate) hash() int {
+ h := uint32(1)
+ h = 31*h + uint32(p.precedence)
+ return int(h)
+}
+
+func (p *PrecedencePredicate) String() string {
+ return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
+}
+
+func PrecedencePredicatefilterPrecedencePredicates(set Set) []*PrecedencePredicate {
+ result := make([]*PrecedencePredicate, 0)
+
+ set.Each(func(v interface{}) bool {
+ if c2, ok := v.(*PrecedencePredicate); ok {
+ result = append(result, c2)
+ }
+ return true
+ })
+
+ return result
+}
+
+// A semantic context which is true whenever none of the contained contexts
+// is false.
+
+type AND struct {
+ opnds []SemanticContext
+}
+
+func NewAND(a, b SemanticContext) *AND {
+
+ operands := NewArray2DHashSet(nil, nil)
+ if aa, ok := a.(*AND); ok {
+ for _, o := range aa.opnds {
+ operands.Add(o)
+ }
+ } else {
+ operands.Add(a)
+ }
+
+ if ba, ok := b.(*AND); ok {
+ for _, o := range ba.opnds {
+ operands.Add(o)
+ }
+ } else {
+ operands.Add(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence < reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Add(reduced)
+ }
+
+ vs := operands.Values()
+ opnds := make([]SemanticContext, len(vs))
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
+
+ and := new(AND)
+ and.opnds = opnds
+
+ return and
+}
+
+func (a *AND) equals(other interface{}) bool {
+ if a == other {
+ return true
+ } else if _, ok := other.(*AND); !ok {
+ return false
+ } else {
+ for i, v := range other.(*AND).opnds {
+ if !a.opnds[i].equals(v) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+//
+// {@inheritDoc}
+//
+//
+// The evaluation of predicates by a context is short-circuiting, but
+// unordered.
+//
+func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(a.opnds); i++ {
+ if !a.opnds[i].evaluate(parser, outerContext) {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+
+ for i := 0; i < len(a.opnds); i++ {
+ context := a.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == nil {
+ // The AND context is false if any element is false
+ return nil
+ } else if evaluated != SemanticContextNone {
+ // Reduce the result by Skipping true elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return a
+ }
+
+ if len(operands) == 0 {
+ // all elements were true, so the AND context is true
+ return SemanticContextNone
+ }
+
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextandContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (a *AND) hash() int {
+ h := murmurInit(37) // Init with a value different from OR
+ for _, op := range a.opnds {
+ h = murmurUpdate(h, op.hash())
+ }
+ return murmurFinish(h, len(a.opnds))
+}
+
+func (a *OR) hash() int {
+ h := murmurInit(41) // Init with a value different from AND
+ for _, op := range a.opnds {
+ h = murmurUpdate(h, op.hash())
+ }
+ return murmurFinish(h, len(a.opnds))
+}
+
+func (a *AND) String() string {
+ s := ""
+
+ for _, o := range a.opnds {
+ s += "&& " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+ return s[0:3]
+ }
+
+ return s
+}
+
+//
+// A semantic context which is true whenever at least one of the contained
+// contexts is true.
+//
+
+type OR struct {
+ opnds []SemanticContext
+}
+
+func NewOR(a, b SemanticContext) *OR {
+
+ operands := NewArray2DHashSet(nil, nil)
+ if aa, ok := a.(*OR); ok {
+ for _, o := range aa.opnds {
+ operands.Add(o)
+ }
+ } else {
+ operands.Add(a)
+ }
+
+ if ba, ok := b.(*OR); ok {
+ for _, o := range ba.opnds {
+ operands.Add(o)
+ }
+ } else {
+ operands.Add(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence > reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Add(reduced)
+ }
+
+ vs := operands.Values()
+
+ opnds := make([]SemanticContext, len(vs))
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
+
+ o := new(OR)
+ o.opnds = opnds
+
+ return o
+}
+
+func (o *OR) equals(other interface{}) bool {
+ if o == other {
+ return true
+ } else if _, ok := other.(*OR); !ok {
+ return false
+ } else {
+ for i, v := range other.(*OR).opnds {
+ if !o.opnds[i].equals(v) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+//
+// The evaluation of predicates by o context is short-circuiting, but
+// unordered.
+//
+func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(o.opnds); i++ {
+ if o.opnds[i].evaluate(parser, outerContext) {
+ return true
+ }
+ }
+ return false
+}
+
+func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+ for i := 0; i < len(o.opnds); i++ {
+ context := o.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == SemanticContextNone {
+ // The OR context is true if any element is true
+ return SemanticContextNone
+ } else if evaluated != nil {
+ // Reduce the result by Skipping false elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return o
+ }
+ if len(operands) == 0 {
+ // all elements were false, so the OR context is false
+ return nil
+ }
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextorContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (o *OR) String() string {
+ s := ""
+
+ for _, o := range o.opnds {
+ s += "|| " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+ return s[0:3]
+ }
+
+ return s
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
new file mode 100644
index 000000000..2d8e99095
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
@@ -0,0 +1,210 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type TokenSourceCharStreamPair struct {
+ tokenSource TokenSource
+ charStream CharStream
+}
+
+// A token has properties: text, type, line, character position in the line
+// (so we can ignore tabs), token channel, index, and source from which
+// we obtained this token.
+
+type Token interface {
+ GetSource() *TokenSourceCharStreamPair
+ GetTokenType() int
+ GetChannel() int
+ GetStart() int
+ GetStop() int
+ GetLine() int
+ GetColumn() int
+
+ GetText() string
+ SetText(s string)
+
+ GetTokenIndex() int
+ SetTokenIndex(v int)
+
+ GetTokenSource() TokenSource
+ GetInputStream() CharStream
+}
+
+type BaseToken struct {
+ source *TokenSourceCharStreamPair
+ tokenType int // token type of the token
+ channel int // The parser ignores everything not on DEFAULT_CHANNEL
+ start int // optional return -1 if not implemented.
+ stop int // optional return -1 if not implemented.
+ tokenIndex int // from 0..n-1 of the token object in the input stream
+ line int // line=1..n of the 1st character
+ column int // beginning of the line at which it occurs, 0..n-1
+ text string // text of the token.
+ readOnly bool
+}
+
+const (
+ TokenInvalidType = 0
+
+ // During lookahead operations, this "token" signifies we hit rule end ATN state
+ // and did not follow it despite needing to.
+ TokenEpsilon = -2
+
+ TokenMinUserTokenType = 1
+
+ TokenEOF = -1
+
+ // All tokens go to the parser (unless Skip() is called in that rule)
+ // on a particular "channel". The parser tunes to a particular channel
+ // so that whitespace etc... can go to the parser on a "hidden" channel.
+
+ TokenDefaultChannel = 0
+
+ // Anything on different channel than DEFAULT_CHANNEL is not parsed
+ // by parser.
+
+ TokenHiddenChannel = 1
+)
+
+func (b *BaseToken) GetChannel() int {
+ return b.channel
+}
+
+func (b *BaseToken) GetStart() int {
+ return b.start
+}
+
+func (b *BaseToken) GetStop() int {
+ return b.stop
+}
+
+func (b *BaseToken) GetLine() int {
+ return b.line
+}
+
+func (b *BaseToken) GetColumn() int {
+ return b.column
+}
+
+func (b *BaseToken) GetTokenType() int {
+ return b.tokenType
+}
+
+func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
+ return b.source
+}
+
+func (b *BaseToken) GetTokenIndex() int {
+ return b.tokenIndex
+}
+
+func (b *BaseToken) SetTokenIndex(v int) {
+ b.tokenIndex = v
+}
+
+func (b *BaseToken) GetTokenSource() TokenSource {
+ return b.source.tokenSource
+}
+
+func (b *BaseToken) GetInputStream() CharStream {
+ return b.source.charStream
+}
+
+type CommonToken struct {
+ *BaseToken
+}
+
+func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
+
+ t := new(CommonToken)
+
+ t.BaseToken = new(BaseToken)
+
+ t.source = source
+ t.tokenType = tokenType
+ t.channel = channel
+ t.start = start
+ t.stop = stop
+ t.tokenIndex = -1
+ if t.source.tokenSource != nil {
+ t.line = source.tokenSource.GetLine()
+ t.column = source.tokenSource.GetCharPositionInLine()
+ } else {
+ t.column = -1
+ }
+ return t
+}
+
+// An empty {@link Pair} which is used as the default value of
+// {@link //source} for tokens that do not have a source.
+
+//CommonToken.EMPTY_SOURCE = [ nil, nil ]
+
+// Constructs a New{@link CommonToken} as a copy of another {@link Token}.
+//
+//
+// If {@code oldToken} is also a {@link CommonToken} instance, the newly
+// constructed token will share a reference to the {@link //text} field and
+// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
+// be assigned the result of calling {@link //GetText}, and {@link //source}
+// will be constructed from the result of {@link Token//GetTokenSource} and
+// {@link Token//GetInputStream}.
+//
+// @param oldToken The token to copy.
+//
+func (c *CommonToken) clone() *CommonToken {
+ t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
+ t.tokenIndex = c.GetTokenIndex()
+ t.line = c.GetLine()
+ t.column = c.GetColumn()
+ t.text = c.GetText()
+ return t
+}
+
+func (c *CommonToken) GetText() string {
+ if c.text != "" {
+ return c.text
+ }
+ input := c.GetInputStream()
+ if input == nil {
+ return ""
+ }
+ n := input.Size()
+ if c.start < n && c.stop < n {
+ return input.GetTextFromInterval(NewInterval(c.start, c.stop))
+ }
+ return ""
+}
+
+func (c *CommonToken) SetText(text string) {
+ c.text = text
+}
+
+func (c *CommonToken) String() string {
+ txt := c.GetText()
+ if txt != "" {
+ txt = strings.Replace(txt, "\n", "\\n", -1)
+ txt = strings.Replace(txt, "\r", "\\r", -1)
+ txt = strings.Replace(txt, "\t", "\\t", -1)
+ } else {
+ txt = ""
+ }
+
+ var ch string
+ if c.channel > 0 {
+ ch = ",channel=" + strconv.Itoa(c.channel)
+ } else {
+ ch = ""
+ }
+
+ return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
+ txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
+ ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
new file mode 100644
index 000000000..e023978fe
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenSource interface {
+ NextToken() Token
+ Skip()
+ More()
+ GetLine() int
+ GetCharPositionInLine() int
+ GetInputStream() CharStream
+ GetSourceName() string
+ setTokenFactory(factory TokenFactory)
+ GetTokenFactory() TokenFactory
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
new file mode 100644
index 000000000..df92c8147
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenStream interface {
+ IntStream
+
+ LT(k int) Token
+
+ Get(index int) Token
+ GetTokenSource() TokenSource
+ SetTokenSource(TokenSource)
+
+ GetAllText() string
+ GetTextFromInterval(*Interval) string
+ GetTextFromRuleContext(RuleContext) string
+ GetTextFromTokens(Token, Token) string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
new file mode 100644
index 000000000..96a03f02a
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
@@ -0,0 +1,649 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+package antlr
+
+import (
+"bytes"
+"fmt"
+)
+
+
+//
+// Useful for rewriting out a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example,
+
+//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+//
+
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+
+//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");}
+// rewriter.insertAfter(u, "text after u");}
+// System.out.println(rewriter.getText());
+//
+
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+
+//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+//
+
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
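+//
+// A rough Go sketch of the same flow (NewMyLexer, NewMyParser, StartRule and
+// the tokens t and u are illustrative placeholders from a generated parser,
+// not part of this runtime):
+//
+//	input := antlr.NewInputStream("some input")
+//	lex := NewMyLexer(input)
+//	tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
+//	parser := NewMyParser(tokens)
+//	rewriter := antlr.NewTokenStreamRewriter(tokens)
+//	parser.StartRule()
+//
+//	rewriter.InsertAfterDefault(t.GetTokenIndex(), "text to put after t")
+//	rewriter.InsertAfterDefault(u.GetTokenIndex(), "text after u")
+//	fmt.Println(rewriter.GetTextDefault())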
+
+
+
+const(
+ Default_Program_Name = "default"
+ Program_Init_Size = 100
+ Min_Token_Index = 0
+)
+
+// Define the rewrite operation hierarchy
+
+type RewriteOperation interface {
+ // Execute the rewrite operation by possibly adding to the buffer.
+ // Return the index of the next token to operate on.
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
+ SetInstructionIndex(val int)
+ SetIndex(int)
+ SetText(string)
+ SetOpName(string)
+ SetTokens(TokenStream)
+}
+
+type BaseRewriteOperation struct {
+ //Current index of rewrites list
+ instruction_index int
+ //Token buffer index
+ index int
+ //Substitution text
+ text string
+ //Actual operation name
+ op_name string
+ //Pointer to token steam
+ tokens TokenStream
+}
+
+func (op *BaseRewriteOperation)GetInstructionIndex() int{
+ return op.instruction_index
+}
+
+func (op *BaseRewriteOperation)GetIndex() int{
+ return op.index
+}
+
+func (op *BaseRewriteOperation)GetText() string{
+ return op.text
+}
+
+func (op *BaseRewriteOperation)GetOpName() string{
+ return op.op_name
+}
+
+func (op *BaseRewriteOperation)GetTokens() TokenStream{
+ return op.tokens
+}
+
+func (op *BaseRewriteOperation)SetInstructionIndex(val int){
+ op.instruction_index = val
+}
+
+func (op *BaseRewriteOperation)SetIndex(val int) {
+ op.index = val
+}
+
+func (op *BaseRewriteOperation)SetText(val string){
+ op.text = val
+}
+
+func (op *BaseRewriteOperation)SetOpName(val string){
+ op.op_name = val
+}
+
+func (op *BaseRewriteOperation)SetTokens(val TokenStream) {
+ op.tokens = val
+}
+
+
+func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{
+ return op.index
+}
+
+func (op *BaseRewriteOperation) String() string {
+ return fmt.Sprintf("<%s@%d:\"%s\">",
+ op.op_name,
+ op.tokens.Get(op.GetIndex()),
+ op.text,
+ )
+
+}
+
+
+type InsertBeforeOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{
+ return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{
+ index:index,
+ text:text,
+ op_name:"InsertBeforeOp",
+ tokens:stream,
+ }}
+}
+
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index+1
+}
+
+func (op *InsertBeforeOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// Distinguish between insert after/before to do the "insert afters"
+// first and then the "insert befores" at same index. Implementation
+// of "insert after" is "insert before index+1".
+
+type InsertAfterOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{
+ return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{
+ index:index+1,
+ text:text,
+ tokens:stream,
+ }}
+}
+
+func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index+1
+}
+
+func (op *InsertAfterOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+// instructions.
+type ReplaceOp struct{
+ BaseRewriteOperation
+ LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
+ return &ReplaceOp{
+ BaseRewriteOperation:BaseRewriteOperation{
+ index:from,
+ text:text,
+ op_name:"ReplaceOp",
+ tokens:stream,
+ },
+ LastIndex:to,
+ }
+}
+
+func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
+ if op.text != ""{
+ buffer.WriteString(op.text)
+ }
+ return op.LastIndex +1
+}
+
+func (op *ReplaceOp) String() string {
+	if op.text == "" {
+		return fmt.Sprintf("<DeleteOp@%v..%v>",
+			op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+	}
+	return fmt.Sprintf("<ReplaceOp@%v..%v:\"%v\">",
+		op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+
+type TokenStreamRewriter struct {
+ //Our source stream
+ tokens TokenStream
+ // You may have multiple, named streams of rewrite operations.
+ // I'm calling these things "programs."
+ // Maps String (name) → rewrite (List)
+ programs map[string][]RewriteOperation
+ last_rewrite_token_indexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
+ return &TokenStreamRewriter{
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
+ },
+ last_rewrite_token_indexes: map[string]int{},
+ }
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
+ return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
+ is, ok := tsr.programs[program_name]
+ if ok{
+ tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
+ }
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
+ tsr.Rollback(Default_Program_Name, instruction_index)
+}
+//Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
+ tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
+ tsr.DeleteProgram(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
+ // to insert after, just insert before next index (even if past end)
+ var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
+ tsr.InsertAfter(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
+ tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
+ var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
+ tsr.InsertBefore(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
+ tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
+ panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+ from, to, tsr.tokens.Size()))
+ }
+ var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
+ tsr.Replace(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
+ tsr.ReplaceDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
+ tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
+ tsr.ReplaceToken(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
+ tsr.ReplaceTokenDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
+ tsr.Replace(program_name, from, to, "" )
+}
+
+func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
+ tsr.Delete(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
+ tsr.DeleteDefault(index,index)
+}
+
+func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
+ tsr.ReplaceToken(program_name, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
+ tsr.DeleteToken(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
+ i, ok := tsr.last_rewrite_token_indexes[program_name]
+ if !ok{
+ return -1
+ }
+ return i
+}
+
+func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
+ return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
+ tsr.last_rewrite_token_indexes[program_name] = i
+}
+
+func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
+ is := make([]RewriteOperation, 0, Program_Init_Size)
+ tsr.programs[name] = is
+ return is
+}
+
+func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
+ is := tsr.GetProgram(name)
+ is = append(is, op)
+ tsr.programs[name] = is
+}
+
+func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
+ is, ok := tsr.programs[name]
+ if !ok{
+ is = tsr.InitializeProgram(name)
+ }
+ return is
+}
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter)GetTextDefault() string{
+ return tsr.GetText(
+ Default_Program_Name,
+ NewInterval(0, tsr.tokens.Size()-1))
+}
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
+ rewrites := tsr.programs[program_name]
+ start := interval.Start
+ stop := interval.Stop
+ // ensure start/end are in range
+ stop = min(stop, tsr.tokens.Size()-1)
+ start = max(start,0)
+ if rewrites == nil || len(rewrites) == 0{
+ return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+ }
+ buf := bytes.Buffer{}
+ // First, optimize instruction stream
+ indexToOp := reduceToSingleOperationPerIndex(rewrites)
+ // Walk buffer, executing instructions and emitting tokens
+	for i:=start; i<=stop && i<tsr.tokens.Size();{
+		op := indexToOp[i]
+		delete(indexToOp, i)// remove so any left have index size-1
+		t := tsr.tokens.Get(i)
+		if op == nil{
+			// no operation at that index, just dump token
+			if t.GetTokenType() != TokenEOF {buf.WriteString(t.GetText())}
+			i++ // move to next token
+		}else {
+			i = op.Execute(&buf)// execute operation and skip
+		}
+	}
+	// include stuff after end if it's last index in buffer
+	// So, if they did an insertAfter(lastValidIndex, "foo"), include
+	// foo if end==lastValidIndex.
+	if stop == tsr.tokens.Size()-1{
+		// Scan any remaining operations after last token
+		// should be included (they will be inserts).
+		for _, op := range indexToOp{
+			if op.GetIndex() >= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
+ }
+ }
+ return buf.String()
+}
+
+// We need to combine operations and report invalid operations (like
+// overlapping replaces that are not completed nested). Inserts to
+// same index need to be combined etc... Here are the cases:
+//
+// I.i.u I.j.v leave alone, nonoverlapping
+// I.i.u I.i.v combine: Iivu
+//
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
+//
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+//
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
+//
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this replace.
+// 3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the replace range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// Return a map from token index to operation.
+//
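+// For example, the program [I.2."u", R.2-2."v"] reduces to the single op
+// R.2-2."uv" (the insert's text is folded into the replace and the insert is
+// dropped); the reverse order [R.2-2."v", I.2."u"] likewise reduces to
+// R.2-2."uv" per the "R.x-y.v I.x.u" case above.
+//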
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{
+ // WALK REPLACES
+ for i:=0; i < len(rewrites); i++{
+ op := rewrites[i]
+ if op == nil{continue}
+ rop, ok := op.(*ReplaceOp)
+ if !ok{continue}
+ // Wipe prior inserts within range
+		for j:=0; j<i && j < len(rewrites); j++{
+			if iop, ok := rewrites[j].(*InsertBeforeOp);ok{
+				if iop.index == rop.index{
+					// E.g., insert before 2, delete 2..2; update replace
+					// text to include insert before, kill insert
+					rewrites[iop.instruction_index] = nil
+					if rop.text != ""{
+						rop.text = iop.text + rop.text
+					}else{
+						rop.text = iop.text
+					}
+				}else if iop.index > rop.index && iop.index <=rop.LastIndex{
+ // delete insert as it's a no-op.
+ rewrites[iop.instruction_index] = nil
+ }
+ }
+ }
+ // Drop any prior replaces contained within
+		for j:=0; j<i && j < len(rewrites); j++{
+			if prevop, ok := rewrites[j].(*ReplaceOp);ok{
+				if prevop.index>=rop.index && prevop.LastIndex <= rop.LastIndex{
+ // delete replace as it's a no-op.
+ rewrites[prevop.instruction_index] = nil
+ continue
+ }
+ // throw exception unless disjoint or identical
+ disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
+ // Delete special case of replace (text==null):
+ // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+ if prevop.text == "" && rop.text == "" && !disjoint{
+ rewrites[prevop.instruction_index] = nil
+ rop.index = min(prevop.index, rop.index)
+ rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
+ }else if !disjoint{
+ panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
+ }
+ }
+ }
+ }
+ // WALK INSERTS
+ for i:=0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil{continue}
+ //hack to replicate inheritance in composition
+ _, iok := rewrites[i].(*InsertBeforeOp)
+ _, aok := rewrites[i].(*InsertAfterOp)
+ if !iok && !aok{continue}
+ iop := rewrites[i]
+ // combine current insert with prior if any at same index
+ // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
+		for j:=0; j<i && j < len(rewrites); j++{
+			if nextIop, ok := rewrites[j].(*InsertAfterOp); ok{
+				if nextIop.index == iop.GetIndex(){
+					iop.SetText(nextIop.text + iop.GetText())
+					rewrites[j] = nil
+				}
+			}
+			if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok{
+				if prevIop.index == iop.GetIndex(){
+					iop.SetText(iop.GetText() + prevIop.text)
+					rewrites[prevIop.instruction_index] = nil
+				}
+			}
+		}
+		// look for replaces where iop.index is in range; error
+		for j:=0; j<i && j < len(rewrites); j++{
+			if rop, ok := rewrites[j].(*ReplaceOp); ok{
+				if iop.GetIndex() == rop.index{
+					rop.text = iop.GetText() + rop.text
+					rewrites[i] = nil
+					continue
+				}
+				if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex{
+ panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
+ }
+ }
+ }
+ }
+ m := map[int]RewriteOperation{}
+ for i:=0; i < len(rewrites); i++{
+ op := rewrites[i]
+ if op == nil {continue}
+ if _, ok := m[op.GetIndex()]; ok{
+ panic("should only be one op per index")
+ }
+ m[op.GetIndex()] = op
+ }
+ return m
+}
+
+
+/*
+ Quick fixing Go lack of overloads
+ */
+
+func max(a,b int)int{
+ if a>b{
+ return a
+ }else {
+ return b
+ }
+}
+func min(a,b int)int{
+	if a<b{
+		return a
+	}else {
+		return b
+	}
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// An ATN transition between any two ATN states. Subclasses define
+// atom, set, epsilon, action, predicate, rule transitions.
+//
+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct it,
+// the states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
+
+type Transition interface {
+ getTarget() ATNState
+ setTarget(ATNState)
+ getIsEpsilon() bool
+ getLabel() *IntervalSet
+ getSerializationType() int
+ Matches(int, int, int) bool
+}
+
+type BaseTransition struct {
+ target ATNState
+ isEpsilon bool
+ label int
+ intervalSet *IntervalSet
+ serializationType int
+}
+
+func NewBaseTransition(target ATNState) *BaseTransition {
+
+ if target == nil {
+ panic("target cannot be nil.")
+ }
+
+ t := new(BaseTransition)
+
+ t.target = target
+ // Are we epsilon, action, sempred?
+ t.isEpsilon = false
+ t.intervalSet = nil
+
+ return t
+}
+
+func (t *BaseTransition) getTarget() ATNState {
+ return t.target
+}
+
+func (t *BaseTransition) setTarget(s ATNState) {
+ t.target = s
+}
+
+func (t *BaseTransition) getIsEpsilon() bool {
+ return t.isEpsilon
+}
+
+func (t *BaseTransition) getLabel() *IntervalSet {
+ return t.intervalSet
+}
+
+func (t *BaseTransition) getSerializationType() int {
+ return t.serializationType
+}
+
+func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ panic("Not implemented")
+}
+
+const (
+ TransitionEPSILON = 1
+ TransitionRANGE = 2
+ TransitionRULE = 3
+ TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
+ TransitionATOM = 5
+ TransitionACTION = 6
+ TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
+ TransitionNOTSET = 8
+ TransitionWILDCARD = 9
+ TransitionPRECEDENCE = 10
+)
+
+var TransitionserializationNames = []string{
+ "INVALID",
+ "EPSILON",
+ "RANGE",
+ "RULE",
+ "PREDICATE",
+ "ATOM",
+ "ACTION",
+ "SET",
+ "NOT_SET",
+ "WILDCARD",
+ "PRECEDENCE",
+}
+
+//var TransitionserializationTypes struct {
+// EpsilonTransition int
+// RangeTransition int
+// RuleTransition int
+// PredicateTransition int
+// AtomTransition int
+// ActionTransition int
+// SetTransition int
+// NotSetTransition int
+// WildcardTransition int
+// PrecedencePredicateTransition int
+//}{
+// TransitionEPSILON,
+// TransitionRANGE,
+// TransitionRULE,
+// TransitionPREDICATE,
+// TransitionATOM,
+// TransitionACTION,
+// TransitionSET,
+// TransitionNOTSET,
+// TransitionWILDCARD,
+// TransitionPRECEDENCE
+//}
+
+// TODO: make all transitions sets? no, should remove set edges
+type AtomTransition struct {
+ *BaseTransition
+}
+
+func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
+
+ t := new(AtomTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.label = intervalSet // The token type or character value or, signifies special intervalSet.
+ t.intervalSet = t.makeLabel()
+ t.serializationType = TransitionATOM
+
+ return t
+}
+
+func (t *AtomTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addOne(t.label)
+ return s
+}
+
+func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return t.label == symbol
+}
+
+func (t *AtomTransition) String() string {
+ return strconv.Itoa(t.label)
+}
+
+type RuleTransition struct {
+ *BaseTransition
+
+ followState ATNState
+ ruleIndex, precedence int
+}
+
+func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
+
+ t := new(RuleTransition)
+ t.BaseTransition = NewBaseTransition(ruleStart)
+
+ t.ruleIndex = ruleIndex
+ t.precedence = precedence
+ t.followState = followState
+ t.serializationType = TransitionRULE
+ t.isEpsilon = true
+
+ return t
+}
+
+func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+type EpsilonTransition struct {
+ *BaseTransition
+
+ outermostPrecedenceReturn int
+}
+
+func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
+
+ t := new(EpsilonTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionEPSILON
+ t.isEpsilon = true
+ t.outermostPrecedenceReturn = outermostPrecedenceReturn
+ return t
+}
+
+func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *EpsilonTransition) String() string {
+ return "epsilon"
+}
+
+type RangeTransition struct {
+ *BaseTransition
+
+ start, stop int
+}
+
+func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
+
+ t := new(RangeTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionRANGE
+ t.start = start
+ t.stop = stop
+ t.intervalSet = t.makeLabel()
+ return t
+}
+
+func (t *RangeTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addRange(t.start, t.stop)
+ return s
+}
+
+func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= t.start && symbol <= t.stop
+}
+
+func (t *RangeTransition) String() string {
+ var sb strings.Builder
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(t.start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(t.stop))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
+type AbstractPredicateTransition interface {
+ Transition
+ IAbstractPredicateTransitionFoo()
+}
+
+type BaseAbstractPredicateTransition struct {
+ *BaseTransition
+}
+
+func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
+
+ t := new(BaseAbstractPredicateTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ return t
+}
+
+func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
+
+type PredicateTransition struct {
+ *BaseAbstractPredicateTransition
+
+ isCtxDependent bool
+ ruleIndex, predIndex int
+}
+
+func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
+
+ t := new(PredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPREDICATE
+ t.ruleIndex = ruleIndex
+ t.predIndex = predIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
+}
+
+func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *PredicateTransition) getPredicate() *Predicate {
+ return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
+}
+
+func (t *PredicateTransition) String() string {
+ return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
+}
+
+type ActionTransition struct {
+ *BaseTransition
+
+ isCtxDependent bool
+ ruleIndex, actionIndex, predIndex int
+}
+
+func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
+
+ t := new(ActionTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionACTION
+ t.ruleIndex = ruleIndex
+ t.actionIndex = actionIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
+}
+
+func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *ActionTransition) String() string {
+ return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
+}
+
+type SetTransition struct {
+ *BaseTransition
+}
+
+func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
+
+ t := new(SetTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionSET
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+
+ return t
+}
+
+func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return t.intervalSet.contains(symbol)
+}
+
+func (t *SetTransition) String() string {
+ return t.intervalSet.String()
+}
+
+type NotSetTransition struct {
+ *SetTransition
+}
+
+func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
+
+ t := new(NotSetTransition)
+
+ t.SetTransition = NewSetTransition(target, set)
+
+ t.serializationType = TransitionNOTSET
+
+ return t
+}
+
+func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
+}
+
+func (t *NotSetTransition) String() string {
+ return "~" + t.intervalSet.String()
+}
+
+type WildcardTransition struct {
+ *BaseTransition
+}
+
+func NewWildcardTransition(target ATNState) *WildcardTransition {
+
+ t := new(WildcardTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionWILDCARD
+ return t
+}
+
+func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
+}
+
+func (t *WildcardTransition) String() string {
+ return "."
+}
+
+type PrecedencePredicateTransition struct {
+ *BaseAbstractPredicateTransition
+
+ precedence int
+}
+
+func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
+
+ t := new(PrecedencePredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPRECEDENCE
+ t.precedence = precedence
+ t.isEpsilon = true
+
+ return t
+}
+
+func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
+ return NewPrecedencePredicate(t.precedence)
+}
+
+func (t *PrecedencePredicateTransition) String() string {
+ return fmt.Sprint(t.precedence) + " >= _p"
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
new file mode 100644
index 000000000..bdeb6d788
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
@@ -0,0 +1,256 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The basic notion of a tree has a parent, a payload, and a list of children.
+// It is the most abstract interface for all the trees used by ANTLR.
+///
+
+var TreeInvalidInterval = NewInterval(-1, -2)
+
+type Tree interface {
+ GetParent() Tree
+ SetParent(Tree)
+ GetPayload() interface{}
+ GetChild(i int) Tree
+ GetChildCount() int
+ GetChildren() []Tree
+}
+
+type SyntaxTree interface {
+ Tree
+
+ GetSourceInterval() *Interval
+}
+
+type ParseTree interface {
+ SyntaxTree
+
+ Accept(Visitor ParseTreeVisitor) interface{}
+ GetText() string
+
+ ToStringTree([]string, Recognizer) string
+}
+
+type RuleNode interface {
+ ParseTree
+
+ GetRuleContext() RuleContext
+ GetBaseRuleContext() *BaseRuleContext
+}
+
+type TerminalNode interface {
+ ParseTree
+
+ GetSymbol() Token
+}
+
+type ErrorNode interface {
+ TerminalNode
+
+ errorNode()
+}
+
+type ParseTreeVisitor interface {
+ Visit(tree ParseTree) interface{}
+ VisitChildren(node RuleNode) interface{}
+ VisitTerminal(node TerminalNode) interface{}
+ VisitErrorNode(node ErrorNode) interface{}
+}
+
+type BaseParseTreeVisitor struct{}
+
+var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
+
+func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil }
+
+// TODO
+//func (this ParseTreeVisitor) Visit(ctx) {
+// if (Utils.isArray(ctx)) {
+// self := this
+// return ctx.map(function(child) { return VisitAtom(self, child)})
+// } else {
+// return VisitAtom(this, ctx)
+// }
+//}
+//
+//func VisitAtom(Visitor, ctx) {
+// if (ctx.parser == nil) { //is terminal
+// return
+// }
+//
+// name := ctx.parser.ruleNames[ctx.ruleIndex]
+// funcName := "Visit" + Utils.titleCase(name)
+//
+// return Visitor[funcName](ctx)
+//}
+
+type ParseTreeListener interface {
+ VisitTerminal(node TerminalNode)
+ VisitErrorNode(node ErrorNode)
+ EnterEveryRule(ctx ParserRuleContext)
+ ExitEveryRule(ctx ParserRuleContext)
+}
+
+type BaseParseTreeListener struct{}
+
+var _ ParseTreeListener = &BaseParseTreeListener{}
+
+func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
+func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
+func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
+
+type TerminalNodeImpl struct {
+ parentCtx RuleContext
+
+ symbol Token
+}
+
+var _ TerminalNode = &TerminalNodeImpl{}
+
+func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
+ tn := new(TerminalNodeImpl)
+
+ tn.parentCtx = nil
+ tn.symbol = symbol
+
+ return tn
+}
+
+func (t *TerminalNodeImpl) GetChild(i int) Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) GetChildren() []Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
+ panic("Cannot set children on terminal node")
+}
+
+func (t *TerminalNodeImpl) GetSymbol() Token {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetParent() Tree {
+ return t.parentCtx
+}
+
+func (t *TerminalNodeImpl) SetParent(tree Tree) {
+ t.parentCtx = tree.(RuleContext)
+}
+
+func (t *TerminalNodeImpl) GetPayload() interface{} {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
+ if t.symbol == nil {
+ return TreeInvalidInterval
+ }
+ tokenIndex := t.symbol.GetTokenIndex()
+ return NewInterval(tokenIndex, tokenIndex)
+}
+
+func (t *TerminalNodeImpl) GetChildCount() int {
+ return 0
+}
+
+func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitTerminal(t)
+}
+
+func (t *TerminalNodeImpl) GetText() string {
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) String() string {
+ if t.symbol.GetTokenType() == TokenEOF {
+		return "<EOF>"
+ }
+
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
+ return t.String()
+}
+
+// Represents a token that was consumed during reSynchronization
+// rather than during a valid Match operation. For example,
+// we will create this kind of a node during single token insertion
+// and deletion as well as during "consume until error recovery set"
+// upon no viable alternative exceptions.
+
+type ErrorNodeImpl struct {
+ *TerminalNodeImpl
+}
+
+var _ ErrorNode = &ErrorNodeImpl{}
+
+func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
+ en := new(ErrorNodeImpl)
+ en.TerminalNodeImpl = NewTerminalNodeImpl(token)
+ return en
+}
+
+func (e *ErrorNodeImpl) errorNode() {}
+
+func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitErrorNode(e)
+}
+
+type ParseTreeWalker struct {
+}
+
+func NewParseTreeWalker() *ParseTreeWalker {
+ return new(ParseTreeWalker)
+}
+
+// Performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search. On each node, EnterRule is called before
+// recursively walking down into child nodes, then
+// ExitRule is called after the recursive call to wind up.
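+//
+// Typical usage from outside this package (NewMyGrammarListener is an
+// illustrative name for a listener generated for your grammar):
+//
+//	antlr.ParseTreeWalkerDefault.Walk(NewMyGrammarListener(), tree)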
+func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+ switch tt := t.(type) {
+ case ErrorNode:
+ listener.VisitErrorNode(tt)
+ case TerminalNode:
+ listener.VisitTerminal(tt)
+ default:
+ p.EnterRule(listener, t.(RuleNode))
+ for i := 0; i < t.GetChildCount(); i++ {
+ child := t.GetChild(i)
+ p.Walk(listener, child)
+ }
+ p.ExitRule(listener, t.(RuleNode))
+ }
+}
+
+//
+// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
+// then by triggering the event specific to the given parse tree node
+//
+func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+}
+
+// Exits a grammar rule by first triggering the event specific to the given parse tree node
+// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
+//
+func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+}
+
+var ParseTreeWalkerDefault = NewParseTreeWalker()
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
new file mode 100644
index 000000000..80144ecad
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
@@ -0,0 +1,137 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+/** A set of utility routines useful for all kinds of ANTLR trees. */
+
+// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
+// node payloads to get the text for the nodes. Detect
+// parse trees and extract data appropriately.
+func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
+
+ if recog != nil {
+ ruleNames = recog.GetRuleNames()
+ }
+
+ s := TreesGetNodeText(tree, ruleNames, nil)
+
+ s = EscapeWhitespace(s, false)
+ c := tree.GetChildCount()
+ if c == 0 {
+ return s
+ }
+ res := "(" + s + " "
+ if c > 0 {
+ s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
+ res += s
+ }
+ for i := 1; i < c; i++ {
+ s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
+ res += (" " + s)
+ }
+ res += ")"
+ return res
+}
+
+func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
+ if recog != nil {
+ ruleNames = recog.GetRuleNames()
+ }
+
+ if ruleNames != nil {
+ switch t2 := t.(type) {
+ case RuleNode:
+ t3 := t2.GetRuleContext()
+ altNumber := t3.GetAltNumber()
+
+ if altNumber != ATNInvalidAltNumber {
+ return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
+ }
+ return ruleNames[t3.GetRuleIndex()]
+ case ErrorNode:
+ return fmt.Sprint(t2)
+ case TerminalNode:
+ if t2.GetSymbol() != nil {
+ return t2.GetSymbol().GetText()
+ }
+ }
+ }
+
+ // no recog for rule names
+ payload := t.GetPayload()
+ if p2, ok := payload.(Token); ok {
+ return p2.GetText()
+ }
+
+ return fmt.Sprint(t.GetPayload())
+}
+
+// Return ordered list of all children of this node
+func TreesGetChildren(t Tree) []Tree {
+ list := make([]Tree, 0)
+ for i := 0; i < t.GetChildCount(); i++ {
+ list = append(list, t.GetChild(i))
+ }
+ return list
+}
+
+// Return a list of all ancestors of this node. The first node of
+// list is the root and the last is the parent of this node.
+//
+func TreesgetAncestors(t Tree) []Tree {
+ ancestors := make([]Tree, 0)
+ t = t.GetParent()
+ for t != nil {
+ f := []Tree{t}
+ ancestors = append(f, ancestors...)
+ t = t.GetParent()
+ }
+ return ancestors
+}
+
+func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
+ return TreesfindAllNodes(t, ttype, true)
+}
+
+func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
+ return TreesfindAllNodes(t, ruleIndex, false)
+}
+
+func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
+ nodes := make([]ParseTree, 0)
+ treesFindAllNodes(t, index, findTokens, &nodes)
+ return nodes
+}
+
+func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
+ // check this node (the root) first
+
+ t2, ok := t.(TerminalNode)
+ t3, ok2 := t.(ParserRuleContext)
+
+ if findTokens && ok {
+ if t2.GetSymbol().GetTokenType() == index {
+ *nodes = append(*nodes, t2)
+ }
+ } else if !findTokens && ok2 {
+ if t3.GetRuleIndex() == index {
+ *nodes = append(*nodes, t3)
+ }
+ }
+ // check children
+ for i := 0; i < t.GetChildCount(); i++ {
+ treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
+ }
+}
+
+func TreesDescendants(t ParseTree) []ParseTree {
+ nodes := []ParseTree{t}
+ for i := 0; i < t.GetChildCount(); i++ {
+ nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
+ }
+ return nodes
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
new file mode 100644
index 000000000..8fc876334
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
@@ -0,0 +1,344 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/bits"
+ "strconv"
+ "strings"
+)
+
+func intMin(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func intMax(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// A simple integer stack
+
+type IntStack []int
+
+var ErrEmptyStack = errors.New("Stack is empty")
+
+func (s *IntStack) Pop() (int, error) {
+ l := len(*s) - 1
+ if l < 0 {
+ return 0, ErrEmptyStack
+ }
+ v := (*s)[l]
+ *s = (*s)[0:l]
+ return v, nil
+}
+
+func (s *IntStack) Push(e int) {
+ *s = append(*s, e)
+}
+
+func standardEqualsFunction(a interface{}, b interface{}) bool {
+
+ ac, oka := a.(comparable)
+ bc, okb := b.(comparable)
+
+ if !oka || !okb {
+ panic("Not Comparable")
+ }
+
+ return ac.equals(bc)
+}
+
+func standardHashFunction(a interface{}) int {
+ if h, ok := a.(hasher); ok {
+ return h.hash()
+ }
+
+ panic("Not Hasher")
+}
+
+type hasher interface {
+ hash() int
+}
+
+const bitsPerWord = 64
+
+func indexForBit(bit int) int {
+ return bit / bitsPerWord
+}
+
+func wordForBit(data []uint64, bit int) uint64 {
+ idx := indexForBit(bit)
+ if idx >= len(data) {
+ return 0
+ }
+ return data[idx]
+}
+
+func maskForBit(bit int) uint64 {
+ return uint64(1) << (bit % bitsPerWord)
+}
+
+func wordsNeeded(bit int) int {
+ return indexForBit(bit) + 1
+}
+
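+// BitSet is a growable set of non-negative integers backed by a slice of 64-bit words.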
+type BitSet struct {
+ data []uint64
+}
+
+func NewBitSet() *BitSet {
+ return &BitSet{}
+}
+
+func (b *BitSet) add(value int) {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ size := wordsNeeded(value)
+ data := make([]uint64, size)
+ copy(data, b.data)
+ b.data = data
+ }
+ b.data[idx] |= maskForBit(value)
+}
+
+func (b *BitSet) clear(index int) {
+ idx := indexForBit(index)
+ if idx >= len(b.data) {
+ return
+ }
+ b.data[idx] &= ^maskForBit(index)
+}
+
+func (b *BitSet) or(set *BitSet) {
+	// Grow b so it has a word for every populated word in either set.
+	setLen := set.minLen()
+	size := intMax(b.minLen(), setLen)
+	if size > len(b.data) {
+		data := make([]uint64, size)
+		copy(data, b.data)
+		b.data = data
+	}
+	// Only the first setLen words of set can be non-zero, so stop there to
+	// avoid indexing past the end of set.data.
+	for i := 0; i < setLen; i++ {
+		b.data[i] |= set.data[i]
+	}
+}
+
+func (b *BitSet) remove(value int) {
+ b.clear(value)
+}
+
+func (b *BitSet) contains(value int) bool {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ return false
+ }
+ return (b.data[idx] & maskForBit(value)) != 0
+}
+
+func (b *BitSet) minValue() int {
+ for i, v := range b.data {
+ if v == 0 {
+ continue
+ }
+ return i*bitsPerWord + bits.TrailingZeros64(v)
+ }
+ return 2147483647
+}
+
+func (b *BitSet) equals(other interface{}) bool {
+ otherBitSet, ok := other.(*BitSet)
+ if !ok {
+ return false
+ }
+
+ if b == otherBitSet {
+ return true
+ }
+
+ if len(b.data) != len(otherBitSet.data) {
+ return false
+ }
+
+ for k := range b.data {
+ if b.data[k] != otherBitSet.data[k] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *BitSet) minLen() int {
+ for i := len(b.data); i > 0; i-- {
+ if b.data[i-1] != 0 {
+ return i
+ }
+ }
+ return 0
+}
+
+func (b *BitSet) length() int {
+ cnt := 0
+ for _, val := range b.data {
+ cnt += bits.OnesCount64(val)
+ }
+ return cnt
+}
+
+func (b *BitSet) String() string {
+ vals := make([]string, 0, b.length())
+
+ for i, v := range b.data {
+ for v != 0 {
+ n := bits.TrailingZeros64(v)
+ vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
+ v &= ^(uint64(1) << n)
+ }
+ }
+
+ return "{" + strings.Join(vals, ", ") + "}"
+}
+
+type AltDict struct {
+ data map[string]interface{}
+}
+
+func NewAltDict() *AltDict {
+ d := new(AltDict)
+ d.data = make(map[string]interface{})
+ return d
+}
+
+func (a *AltDict) Get(key string) interface{} {
+ key = "k-" + key
+ return a.data[key]
+}
+
+func (a *AltDict) put(key string, value interface{}) {
+ key = "k-" + key
+ a.data[key] = value
+}
+
+func (a *AltDict) values() []interface{} {
+ vs := make([]interface{}, len(a.data))
+ i := 0
+ for _, v := range a.data {
+ vs[i] = v
+ i++
+ }
+ return vs
+}
+
+type DoubleDict struct {
+ data map[int]map[int]interface{}
+}
+
+func NewDoubleDict() *DoubleDict {
+ dd := new(DoubleDict)
+ dd.data = make(map[int]map[int]interface{})
+ return dd
+}
+
+func (d *DoubleDict) Get(a, b int) interface{} {
+ data := d.data[a]
+
+ if data == nil {
+ return nil
+ }
+
+ return data[b]
+}
+
+func (d *DoubleDict) set(a, b int, o interface{}) {
+ data := d.data[a]
+
+ if data == nil {
+ data = make(map[int]interface{})
+ d.data[a] = data
+ }
+
+ data[b] = o
+}
+
+func EscapeWhitespace(s string, escapeSpaces bool) string {
+
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ if escapeSpaces {
+ s = strings.Replace(s, " ", "\u00B7", -1)
+ }
+ return s
+}
+
+func TerminalNodeToStringArray(sa []TerminalNode) []string {
+ st := make([]string, len(sa))
+
+ for i, s := range sa {
+ st[i] = fmt.Sprintf("%v", s)
+ }
+
+ return st
+}
+
+func PrintArrayJavaStyle(sa []string) string {
+ var buffer bytes.Buffer
+
+ buffer.WriteString("[")
+
+ for i, s := range sa {
+ buffer.WriteString(s)
+ if i != len(sa)-1 {
+ buffer.WriteString(", ")
+ }
+ }
+
+ buffer.WriteString("]")
+
+ return buffer.String()
+}
+
+// MurmurHash3-style 32-bit hash helpers: murmurInit seeds the hash, murmurUpdate
+// mixes in one value, and murmurFinish applies the final avalanche step.
+func murmurInit(seed int) int {
+ return seed
+}
+
+func murmurUpdate(h int, value int) int {
+ const c1 uint32 = 0xCC9E2D51
+ const c2 uint32 = 0x1B873593
+ const r1 uint32 = 15
+ const r2 uint32 = 13
+ const m uint32 = 5
+ const n uint32 = 0xE6546B64
+
+ k := uint32(value)
+ k *= c1
+ k = (k << r1) | (k >> (32 - r1))
+ k *= c2
+
+ hash := uint32(h) ^ k
+ hash = (hash << r2) | (hash >> (32 - r2))
+ hash = hash*m + n
+ return int(hash)
+}
+
+func murmurFinish(h int, numberOfWords int) int {
+ var hash = uint32(h)
+ hash ^= uint32(numberOfWords) << 2
+ hash ^= hash >> 16
+ hash *= 0x85ebca6b
+ hash ^= hash >> 13
+ hash *= 0xc2b2ae35
+ hash ^= hash >> 16
+
+ return int(hash)
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils_set.go
new file mode 100644
index 000000000..9caa78b8c
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils_set.go
@@ -0,0 +1,237 @@
+package antlr
+
+import "math"
+
+const (
+ _initalCapacity = 16
+ _initalBucketCapacity = 8
+ _loadFactor = 0.75
+)
+
+var _ Set = (*Array2DHashSet)(nil)
+
+type Set interface {
+ Add(value interface{}) (added interface{})
+ Len() int
+ Get(value interface{}) (found interface{})
+ Contains(value interface{}) bool
+ Values() []interface{}
+ Each(f func(interface{}) bool)
+}
+
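+// Array2DHashSet is a hash set that keeps its elements in per-bucket slices; both the
+// bucket table and the individual buckets grow as elements are added.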
+type Array2DHashSet struct {
+ buckets [][]interface{}
+ hashcodeFunction func(interface{}) int
+ equalsFunction func(interface{}, interface{}) bool
+
+ n int // How many elements in set
+ threshold int // when to expand
+
+ currentPrime int // jump by 4 primes each expand or whatever
+ initialBucketCapacity int
+}
+
+func (as *Array2DHashSet) Each(f func(interface{}) bool) {
+ if as.Len() < 1 {
+ return
+ }
+
+ for _, bucket := range as.buckets {
+ for _, o := range bucket {
+ if o == nil {
+ break
+ }
+ if !f(o) {
+ return
+ }
+ }
+ }
+}
+
+func (as *Array2DHashSet) Values() []interface{} {
+ if as.Len() < 1 {
+ return nil
+ }
+
+ values := make([]interface{}, 0, as.Len())
+ as.Each(func(i interface{}) bool {
+ values = append(values, i)
+ return true
+ })
+ return values
+}
+
+func (as *Array2DHashSet) Contains(value interface{}) bool {
+ return as.Get(value) != nil
+}
+
+func (as *Array2DHashSet) Add(value interface{}) interface{} {
+ if as.n > as.threshold {
+ as.expand()
+ }
+ return as.innerAdd(value)
+}
+
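+// expand doubles the bucket table and redistributes every stored element into the new buckets.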
+func (as *Array2DHashSet) expand() {
+ old := as.buckets
+
+ as.currentPrime += 4
+
+ var (
+ newCapacity = len(as.buckets) << 1
+ newTable = as.createBuckets(newCapacity)
+ newBucketLengths = make([]int, len(newTable))
+ )
+
+ as.buckets = newTable
+ as.threshold = int(float64(newCapacity) * _loadFactor)
+
+ for _, bucket := range old {
+ if bucket == nil {
+ continue
+ }
+
+ for _, o := range bucket {
+ if o == nil {
+ break
+ }
+
+ b := as.getBuckets(o)
+ bucketLength := newBucketLengths[b]
+ var newBucket []interface{}
+ if bucketLength == 0 {
+ // new bucket
+ newBucket = as.createBucket(as.initialBucketCapacity)
+ newTable[b] = newBucket
+ } else {
+ newBucket = newTable[b]
+ if bucketLength == len(newBucket) {
+ // expand
+ newBucketCopy := make([]interface{}, len(newBucket)<<1)
+ copy(newBucketCopy[:bucketLength], newBucket)
+ newBucket = newBucketCopy
+ newTable[b] = newBucket
+ }
+ }
+
+ newBucket[bucketLength] = o
+ newBucketLengths[b]++
+ }
+ }
+}
+
+func (as *Array2DHashSet) Len() int {
+ return as.n
+}
+
+func (as *Array2DHashSet) Get(o interface{}) interface{} {
+ if o == nil {
+ return nil
+ }
+
+ b := as.getBuckets(o)
+ bucket := as.buckets[b]
+ if bucket == nil { // no bucket
+ return nil
+ }
+
+ for _, e := range bucket {
+ if e == nil {
+ return nil // empty slot; not there
+ }
+ if as.equalsFunction(e, o) {
+ return e
+ }
+ }
+
+ return nil
+}
+
+func (as *Array2DHashSet) innerAdd(o interface{}) interface{} {
+ b := as.getBuckets(o)
+
+ bucket := as.buckets[b]
+
+ // new bucket
+ if bucket == nil {
+ bucket = as.createBucket(as.initialBucketCapacity)
+ bucket[0] = o
+
+ as.buckets[b] = bucket
+ as.n++
+ return o
+ }
+
+ // look for it in bucket
+ for i := 0; i < len(bucket); i++ {
+ existing := bucket[i]
+ if existing == nil { // empty slot; not there, add.
+ bucket[i] = o
+ as.n++
+ return o
+ }
+
+ if as.equalsFunction(existing, o) { // found existing, quit
+ return existing
+ }
+ }
+
+ // full bucket, expand and add to end
+ oldLength := len(bucket)
+ bucketCopy := make([]interface{}, oldLength<<1)
+ copy(bucketCopy[:oldLength], bucket)
+ bucket = bucketCopy
+ as.buckets[b] = bucket
+ bucket[oldLength] = o
+ as.n++
+ return o
+}
+
+func (as *Array2DHashSet) getBuckets(value interface{}) int {
+ hash := as.hashcodeFunction(value)
+ return hash & (len(as.buckets) - 1)
+}
+
+func (as *Array2DHashSet) createBuckets(cap int) [][]interface{} {
+ return make([][]interface{}, cap)
+}
+
+func (as *Array2DHashSet) createBucket(cap int) []interface{} {
+ return make([]interface{}, cap)
+}
+
+func NewArray2DHashSetWithCap(
+ hashcodeFunction func(interface{}) int,
+ equalsFunction func(interface{}, interface{}) bool,
+ initCap int,
+ initBucketCap int,
+) *Array2DHashSet {
+ if hashcodeFunction == nil {
+ hashcodeFunction = standardHashFunction
+ }
+
+ if equalsFunction == nil {
+ equalsFunction = standardEqualsFunction
+ }
+
+ ret := &Array2DHashSet{
+ hashcodeFunction: hashcodeFunction,
+ equalsFunction: equalsFunction,
+
+ n: 0,
+ threshold: int(math.Floor(_initalCapacity * _loadFactor)),
+
+ currentPrime: 1,
+ initialBucketCapacity: initBucketCap,
+ }
+
+ ret.buckets = ret.createBuckets(initCap)
+ return ret
+}
+
+func NewArray2DHashSet(
+ hashcodeFunction func(interface{}) int,
+ equalsFunction func(interface{}, interface{}) bool,
+) *Array2DHashSet {
+ return NewArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLLexer.g4 b/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLLexer.g4
new file mode 100644
index 000000000..a889c058a
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLLexer.g4
@@ -0,0 +1,79 @@
+lexer grammar CESQLLexer;
+
+// NOTE:
+// This grammar is case-sensitive, although CESQL keywords are case-insensitive.
+// In order to implement case-insensitivity, check out
+// https://github.com/antlr/antlr4/blob/master/doc/case-insensitive-lexing.md#custom-character-streams-approach
+
+// Skip spaces, tabs, carriage returns and newlines
+
+SPACE: [ \t\r\n]+ -> skip;
+
+// Fragments for Literal primitives
+
+fragment ID_LITERAL: [a-zA-Z0-9]+;
+fragment DQUOTA_STRING: '"' ( '\\'. | '""' | ~('"'| '\\') )* '"';
+fragment SQUOTA_STRING: '\'' ('\\'. | '\'\'' | ~('\'' | '\\'))* '\'';
+fragment INT_DIGIT: [0-9];
+fragment FN_LITERAL: [A-Z] [A-Z_]*;
+
+// Constructors symbols
+
+LR_BRACKET: '(';
+RR_BRACKET: ')';
+COMMA: ',';
+SINGLE_QUOTE_SYMB: '\'';
+DOUBLE_QUOTE_SYMB: '"';
+
+fragment QUOTE_SYMB
+ : SINGLE_QUOTE_SYMB | DOUBLE_QUOTE_SYMB
+ ;
+
+// Operators
+// - Logic
+
+AND: 'AND';
+OR: 'OR';
+XOR: 'XOR';
+NOT: 'NOT';
+
+// - Arithmetics
+
+STAR: '*';
+DIVIDE: '/';
+MODULE: '%';
+PLUS: '+';
+MINUS: '-';
+
+// - Comparison
+
+EQUAL: '=';
+NOT_EQUAL: '!=';
+GREATER: '>';
+GREATER_OR_EQUAL: '>=';
+LESS: '<';
+LESS_GREATER: '<>';
+LESS_OR_EQUAL: '<=';
+
+// Like, exists, in
+
+LIKE: 'LIKE';
+EXISTS: 'EXISTS';
+IN: 'IN';
+
+// Booleans
+
+TRUE: 'TRUE';
+FALSE: 'FALSE';
+
+// Literals
+
+DQUOTED_STRING_LITERAL: DQUOTA_STRING;
+SQUOTED_STRING_LITERAL: SQUOTA_STRING;
+INTEGER_LITERAL: INT_DIGIT+;
+
+// Identifiers
+
+IDENTIFIER: [a-zA-Z]+;
+IDENTIFIER_WITH_NUMBER: [a-zA-Z0-9]+;
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE: [A-Z] [A-Z_]*;
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLParser.g4 b/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLParser.g4
new file mode 100644
index 000000000..abab0bac8
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLParser.g4
@@ -0,0 +1,62 @@
+grammar CESQLParser;
+
+import CESQLLexer;
+
+// Entrypoint
+cesql: expression EOF;
+
+// Structure of operations, function invocations and expression
+expression
+ : functionIdentifier functionParameterList #functionInvocationExpression
+ // unary operators are the highest priority
+ | NOT expression #unaryLogicExpression
+ | MINUS expression # unaryNumericExpression
+ // LIKE, EXISTS and IN takes precedence over all the other binary operators
+ | expression NOT? LIKE stringLiteral #likeExpression
+ | EXISTS identifier #existsExpression
+ | expression NOT? IN setExpression #inExpression
+ // Numeric operations
+ | expression (STAR | DIVIDE | MODULE) expression #binaryMultiplicativeExpression
+ | expression (PLUS | MINUS) expression #binaryAdditiveExpression
+ // Comparison operations
+ | expression (EQUAL | NOT_EQUAL | LESS_GREATER | GREATER_OR_EQUAL | LESS_OR_EQUAL | LESS | GREATER) expression #binaryComparisonExpression
+ // Logic operations
+ | expression (AND | OR | XOR) expression #binaryLogicExpression
+ // Subexpressions and atoms
+ | LR_BRACKET expression RR_BRACKET #subExpression
+ | atom #atomExpression
+ ;
+
+atom
+ : booleanLiteral #booleanAtom
+ | integerLiteral #integerAtom
+ | stringLiteral #stringAtom
+ | identifier #identifierAtom
+ ;
+
+// Identifiers
+
+identifier
+ : (IDENTIFIER | IDENTIFIER_WITH_NUMBER)
+ ;
+functionIdentifier
+ : (IDENTIFIER | FUNCTION_IDENTIFIER_WITH_UNDERSCORE)
+ ;
+
+// Literals
+
+booleanLiteral: (TRUE | FALSE);
+stringLiteral: (DQUOTED_STRING_LITERAL | SQUOTED_STRING_LITERAL);
+integerLiteral: INTEGER_LITERAL;
+
+// Functions
+
+functionParameterList
+ : LR_BRACKET ( expression ( COMMA expression )* )? RR_BRACKET
+ ;
+
+// Sets
+
+setExpression
+ : LR_BRACKET expression ( COMMA expression )* RR_BRACKET // Empty sets are not allowed
+ ;
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/sql/v2/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/Makefile b/vendor/github.com/cloudevents/sdk-go/sql/v2/Makefile
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/README.md b/vendor/github.com/cloudevents/sdk-go/sql/v2/README.md
new file mode 100644
index 000000000..179de0722
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/README.md
@@ -0,0 +1,32 @@
+# CloudEvents Expression Language Go implementation
+
+CloudEvents Expression Language implementation.
+
+Note: this package is a work in progress; APIs might break in future releases.
+
+## User guide
+
+To start using it:
+
+```go
+import cesqlparser "github.com/cloudevents/sdk-go/sql/v2/parser"
+
+// Parse the expression
+expression, err := cesqlparser.Parse("subject = 'Hello world'")
+
+// Res can be either int32, bool or string
+res, err := expression.Evaluate(event)
+```
+
+## Development guide
+
+To regenerate the parser, make sure you have [ANTLR4 installed](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md) and then run:
+
+```shell
+antlr4 -Dlanguage=Go -package gen -o gen -visitor -no-listener CESQLParser.g4
+```
+
+Then you need to run this sed command as a workaround until this ANTLR [issue](https://github.com/antlr/antlr4/issues/2433) is resolved. Without this, building for 32bit platforms will throw an int overflow error:
+```shell
+sed -i 's/(1<(int64(1)< y
+ },
+ }
+}
+
+func NewGreaterOrEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return integerComparisonExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) bool {
+ return x >= y
+ },
+ }
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/like_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/like_expression.go
new file mode 100644
index 000000000..236c15caf
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/like_expression.go
@@ -0,0 +1,96 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ "regexp"
+ "strings"
+
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type likeExpression struct {
+ baseUnaryExpression
+ pattern *regexp.Regexp
+}
+
+func (l likeExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ val, err := l.child.Evaluate(event)
+ if err != nil {
+ return nil, err
+ }
+
+ val, err = utils.Cast(val, cesql.StringType)
+ if err != nil {
+ return nil, err
+ }
+
+ return l.pattern.MatchString(val.(string)), nil
+}
+
+func NewLikeExpression(child cesql.Expression, pattern string) (cesql.Expression, error) {
+ // Converting to regex is not the most performant impl, but it works
+ p, err := convertLikePatternToRegex(pattern)
+ if err != nil {
+ return nil, err
+ }
+
+ return likeExpression{
+ baseUnaryExpression: baseUnaryExpression{
+ child: child,
+ },
+ pattern: p,
+ }, nil
+}
+
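+// convertLikePatternToRegex translates a SQL LIKE pattern into an anchored regular expression:
+// '%' becomes '.*', '_' becomes '.', '\%' and '\_' match the literal characters, and every other
+// chunk of the pattern is wrapped in \Q...\E so regex metacharacters are matched literally.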
+func convertLikePatternToRegex(pattern string) (*regexp.Regexp, error) {
+ var chunks []string
+ chunks = append(chunks, "^")
+
+ var chunk strings.Builder
+
+ for i := 0; i < len(pattern); i++ {
+ if pattern[i] == '\\' && i < len(pattern)-1 {
+ if pattern[i+1] == '%' {
+ // \% case
+ chunk.WriteRune('%')
+ chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
+ chunk.Reset()
+ i++
+ continue
+ } else if pattern[i+1] == '_' {
+ // \_ case
+ chunk.WriteRune('_')
+ chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
+ chunk.Reset()
+ i++
+ continue
+ }
+ } else if pattern[i] == '_' {
+ // replace with .
+ chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
+ chunk.Reset()
+ chunks = append(chunks, ".")
+ } else if pattern[i] == '%' {
+ // replace with .*
+ chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
+ chunk.Reset()
+ chunks = append(chunks, ".*")
+ } else {
+ chunk.WriteByte(pattern[i])
+ }
+ }
+
+ if chunk.Len() != 0 {
+ chunks = append(chunks, "\\Q"+chunk.String()+"\\E")
+ }
+
+ chunks = append(chunks, "$")
+
+ return regexp.Compile(strings.Join(chunks, ""))
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/literal_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/literal_expression.go
new file mode 100644
index 000000000..5c139fc8c
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/literal_expression.go
@@ -0,0 +1,23 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type literalExpression struct {
+ value interface{}
+}
+
+func (l literalExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ return l.value, nil
+}
+
+func NewLiteralExpression(value interface{}) cesql.Expression {
+ return literalExpression{value: value}
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/logic_expressions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/logic_expressions.go
new file mode 100644
index 000000000..5c332a5f9
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/logic_expressions.go
@@ -0,0 +1,77 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type logicExpression struct {
+ baseBinaryExpression
+ fn func(x, y bool) bool
+}
+
+func (s logicExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ leftVal, err := s.left.Evaluate(event)
+ if err != nil {
+ return nil, err
+ }
+
+ rightVal, err := s.right.Evaluate(event)
+ if err != nil {
+ return nil, err
+ }
+
+ leftVal, err = utils.Cast(leftVal, cesql.BooleanType)
+ if err != nil {
+ return nil, err
+ }
+
+ rightVal, err = utils.Cast(rightVal, cesql.BooleanType)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.fn(leftVal.(bool), rightVal.(bool)), nil
+}
+
+func NewAndExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return logicExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y bool) bool {
+ return x && y
+ },
+ }
+}
+
+func NewOrExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return logicExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y bool) bool {
+ return x || y
+ },
+ }
+}
+
+func NewXorExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return logicExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y bool) bool {
+ return x != y
+ },
+ }
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/math_expressions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/math_expressions.go
new file mode 100644
index 000000000..d03782011
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/math_expressions.go
@@ -0,0 +1,109 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ "errors"
+
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type mathExpression struct {
+ baseBinaryExpression
+ fn func(x, y int32) (int32, error)
+}
+
+func (s mathExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ leftVal, err := s.left.Evaluate(event)
+ if err != nil {
+ return nil, err
+ }
+
+ rightVal, err := s.right.Evaluate(event)
+ if err != nil {
+ return nil, err
+ }
+
+ leftVal, err = utils.Cast(leftVal, cesql.IntegerType)
+ if err != nil {
+ return nil, err
+ }
+
+ rightVal, err = utils.Cast(rightVal, cesql.IntegerType)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.fn(leftVal.(int32), rightVal.(int32))
+}
+
+func NewSumExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return mathExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) (int32, error) {
+ return x + y, nil
+ },
+ }
+}
+
+func NewDifferenceExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return mathExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) (int32, error) {
+ return x - y, nil
+ },
+ }
+}
+
+func NewMultiplicationExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return mathExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) (int32, error) {
+ return x * y, nil
+ },
+ }
+}
+
+func NewModuleExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return mathExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) (int32, error) {
+ if y == 0 {
+ return 0, errors.New("math error: division by zero")
+ }
+ return x % y, nil
+ },
+ }
+}
+
+func NewDivisionExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return mathExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) (int32, error) {
+ if y == 0 {
+ return 0, errors.New("math error: division by zero")
+ }
+ return x / y, nil
+ },
+ }
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/negate_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/negate_expression.go
new file mode 100644
index 000000000..f917a83bf
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/negate_expression.go
@@ -0,0 +1,32 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type negateExpression baseUnaryExpression
+
+func (l negateExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ val, err := l.child.Evaluate(event)
+ if err != nil {
+ return nil, err
+ }
+
+ val, err = utils.Cast(val, cesql.IntegerType)
+ if err != nil {
+ return nil, err
+ }
+
+ return -(val.(int32)), nil
+}
+
+func NewNegateExpression(child cesql.Expression) cesql.Expression {
+ return negateExpression{child: child}
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/not_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/not_expression.go
new file mode 100644
index 000000000..7f5e95b9f
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/not_expression.go
@@ -0,0 +1,32 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type notExpression baseUnaryExpression
+
+func (l notExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ val, err := l.child.Evaluate(event)
+ if err != nil {
+ return nil, err
+ }
+
+ val, err = utils.Cast(val, cesql.BooleanType)
+ if err != nil {
+ return nil, err
+ }
+
+ return !(val.(bool)), nil
+}
+
+func NewNotExpression(child cesql.Expression) cesql.Expression {
+ return notExpression{child: child}
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/function.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/function.go
new file mode 100644
index 000000000..1502a3d2f
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/function.go
@@ -0,0 +1,17 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package v2
+
+import cloudevents "github.com/cloudevents/sdk-go/v2"
+
+type Function interface {
+ Name() string
+ Arity() int
+ IsVariadic() bool
+ ArgType(index int) *Type
+
+ Run(event cloudevents.Event, arguments []interface{}) (interface{}, error)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/function/casting_functions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/casting_functions.go
new file mode 100644
index 000000000..91069de80
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/casting_functions.go
@@ -0,0 +1,57 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package function
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+var IntFunction function = function{
+ name: "INT",
+ fixedArgs: []cesql.Type{cesql.AnyType},
+ variadicArgs: nil,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return utils.Cast(i[0], cesql.IntegerType)
+ },
+}
+
+var BoolFunction function = function{
+ name: "BOOL",
+ fixedArgs: []cesql.Type{cesql.AnyType},
+ variadicArgs: nil,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return utils.Cast(i[0], cesql.BooleanType)
+ },
+}
+
+var StringFunction function = function{
+ name: "STRING",
+ fixedArgs: []cesql.Type{cesql.AnyType},
+ variadicArgs: nil,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return utils.Cast(i[0], cesql.StringType)
+ },
+}
+
+var IsIntFunction function = function{
+ name: "IS_INT",
+ fixedArgs: []cesql.Type{cesql.AnyType},
+ variadicArgs: nil,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return utils.CanCast(i[0], cesql.IntegerType), nil
+ },
+}
+
+var IsBoolFunction function = function{
+ name: "IS_BOOL",
+ fixedArgs: []cesql.Type{cesql.AnyType},
+ variadicArgs: nil,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return utils.CanCast(i[0], cesql.BooleanType), nil
+ },
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/function/function.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/function.go
new file mode 100644
index 000000000..4ad61faed
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/function.go
@@ -0,0 +1,41 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package function
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type function struct {
+ name string
+ fixedArgs []cesql.Type
+ variadicArgs *cesql.Type
+ fn func(cloudevents.Event, []interface{}) (interface{}, error)
+}
+
+func (f function) Name() string {
+ return f.name
+}
+
+func (f function) Arity() int {
+ return len(f.fixedArgs)
+}
+
+func (f function) IsVariadic() bool {
+ return f.variadicArgs != nil
+}
+
+func (f function) ArgType(index int) *cesql.Type {
+ if index < len(f.fixedArgs) {
+ return &f.fixedArgs[index]
+ }
+ return f.variadicArgs
+}
+
+func (f function) Run(event cloudevents.Event, arguments []interface{}) (interface{}, error) {
+ return f.fn(event, arguments)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/function/integer_functions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/integer_functions.go
new file mode 100644
index 000000000..4aaf64f22
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/integer_functions.go
@@ -0,0 +1,24 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package function
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+var AbsFunction function = function{
+ name: "ABS",
+ fixedArgs: []cesql.Type{cesql.IntegerType},
+ variadicArgs: nil,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ x := i[0].(int32)
+ if x < 0 {
+ return -x, nil
+ }
+ return x, nil
+ },
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/function/string_functions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/string_functions.go
new file mode 100644
index 000000000..2d0d7e4a9
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/string_functions.go
@@ -0,0 +1,177 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package function
+
+import (
+ "fmt"
+ "strings"
+
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+var LengthFunction function = function{
+ name: "LENGTH",
+ fixedArgs: []cesql.Type{cesql.StringType},
+ variadicArgs: nil,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return int32(len(i[0].(string))), nil
+ },
+}
+
+var ConcatFunction function = function{
+ name: "CONCAT",
+ variadicArgs: cesql.TypePtr(cesql.StringType),
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ var sb strings.Builder
+ for _, v := range i {
+ sb.WriteString(v.(string))
+ }
+ return sb.String(), nil
+ },
+}
+
+var ConcatWSFunction function = function{
+ name: "CONCAT_WS",
+ fixedArgs: []cesql.Type{cesql.StringType},
+ variadicArgs: cesql.TypePtr(cesql.StringType),
+ fn: func(event cloudevents.Event, args []interface{}) (interface{}, error) {
+ if len(args) == 1 {
+ return "", nil
+ }
+ separator := args[0].(string)
+
+ var sb strings.Builder
+ for i := 1; i < len(args)-1; i++ {
+ sb.WriteString(args[i].(string))
+ sb.WriteString(separator)
+ }
+ sb.WriteString(args[len(args)-1].(string))
+ return sb.String(), nil
+ },
+}
+
+var LowerFunction function = function{
+ name: "LOWER",
+ fixedArgs: []cesql.Type{cesql.StringType},
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return strings.ToLower(i[0].(string)), nil
+ },
+}
+
+var UpperFunction function = function{
+ name: "UPPER",
+ fixedArgs: []cesql.Type{cesql.StringType},
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return strings.ToUpper(i[0].(string)), nil
+ },
+}
+
+var TrimFunction function = function{
+ name: "TRIM",
+ fixedArgs: []cesql.Type{cesql.StringType},
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return strings.TrimSpace(i[0].(string)), nil
+ },
+}
+
+var LeftFunction function = function{
+ name: "LEFT",
+ fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ str := i[0].(string)
+ y := int(i[1].(int32))
+
+ if y > len(str) {
+ return str, nil
+ }
+
+ if y < 0 {
+ return nil, fmt.Errorf("LEFT y argument is < 0: %d", y)
+ }
+
+ return str[0:y], nil
+ },
+}
+
+var RightFunction function = function{
+ name: "RIGHT",
+ fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ str := i[0].(string)
+ y := int(i[1].(int32))
+
+ if y > len(str) {
+ return str, nil
+ }
+
+ if y < 0 {
+ return nil, fmt.Errorf("RIGHT y argument is < 0: %d", y)
+ }
+
+ return str[len(str)-y:], nil
+ },
+}
+
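+// SubstringFunction implements SUBSTRING(str, pos): pos is 1-based, a negative pos counts
+// back from the end of the string, and pos 0 yields the empty string.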
+var SubstringFunction function = function{
+ name: "SUBSTRING",
+ fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ str := i[0].(string)
+ pos := int(i[1].(int32))
+
+ if pos == 0 {
+ return "", nil
+ }
+
+ if pos < -len(str) || pos > len(str) {
+ return "", fmt.Errorf("SUBSTRING invalid pos argument: %d", pos)
+ }
+
+ var beginning int
+ if pos < 0 {
+ beginning = len(str) + pos
+ } else {
+ beginning = pos - 1
+ }
+
+ return str[beginning:], nil
+ },
+}
+
+var SubstringWithLengthFunction function = function{
+ name: "SUBSTRING",
+ fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType, cesql.IntegerType},
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ str := i[0].(string)
+ pos := int(i[1].(int32))
+ length := int(i[2].(int32))
+
+ if pos == 0 {
+ return "", nil
+ }
+
+ if pos < -len(str) || pos > len(str) {
+ return "", fmt.Errorf("SUBSTRING invalid pos argument: %d", pos)
+ }
+
+ var beginning int
+ if pos < 0 {
+ beginning = len(str) + pos
+ } else {
+ beginning = pos - 1
+ }
+
+ var end int
+ if beginning+length > len(str) {
+ end = len(str)
+ } else {
+ end = beginning + length
+ }
+
+ return str[beginning:end], nil
+ },
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.interp b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.interp
new file mode 100644
index 000000000..2727c21b9
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.interp
@@ -0,0 +1,87 @@
+token literal names:
+null
+null
+'('
+')'
+','
+'\''
+'"'
+'AND'
+'OR'
+'XOR'
+'NOT'
+'*'
+'/'
+'%'
+'+'
+'-'
+'='
+'!='
+'>'
+'>='
+'<'
+'<>'
+'<='
+'LIKE'
+'EXISTS'
+'IN'
+'TRUE'
+'FALSE'
+null
+null
+null
+null
+null
+null
+
+token symbolic names:
+null
+SPACE
+LR_BRACKET
+RR_BRACKET
+COMMA
+SINGLE_QUOTE_SYMB
+DOUBLE_QUOTE_SYMB
+AND
+OR
+XOR
+NOT
+STAR
+DIVIDE
+MODULE
+PLUS
+MINUS
+EQUAL
+NOT_EQUAL
+GREATER
+GREATER_OR_EQUAL
+LESS
+LESS_GREATER
+LESS_OR_EQUAL
+LIKE
+EXISTS
+IN
+TRUE
+FALSE
+DQUOTED_STRING_LITERAL
+SQUOTED_STRING_LITERAL
+INTEGER_LITERAL
+IDENTIFIER
+IDENTIFIER_WITH_NUMBER
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE
+
+rule names:
+cesql
+expression
+atom
+identifier
+functionIdentifier
+booleanLiteral
+stringLiteral
+integerLiteral
+functionParameterList
+setExpression
+
+
+atn:
+[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 35, 112, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 3, 2, 3, 2, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 41, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 57, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 63, 10, 3, 3, 3, 3, 3, 7, 3, 67, 10, 3, 12, 3, 14, 3, 70, 11, 3, 3, 4, 3, 4, 3, 4, 3, 4, 5, 4, 76, 10, 4, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 10, 7, 10, 92, 10, 10, 12, 10, 14, 10, 95, 11, 10, 5, 10, 97, 10, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 11, 3, 11, 7, 11, 105, 10, 11, 12, 11, 14, 11, 108, 11, 11, 3, 11, 3, 11, 3, 11, 2, 3, 4, 12, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 2, 10, 3, 2, 13, 15, 3, 2, 16, 17, 3, 2, 18, 24, 3, 2, 9, 11, 3, 2, 33, 34, 4, 2, 33, 33, 35, 35, 3, 2, 28, 29, 3, 2, 30, 31, 2, 120, 2, 22, 3, 2, 2, 2, 4, 40, 3, 2, 2, 2, 6, 75, 3, 2, 2, 2, 8, 77, 3, 2, 2, 2, 10, 79, 3, 2, 2, 2, 12, 81, 3, 2, 2, 2, 14, 83, 3, 2, 2, 2, 16, 85, 3, 2, 2, 2, 18, 87, 3, 2, 2, 2, 20, 100, 3, 2, 2, 2, 22, 23, 5, 4, 3, 2, 23, 24, 7, 2, 2, 3, 24, 3, 3, 2, 2, 2, 25, 26, 8, 3, 1, 2, 26, 27, 5, 10, 6, 2, 27, 28, 5, 18, 10, 2, 28, 41, 3, 2, 2, 2, 29, 30, 7, 12, 2, 2, 30, 41, 5, 4, 3, 13, 31, 32, 7, 17, 2, 2, 32, 41, 5, 4, 3, 12, 33, 34, 7, 26, 2, 2, 34, 41, 5, 8, 5, 2, 35, 36, 7, 4, 2, 2, 36, 37, 5, 4, 3, 2, 37, 38, 7, 5, 2, 2, 38, 41, 3, 2, 2, 2, 39, 41, 5, 6, 4, 2, 40, 25, 3, 2, 2, 2, 40, 29, 3, 2, 2, 2, 40, 31, 3, 2, 2, 2, 40, 33, 3, 2, 2, 2, 40, 35, 3, 2, 2, 2, 40, 39, 3, 2, 2, 2, 41, 68, 3, 2, 2, 2, 42, 43, 12, 8, 2, 2, 43, 44, 9, 2, 2, 2, 44, 67, 5, 4, 3, 9, 45, 46, 12, 7, 2, 2, 46, 47, 9, 3, 2, 2, 47, 67, 5, 4, 3, 8, 48, 49, 12, 6, 2, 2, 49, 50, 9, 4, 2, 2, 50, 67, 5, 4, 3, 7, 51, 52, 12, 5, 2, 2, 52, 53, 9, 5, 2, 2, 53, 67, 5, 4, 3, 5, 54, 56, 12, 11, 2, 2, 55, 57, 7, 12, 2, 2, 56, 55, 3, 2, 2, 2, 56, 57, 3, 2, 2, 2, 57, 58, 3, 2, 2, 2, 58, 59, 7, 25, 2, 2, 59, 67, 5, 14, 8, 2, 60, 62, 12, 9, 2, 2, 61, 63, 7, 12, 2, 2, 62, 61, 3, 2, 2, 2, 62, 63, 3, 2, 2, 2, 63, 64, 3, 2, 2, 2, 64, 65, 7, 27, 2, 2, 65, 67, 5, 20, 11, 2, 66, 42, 3, 2, 2, 2, 66, 45, 3, 2, 2, 2, 66, 48, 3, 2, 2, 2, 66, 51, 3, 2, 2, 2, 66, 54, 3, 2, 2, 2, 66, 60, 3, 2, 2, 2, 67, 70, 3, 2, 2, 2, 68, 66, 3, 2, 2, 2, 68, 69, 3, 2, 2, 2, 69, 5, 3, 2, 2, 2, 70, 68, 3, 2, 2, 2, 71, 76, 5, 12, 7, 2, 72, 76, 5, 16, 9, 2, 73, 76, 5, 14, 8, 2, 74, 76, 5, 8, 5, 2, 75, 71, 3, 2, 2, 2, 75, 72, 3, 2, 2, 2, 75, 73, 3, 2, 2, 2, 75, 74, 3, 2, 2, 2, 76, 7, 3, 2, 2, 2, 77, 78, 9, 6, 2, 2, 78, 9, 3, 2, 2, 2, 79, 80, 9, 7, 2, 2, 80, 11, 3, 2, 2, 2, 81, 82, 9, 8, 2, 2, 82, 13, 3, 2, 2, 2, 83, 84, 9, 9, 2, 2, 84, 15, 3, 2, 2, 2, 85, 86, 7, 32, 2, 2, 86, 17, 3, 2, 2, 2, 87, 96, 7, 4, 2, 2, 88, 93, 5, 4, 3, 2, 89, 90, 7, 6, 2, 2, 90, 92, 5, 4, 3, 2, 91, 89, 3, 2, 2, 2, 92, 95, 3, 2, 2, 2, 93, 91, 3, 2, 2, 2, 93, 94, 3, 2, 2, 2, 94, 97, 3, 2, 2, 2, 95, 93, 3, 2, 2, 2, 96, 88, 3, 2, 2, 2, 96, 97, 3, 2, 2, 2, 97, 98, 3, 2, 2, 2, 98, 99, 7, 5, 2, 2, 99, 19, 3, 2, 2, 2, 100, 101, 7, 4, 2, 2, 101, 106, 5, 4, 3, 2, 102, 103, 7, 6, 2, 2, 103, 105, 5, 4, 3, 2, 104, 102, 3, 2, 2, 2, 105, 108, 3, 2, 2, 2, 106, 104, 3, 2, 2, 2, 106, 107, 3, 2, 2, 2, 107, 109, 3, 2, 2, 2, 108, 106, 3, 2, 2, 2, 109, 110, 7, 5, 2, 2, 110, 21, 3, 2, 2, 2, 11, 40, 56, 62, 66, 68, 75, 93, 96, 106]
\ No newline at end of file
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.tokens b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.tokens
new file mode 100644
index 000000000..913f0bc71
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.tokens
@@ -0,0 +1,59 @@
+SPACE=1
+LR_BRACKET=2
+RR_BRACKET=3
+COMMA=4
+SINGLE_QUOTE_SYMB=5
+DOUBLE_QUOTE_SYMB=6
+AND=7
+OR=8
+XOR=9
+NOT=10
+STAR=11
+DIVIDE=12
+MODULE=13
+PLUS=14
+MINUS=15
+EQUAL=16
+NOT_EQUAL=17
+GREATER=18
+GREATER_OR_EQUAL=19
+LESS=20
+LESS_GREATER=21
+LESS_OR_EQUAL=22
+LIKE=23
+EXISTS=24
+IN=25
+TRUE=26
+FALSE=27
+DQUOTED_STRING_LITERAL=28
+SQUOTED_STRING_LITERAL=29
+INTEGER_LITERAL=30
+IDENTIFIER=31
+IDENTIFIER_WITH_NUMBER=32
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE=33
+'('=2
+')'=3
+','=4
+'\''=5
+'"'=6
+'AND'=7
+'OR'=8
+'XOR'=9
+'NOT'=10
+'*'=11
+'/'=12
+'%'=13
+'+'=14
+'-'=15
+'='=16
+'!='=17
+'>'=18
+'>='=19
+'<'=20
+'<>'=21
+'<='=22
+'LIKE'=23
+'EXISTS'=24
+'IN'=25
+'TRUE'=26
+'FALSE'=27
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.interp b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.interp
new file mode 100644
index 000000000..318204ece
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.interp
@@ -0,0 +1,122 @@
+token literal names:
+null
+null
+'('
+')'
+','
+'\''
+'"'
+'AND'
+'OR'
+'XOR'
+'NOT'
+'*'
+'/'
+'%'
+'+'
+'-'
+'='
+'!='
+'>'
+'>='
+'<'
+'<>'
+'<='
+'LIKE'
+'EXISTS'
+'IN'
+'TRUE'
+'FALSE'
+null
+null
+null
+null
+null
+null
+
+token symbolic names:
+null
+SPACE
+LR_BRACKET
+RR_BRACKET
+COMMA
+SINGLE_QUOTE_SYMB
+DOUBLE_QUOTE_SYMB
+AND
+OR
+XOR
+NOT
+STAR
+DIVIDE
+MODULE
+PLUS
+MINUS
+EQUAL
+NOT_EQUAL
+GREATER
+GREATER_OR_EQUAL
+LESS
+LESS_GREATER
+LESS_OR_EQUAL
+LIKE
+EXISTS
+IN
+TRUE
+FALSE
+DQUOTED_STRING_LITERAL
+SQUOTED_STRING_LITERAL
+INTEGER_LITERAL
+IDENTIFIER
+IDENTIFIER_WITH_NUMBER
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE
+
+rule names:
+SPACE
+ID_LITERAL
+DQUOTA_STRING
+SQUOTA_STRING
+INT_DIGIT
+FN_LITERAL
+LR_BRACKET
+RR_BRACKET
+COMMA
+SINGLE_QUOTE_SYMB
+DOUBLE_QUOTE_SYMB
+QUOTE_SYMB
+AND
+OR
+XOR
+NOT
+STAR
+DIVIDE
+MODULE
+PLUS
+MINUS
+EQUAL
+NOT_EQUAL
+GREATER
+GREATER_OR_EQUAL
+LESS
+LESS_GREATER
+LESS_OR_EQUAL
+LIKE
+EXISTS
+IN
+TRUE
+FALSE
+DQUOTED_STRING_LITERAL
+SQUOTED_STRING_LITERAL
+INTEGER_LITERAL
+IDENTIFIER
+IDENTIFIER_WITH_NUMBER
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE
+
+channel names:
+DEFAULT_TOKEN_CHANNEL
+HIDDEN
+
+mode names:
+DEFAULT_MODE
+
+atn:
+[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 237, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4, 39, 9, 39, 4, 40, 9, 40, 3, 2, 6, 2, 83, 10, 2, 13, 2, 14, 2, 84, 3, 2, 3, 2, 3, 3, 6, 3, 90, 10, 3, 13, 3, 14, 3, 91, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4, 7, 4, 100, 10, 4, 12, 4, 14, 4, 103, 11, 4, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 7, 5, 113, 10, 5, 12, 5, 14, 5, 116, 11, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 7, 7, 124, 10, 7, 12, 7, 14, 7, 127, 11, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 5, 13, 141, 10, 13, 3, 14, 3, 14, 3, 14, 3, 14, 3, 15, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17, 3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 3, 21, 3, 21, 3, 22, 3, 22, 3, 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 27, 3, 27, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 3, 30, 3, 30, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 32, 3, 32, 3, 32, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 34, 3, 35, 3, 35, 3, 36, 3, 36, 3, 37, 6, 37, 217, 10, 37, 13, 37, 14, 37, 218, 3, 38, 6, 38, 222, 10, 38, 13, 38, 14, 38, 223, 3, 39, 6, 39, 227, 10, 39, 13, 39, 14, 39, 228, 3, 40, 3, 40, 7, 40, 233, 10, 40, 12, 40, 14, 40, 236, 11, 40, 2, 2, 41, 3, 3, 5, 2, 7, 2, 9, 2, 11, 2, 13, 2, 15, 4, 17, 5, 19, 6, 21, 7, 23, 8, 25, 2, 27, 9, 29, 10, 31, 11, 33, 12, 35, 13, 37, 14, 39, 15, 41, 16, 43, 17, 45, 18, 47, 19, 49, 20, 51, 21, 53, 22, 55, 23, 57, 24, 59, 25, 61, 26, 63, 27, 65, 28, 67, 29, 69, 30, 71, 31, 73, 32, 75, 33, 77, 34, 79, 35, 3, 2, 10, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 50, 59, 67, 92, 99, 124, 4, 2, 36, 36, 94, 94, 4, 2, 41, 41, 94, 94, 3, 2, 50, 59, 3, 2, 67, 92, 4, 2, 67, 92, 97, 97, 4, 2, 67, 92, 99, 124, 2, 244, 2, 3, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 2, 69, 3, 2, 2, 2, 2, 71, 3, 2, 2, 2, 2, 73, 3, 2, 2, 2, 2, 75, 3, 2, 2, 2, 2, 77, 3, 2, 2, 2, 2, 79, 3, 2, 2, 2, 3, 82, 3, 2, 2, 2, 5, 89, 3, 2, 2, 2, 7, 93, 3, 2, 2, 2, 9, 106, 3, 2, 2, 2, 11, 119, 3, 2, 2, 2, 13, 121, 3, 2, 2, 2, 15, 128, 3, 2, 2, 2, 17, 130, 3, 2, 2, 2, 19, 132, 3, 2, 2, 2, 21, 134, 3, 2, 2, 2, 23, 136, 3, 2, 2, 2, 25, 140, 3, 2, 2, 2, 27, 142, 3, 2, 2, 2, 29, 146, 3, 2, 2, 2, 31, 149, 3, 2, 2, 2, 33, 153, 3, 2, 2, 2, 35, 157, 3, 2, 2, 2, 37, 159, 3, 2, 2, 2, 39, 161, 3, 2, 2, 2, 41, 163, 3, 2, 2, 2, 43, 165, 3, 2, 2, 2, 45, 167, 3, 2, 2, 2, 47, 169, 3, 2, 2, 2, 49, 172, 3, 2, 2, 2, 51, 174, 3, 2, 2, 2, 53, 177, 3, 2, 2, 2, 55, 179, 3, 2, 2, 2, 57, 182, 3, 2, 2, 2, 59, 185, 3, 2, 2, 2, 61, 190, 3, 2, 2, 2, 63, 197, 3, 2, 2, 2, 65, 200, 
3, 2, 2, 2, 67, 205, 3, 2, 2, 2, 69, 211, 3, 2, 2, 2, 71, 213, 3, 2, 2, 2, 73, 216, 3, 2, 2, 2, 75, 221, 3, 2, 2, 2, 77, 226, 3, 2, 2, 2, 79, 230, 3, 2, 2, 2, 81, 83, 9, 2, 2, 2, 82, 81, 3, 2, 2, 2, 83, 84, 3, 2, 2, 2, 84, 82, 3, 2, 2, 2, 84, 85, 3, 2, 2, 2, 85, 86, 3, 2, 2, 2, 86, 87, 8, 2, 2, 2, 87, 4, 3, 2, 2, 2, 88, 90, 9, 3, 2, 2, 89, 88, 3, 2, 2, 2, 90, 91, 3, 2, 2, 2, 91, 89, 3, 2, 2, 2, 91, 92, 3, 2, 2, 2, 92, 6, 3, 2, 2, 2, 93, 101, 7, 36, 2, 2, 94, 95, 7, 94, 2, 2, 95, 100, 11, 2, 2, 2, 96, 97, 7, 36, 2, 2, 97, 100, 7, 36, 2, 2, 98, 100, 10, 4, 2, 2, 99, 94, 3, 2, 2, 2, 99, 96, 3, 2, 2, 2, 99, 98, 3, 2, 2, 2, 100, 103, 3, 2, 2, 2, 101, 99, 3, 2, 2, 2, 101, 102, 3, 2, 2, 2, 102, 104, 3, 2, 2, 2, 103, 101, 3, 2, 2, 2, 104, 105, 7, 36, 2, 2, 105, 8, 3, 2, 2, 2, 106, 114, 7, 41, 2, 2, 107, 108, 7, 94, 2, 2, 108, 113, 11, 2, 2, 2, 109, 110, 7, 41, 2, 2, 110, 113, 7, 41, 2, 2, 111, 113, 10, 5, 2, 2, 112, 107, 3, 2, 2, 2, 112, 109, 3, 2, 2, 2, 112, 111, 3, 2, 2, 2, 113, 116, 3, 2, 2, 2, 114, 112, 3, 2, 2, 2, 114, 115, 3, 2, 2, 2, 115, 117, 3, 2, 2, 2, 116, 114, 3, 2, 2, 2, 117, 118, 7, 41, 2, 2, 118, 10, 3, 2, 2, 2, 119, 120, 9, 6, 2, 2, 120, 12, 3, 2, 2, 2, 121, 125, 9, 7, 2, 2, 122, 124, 9, 8, 2, 2, 123, 122, 3, 2, 2, 2, 124, 127, 3, 2, 2, 2, 125, 123, 3, 2, 2, 2, 125, 126, 3, 2, 2, 2, 126, 14, 3, 2, 2, 2, 127, 125, 3, 2, 2, 2, 128, 129, 7, 42, 2, 2, 129, 16, 3, 2, 2, 2, 130, 131, 7, 43, 2, 2, 131, 18, 3, 2, 2, 2, 132, 133, 7, 46, 2, 2, 133, 20, 3, 2, 2, 2, 134, 135, 7, 41, 2, 2, 135, 22, 3, 2, 2, 2, 136, 137, 7, 36, 2, 2, 137, 24, 3, 2, 2, 2, 138, 141, 5, 21, 11, 2, 139, 141, 5, 23, 12, 2, 140, 138, 3, 2, 2, 2, 140, 139, 3, 2, 2, 2, 141, 26, 3, 2, 2, 2, 142, 143, 7, 67, 2, 2, 143, 144, 7, 80, 2, 2, 144, 145, 7, 70, 2, 2, 145, 28, 3, 2, 2, 2, 146, 147, 7, 81, 2, 2, 147, 148, 7, 84, 2, 2, 148, 30, 3, 2, 2, 2, 149, 150, 7, 90, 2, 2, 150, 151, 7, 81, 2, 2, 151, 152, 7, 84, 2, 2, 152, 32, 3, 2, 2, 2, 153, 154, 7, 80, 2, 2, 154, 155, 7, 81, 2, 2, 155, 156, 7, 86, 2, 2, 156, 34, 3, 2, 2, 2, 157, 158, 7, 44, 2, 2, 158, 36, 3, 2, 2, 2, 159, 160, 7, 49, 2, 2, 160, 38, 3, 2, 2, 2, 161, 162, 7, 39, 2, 2, 162, 40, 3, 2, 2, 2, 163, 164, 7, 45, 2, 2, 164, 42, 3, 2, 2, 2, 165, 166, 7, 47, 2, 2, 166, 44, 3, 2, 2, 2, 167, 168, 7, 63, 2, 2, 168, 46, 3, 2, 2, 2, 169, 170, 7, 35, 2, 2, 170, 171, 7, 63, 2, 2, 171, 48, 3, 2, 2, 2, 172, 173, 7, 64, 2, 2, 173, 50, 3, 2, 2, 2, 174, 175, 7, 64, 2, 2, 175, 176, 7, 63, 2, 2, 176, 52, 3, 2, 2, 2, 177, 178, 7, 62, 2, 2, 178, 54, 3, 2, 2, 2, 179, 180, 7, 62, 2, 2, 180, 181, 7, 64, 2, 2, 181, 56, 3, 2, 2, 2, 182, 183, 7, 62, 2, 2, 183, 184, 7, 63, 2, 2, 184, 58, 3, 2, 2, 2, 185, 186, 7, 78, 2, 2, 186, 187, 7, 75, 2, 2, 187, 188, 7, 77, 2, 2, 188, 189, 7, 71, 2, 2, 189, 60, 3, 2, 2, 2, 190, 191, 7, 71, 2, 2, 191, 192, 7, 90, 2, 2, 192, 193, 7, 75, 2, 2, 193, 194, 7, 85, 2, 2, 194, 195, 7, 86, 2, 2, 195, 196, 7, 85, 2, 2, 196, 62, 3, 2, 2, 2, 197, 198, 7, 75, 2, 2, 198, 199, 7, 80, 2, 2, 199, 64, 3, 2, 2, 2, 200, 201, 7, 86, 2, 2, 201, 202, 7, 84, 2, 2, 202, 203, 7, 87, 2, 2, 203, 204, 7, 71, 2, 2, 204, 66, 3, 2, 2, 2, 205, 206, 7, 72, 2, 2, 206, 207, 7, 67, 2, 2, 207, 208, 7, 78, 2, 2, 208, 209, 7, 85, 2, 2, 209, 210, 7, 71, 2, 2, 210, 68, 3, 2, 2, 2, 211, 212, 5, 7, 4, 2, 212, 70, 3, 2, 2, 2, 213, 214, 5, 9, 5, 2, 214, 72, 3, 2, 2, 2, 215, 217, 5, 11, 6, 2, 216, 215, 3, 2, 2, 2, 217, 218, 3, 2, 2, 2, 218, 216, 3, 2, 2, 2, 218, 219, 3, 2, 2, 2, 219, 74, 3, 2, 2, 2, 220, 222, 9, 9, 2, 2, 221, 220, 3, 2, 2, 2, 222, 223, 3, 2, 2, 2, 223, 221, 3, 2, 2, 2, 223, 224, 3, 2, 
2, 2, 224, 76, 3, 2, 2, 2, 225, 227, 9, 3, 2, 2, 226, 225, 3, 2, 2, 2, 227, 228, 3, 2, 2, 2, 228, 226, 3, 2, 2, 2, 228, 229, 3, 2, 2, 2, 229, 78, 3, 2, 2, 2, 230, 234, 9, 7, 2, 2, 231, 233, 9, 8, 2, 2, 232, 231, 3, 2, 2, 2, 233, 236, 3, 2, 2, 2, 234, 232, 3, 2, 2, 2, 234, 235, 3, 2, 2, 2, 235, 80, 3, 2, 2, 2, 236, 234, 3, 2, 2, 2, 15, 2, 84, 91, 99, 101, 112, 114, 125, 140, 218, 223, 228, 234, 3, 8, 2, 2]
\ No newline at end of file
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.tokens b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.tokens
new file mode 100644
index 000000000..913f0bc71
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.tokens
@@ -0,0 +1,59 @@
+SPACE=1
+LR_BRACKET=2
+RR_BRACKET=3
+COMMA=4
+SINGLE_QUOTE_SYMB=5
+DOUBLE_QUOTE_SYMB=6
+AND=7
+OR=8
+XOR=9
+NOT=10
+STAR=11
+DIVIDE=12
+MODULE=13
+PLUS=14
+MINUS=15
+EQUAL=16
+NOT_EQUAL=17
+GREATER=18
+GREATER_OR_EQUAL=19
+LESS=20
+LESS_GREATER=21
+LESS_OR_EQUAL=22
+LIKE=23
+EXISTS=24
+IN=25
+TRUE=26
+FALSE=27
+DQUOTED_STRING_LITERAL=28
+SQUOTED_STRING_LITERAL=29
+INTEGER_LITERAL=30
+IDENTIFIER=31
+IDENTIFIER_WITH_NUMBER=32
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE=33
+'('=2
+')'=3
+','=4
+'\''=5
+'"'=6
+'AND'=7
+'OR'=8
+'XOR'=9
+'NOT'=10
+'*'=11
+'/'=12
+'%'=13
+'+'=14
+'-'=15
+'='=16
+'!='=17
+'>'=18
+'>='=19
+'<'=20
+'<>'=21
+'<='=22
+'LIKE'=23
+'EXISTS'=24
+'IN'=25
+'TRUE'=26
+'FALSE'=27
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_base_visitor.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_base_visitor.go
new file mode 100644
index 000000000..a7eb0e1f3
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_base_visitor.go
@@ -0,0 +1,109 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+// Code generated from CESQLParser.g4 by ANTLR 4.9. DO NOT EDIT.
+
+package gen // CESQLParser
+import "github.com/antlr/antlr4/runtime/Go/antlr"
+
+type BaseCESQLParserVisitor struct {
+ *antlr.BaseParseTreeVisitor
+}
+
+func (v *BaseCESQLParserVisitor) VisitCesql(ctx *CesqlContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitInExpression(ctx *InExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBinaryComparisonExpression(ctx *BinaryComparisonExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitAtomExpression(ctx *AtomExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitExistsExpression(ctx *ExistsExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBinaryLogicExpression(ctx *BinaryLogicExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitLikeExpression(ctx *LikeExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitFunctionInvocationExpression(ctx *FunctionInvocationExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBinaryMultiplicativeExpression(ctx *BinaryMultiplicativeExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitUnaryLogicExpression(ctx *UnaryLogicExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitUnaryNumericExpression(ctx *UnaryNumericExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitSubExpression(ctx *SubExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBinaryAdditiveExpression(ctx *BinaryAdditiveExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBooleanAtom(ctx *BooleanAtomContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitIntegerAtom(ctx *IntegerAtomContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitStringAtom(ctx *StringAtomContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitIdentifierAtom(ctx *IdentifierAtomContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitIdentifier(ctx *IdentifierContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitFunctionIdentifier(ctx *FunctionIdentifierContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBooleanLiteral(ctx *BooleanLiteralContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitStringLiteral(ctx *StringLiteralContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitIntegerLiteral(ctx *IntegerLiteralContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitFunctionParameterList(ctx *FunctionParameterListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitSetExpression(ctx *SetExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_lexer.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_lexer.go
new file mode 100644
index 000000000..b51903faa
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_lexer.go
@@ -0,0 +1,231 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+// Code generated from CESQLParser.g4 by ANTLR 4.9. DO NOT EDIT.
+
+package gen
+
+import (
+ "fmt"
+ "unicode"
+
+ "github.com/antlr/antlr4/runtime/Go/antlr"
+)
+
+// Suppress unused import error
+var _ = fmt.Printf
+var _ = unicode.IsLetter
+
+var serializedLexerAtn = []uint16{
+ 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 237,
+ 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7,
+ 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12,
+ 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4,
+ 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23,
+ 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9,
+ 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33,
+ 4, 34, 9, 34, 4, 35, 9, 35, 4, 36, 9, 36, 4, 37, 9, 37, 4, 38, 9, 38, 4,
+ 39, 9, 39, 4, 40, 9, 40, 3, 2, 6, 2, 83, 10, 2, 13, 2, 14, 2, 84, 3, 2,
+ 3, 2, 3, 3, 6, 3, 90, 10, 3, 13, 3, 14, 3, 91, 3, 4, 3, 4, 3, 4, 3, 4,
+ 3, 4, 3, 4, 7, 4, 100, 10, 4, 12, 4, 14, 4, 103, 11, 4, 3, 4, 3, 4, 3,
+ 5, 3, 5, 3, 5, 3, 5, 3, 5, 3, 5, 7, 5, 113, 10, 5, 12, 5, 14, 5, 116, 11,
+ 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 7, 3, 7, 7, 7, 124, 10, 7, 12, 7, 14, 7,
+ 127, 11, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12,
+ 3, 12, 3, 13, 3, 13, 5, 13, 141, 10, 13, 3, 14, 3, 14, 3, 14, 3, 14, 3,
+ 15, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 17, 3, 17, 3, 17, 3, 17,
+ 3, 18, 3, 18, 3, 19, 3, 19, 3, 20, 3, 20, 3, 21, 3, 21, 3, 22, 3, 22, 3,
+ 23, 3, 23, 3, 24, 3, 24, 3, 24, 3, 25, 3, 25, 3, 26, 3, 26, 3, 26, 3, 27,
+ 3, 27, 3, 28, 3, 28, 3, 28, 3, 29, 3, 29, 3, 29, 3, 30, 3, 30, 3, 30, 3,
+ 30, 3, 30, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 31, 3, 32, 3, 32,
+ 3, 32, 3, 33, 3, 33, 3, 33, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 3, 34, 3,
+ 34, 3, 34, 3, 35, 3, 35, 3, 36, 3, 36, 3, 37, 6, 37, 217, 10, 37, 13, 37,
+ 14, 37, 218, 3, 38, 6, 38, 222, 10, 38, 13, 38, 14, 38, 223, 3, 39, 6,
+ 39, 227, 10, 39, 13, 39, 14, 39, 228, 3, 40, 3, 40, 7, 40, 233, 10, 40,
+ 12, 40, 14, 40, 236, 11, 40, 2, 2, 41, 3, 3, 5, 2, 7, 2, 9, 2, 11, 2, 13,
+ 2, 15, 4, 17, 5, 19, 6, 21, 7, 23, 8, 25, 2, 27, 9, 29, 10, 31, 11, 33,
+ 12, 35, 13, 37, 14, 39, 15, 41, 16, 43, 17, 45, 18, 47, 19, 49, 20, 51,
+ 21, 53, 22, 55, 23, 57, 24, 59, 25, 61, 26, 63, 27, 65, 28, 67, 29, 69,
+ 30, 71, 31, 73, 32, 75, 33, 77, 34, 79, 35, 3, 2, 10, 5, 2, 11, 12, 15,
+ 15, 34, 34, 5, 2, 50, 59, 67, 92, 99, 124, 4, 2, 36, 36, 94, 94, 4, 2,
+ 41, 41, 94, 94, 3, 2, 50, 59, 3, 2, 67, 92, 4, 2, 67, 92, 97, 97, 4, 2,
+ 67, 92, 99, 124, 2, 244, 2, 3, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3,
+ 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 27,
+ 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2,
+ 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2,
+ 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2,
+ 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2,
+ 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3,
+ 2, 2, 2, 2, 67, 3, 2, 2, 2, 2, 69, 3, 2, 2, 2, 2, 71, 3, 2, 2, 2, 2, 73,
+ 3, 2, 2, 2, 2, 75, 3, 2, 2, 2, 2, 77, 3, 2, 2, 2, 2, 79, 3, 2, 2, 2, 3,
+ 82, 3, 2, 2, 2, 5, 89, 3, 2, 2, 2, 7, 93, 3, 2, 2, 2, 9, 106, 3, 2, 2,
+ 2, 11, 119, 3, 2, 2, 2, 13, 121, 3, 2, 2, 2, 15, 128, 3, 2, 2, 2, 17, 130,
+ 3, 2, 2, 2, 19, 132, 3, 2, 2, 2, 21, 134, 3, 2, 2, 2, 23, 136, 3, 2, 2,
+ 2, 25, 140, 3, 2, 2, 2, 27, 142, 3, 2, 2, 2, 29, 146, 3, 2, 2, 2, 31, 149,
+ 3, 2, 2, 2, 33, 153, 3, 2, 2, 2, 35, 157, 3, 2, 2, 2, 37, 159, 3, 2, 2,
+ 2, 39, 161, 3, 2, 2, 2, 41, 163, 3, 2, 2, 2, 43, 165, 3, 2, 2, 2, 45, 167,
+ 3, 2, 2, 2, 47, 169, 3, 2, 2, 2, 49, 172, 3, 2, 2, 2, 51, 174, 3, 2, 2,
+ 2, 53, 177, 3, 2, 2, 2, 55, 179, 3, 2, 2, 2, 57, 182, 3, 2, 2, 2, 59, 185,
+ 3, 2, 2, 2, 61, 190, 3, 2, 2, 2, 63, 197, 3, 2, 2, 2, 65, 200, 3, 2, 2,
+ 2, 67, 205, 3, 2, 2, 2, 69, 211, 3, 2, 2, 2, 71, 213, 3, 2, 2, 2, 73, 216,
+ 3, 2, 2, 2, 75, 221, 3, 2, 2, 2, 77, 226, 3, 2, 2, 2, 79, 230, 3, 2, 2,
+ 2, 81, 83, 9, 2, 2, 2, 82, 81, 3, 2, 2, 2, 83, 84, 3, 2, 2, 2, 84, 82,
+ 3, 2, 2, 2, 84, 85, 3, 2, 2, 2, 85, 86, 3, 2, 2, 2, 86, 87, 8, 2, 2, 2,
+ 87, 4, 3, 2, 2, 2, 88, 90, 9, 3, 2, 2, 89, 88, 3, 2, 2, 2, 90, 91, 3, 2,
+ 2, 2, 91, 89, 3, 2, 2, 2, 91, 92, 3, 2, 2, 2, 92, 6, 3, 2, 2, 2, 93, 101,
+ 7, 36, 2, 2, 94, 95, 7, 94, 2, 2, 95, 100, 11, 2, 2, 2, 96, 97, 7, 36,
+ 2, 2, 97, 100, 7, 36, 2, 2, 98, 100, 10, 4, 2, 2, 99, 94, 3, 2, 2, 2, 99,
+ 96, 3, 2, 2, 2, 99, 98, 3, 2, 2, 2, 100, 103, 3, 2, 2, 2, 101, 99, 3, 2,
+ 2, 2, 101, 102, 3, 2, 2, 2, 102, 104, 3, 2, 2, 2, 103, 101, 3, 2, 2, 2,
+ 104, 105, 7, 36, 2, 2, 105, 8, 3, 2, 2, 2, 106, 114, 7, 41, 2, 2, 107,
+ 108, 7, 94, 2, 2, 108, 113, 11, 2, 2, 2, 109, 110, 7, 41, 2, 2, 110, 113,
+ 7, 41, 2, 2, 111, 113, 10, 5, 2, 2, 112, 107, 3, 2, 2, 2, 112, 109, 3,
+ 2, 2, 2, 112, 111, 3, 2, 2, 2, 113, 116, 3, 2, 2, 2, 114, 112, 3, 2, 2,
+ 2, 114, 115, 3, 2, 2, 2, 115, 117, 3, 2, 2, 2, 116, 114, 3, 2, 2, 2, 117,
+ 118, 7, 41, 2, 2, 118, 10, 3, 2, 2, 2, 119, 120, 9, 6, 2, 2, 120, 12, 3,
+ 2, 2, 2, 121, 125, 9, 7, 2, 2, 122, 124, 9, 8, 2, 2, 123, 122, 3, 2, 2,
+ 2, 124, 127, 3, 2, 2, 2, 125, 123, 3, 2, 2, 2, 125, 126, 3, 2, 2, 2, 126,
+ 14, 3, 2, 2, 2, 127, 125, 3, 2, 2, 2, 128, 129, 7, 42, 2, 2, 129, 16, 3,
+ 2, 2, 2, 130, 131, 7, 43, 2, 2, 131, 18, 3, 2, 2, 2, 132, 133, 7, 46, 2,
+ 2, 133, 20, 3, 2, 2, 2, 134, 135, 7, 41, 2, 2, 135, 22, 3, 2, 2, 2, 136,
+ 137, 7, 36, 2, 2, 137, 24, 3, 2, 2, 2, 138, 141, 5, 21, 11, 2, 139, 141,
+ 5, 23, 12, 2, 140, 138, 3, 2, 2, 2, 140, 139, 3, 2, 2, 2, 141, 26, 3, 2,
+ 2, 2, 142, 143, 7, 67, 2, 2, 143, 144, 7, 80, 2, 2, 144, 145, 7, 70, 2,
+ 2, 145, 28, 3, 2, 2, 2, 146, 147, 7, 81, 2, 2, 147, 148, 7, 84, 2, 2, 148,
+ 30, 3, 2, 2, 2, 149, 150, 7, 90, 2, 2, 150, 151, 7, 81, 2, 2, 151, 152,
+ 7, 84, 2, 2, 152, 32, 3, 2, 2, 2, 153, 154, 7, 80, 2, 2, 154, 155, 7, 81,
+ 2, 2, 155, 156, 7, 86, 2, 2, 156, 34, 3, 2, 2, 2, 157, 158, 7, 44, 2, 2,
+ 158, 36, 3, 2, 2, 2, 159, 160, 7, 49, 2, 2, 160, 38, 3, 2, 2, 2, 161, 162,
+ 7, 39, 2, 2, 162, 40, 3, 2, 2, 2, 163, 164, 7, 45, 2, 2, 164, 42, 3, 2,
+ 2, 2, 165, 166, 7, 47, 2, 2, 166, 44, 3, 2, 2, 2, 167, 168, 7, 63, 2, 2,
+ 168, 46, 3, 2, 2, 2, 169, 170, 7, 35, 2, 2, 170, 171, 7, 63, 2, 2, 171,
+ 48, 3, 2, 2, 2, 172, 173, 7, 64, 2, 2, 173, 50, 3, 2, 2, 2, 174, 175, 7,
+ 64, 2, 2, 175, 176, 7, 63, 2, 2, 176, 52, 3, 2, 2, 2, 177, 178, 7, 62,
+ 2, 2, 178, 54, 3, 2, 2, 2, 179, 180, 7, 62, 2, 2, 180, 181, 7, 64, 2, 2,
+ 181, 56, 3, 2, 2, 2, 182, 183, 7, 62, 2, 2, 183, 184, 7, 63, 2, 2, 184,
+ 58, 3, 2, 2, 2, 185, 186, 7, 78, 2, 2, 186, 187, 7, 75, 2, 2, 187, 188,
+ 7, 77, 2, 2, 188, 189, 7, 71, 2, 2, 189, 60, 3, 2, 2, 2, 190, 191, 7, 71,
+ 2, 2, 191, 192, 7, 90, 2, 2, 192, 193, 7, 75, 2, 2, 193, 194, 7, 85, 2,
+ 2, 194, 195, 7, 86, 2, 2, 195, 196, 7, 85, 2, 2, 196, 62, 3, 2, 2, 2, 197,
+ 198, 7, 75, 2, 2, 198, 199, 7, 80, 2, 2, 199, 64, 3, 2, 2, 2, 200, 201,
+ 7, 86, 2, 2, 201, 202, 7, 84, 2, 2, 202, 203, 7, 87, 2, 2, 203, 204, 7,
+ 71, 2, 2, 204, 66, 3, 2, 2, 2, 205, 206, 7, 72, 2, 2, 206, 207, 7, 67,
+ 2, 2, 207, 208, 7, 78, 2, 2, 208, 209, 7, 85, 2, 2, 209, 210, 7, 71, 2,
+ 2, 210, 68, 3, 2, 2, 2, 211, 212, 5, 7, 4, 2, 212, 70, 3, 2, 2, 2, 213,
+ 214, 5, 9, 5, 2, 214, 72, 3, 2, 2, 2, 215, 217, 5, 11, 6, 2, 216, 215,
+ 3, 2, 2, 2, 217, 218, 3, 2, 2, 2, 218, 216, 3, 2, 2, 2, 218, 219, 3, 2,
+ 2, 2, 219, 74, 3, 2, 2, 2, 220, 222, 9, 9, 2, 2, 221, 220, 3, 2, 2, 2,
+ 222, 223, 3, 2, 2, 2, 223, 221, 3, 2, 2, 2, 223, 224, 3, 2, 2, 2, 224,
+ 76, 3, 2, 2, 2, 225, 227, 9, 3, 2, 2, 226, 225, 3, 2, 2, 2, 227, 228, 3,
+ 2, 2, 2, 228, 226, 3, 2, 2, 2, 228, 229, 3, 2, 2, 2, 229, 78, 3, 2, 2,
+ 2, 230, 234, 9, 7, 2, 2, 231, 233, 9, 8, 2, 2, 232, 231, 3, 2, 2, 2, 233,
+ 236, 3, 2, 2, 2, 234, 232, 3, 2, 2, 2, 234, 235, 3, 2, 2, 2, 235, 80, 3,
+ 2, 2, 2, 236, 234, 3, 2, 2, 2, 15, 2, 84, 91, 99, 101, 112, 114, 125, 140,
+ 218, 223, 228, 234, 3, 8, 2, 2,
+}
+
+var lexerChannelNames = []string{
+ "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
+}
+
+var lexerModeNames = []string{
+ "DEFAULT_MODE",
+}
+
+var lexerLiteralNames = []string{
+ "", "", "'('", "')'", "','", "'''", "'\"'", "'AND'", "'OR'", "'XOR'", "'NOT'",
+ "'*'", "'/'", "'%'", "'+'", "'-'", "'='", "'!='", "'>'", "'>='", "'<'",
+ "'<>'", "'<='", "'LIKE'", "'EXISTS'", "'IN'", "'TRUE'", "'FALSE'",
+}
+
+var lexerSymbolicNames = []string{
+ "", "SPACE", "LR_BRACKET", "RR_BRACKET", "COMMA", "SINGLE_QUOTE_SYMB",
+ "DOUBLE_QUOTE_SYMB", "AND", "OR", "XOR", "NOT", "STAR", "DIVIDE", "MODULE",
+ "PLUS", "MINUS", "EQUAL", "NOT_EQUAL", "GREATER", "GREATER_OR_EQUAL", "LESS",
+ "LESS_GREATER", "LESS_OR_EQUAL", "LIKE", "EXISTS", "IN", "TRUE", "FALSE",
+ "DQUOTED_STRING_LITERAL", "SQUOTED_STRING_LITERAL", "INTEGER_LITERAL",
+ "IDENTIFIER", "IDENTIFIER_WITH_NUMBER", "FUNCTION_IDENTIFIER_WITH_UNDERSCORE",
+}
+
+var lexerRuleNames = []string{
+ "SPACE", "ID_LITERAL", "DQUOTA_STRING", "SQUOTA_STRING", "INT_DIGIT", "FN_LITERAL",
+ "LR_BRACKET", "RR_BRACKET", "COMMA", "SINGLE_QUOTE_SYMB", "DOUBLE_QUOTE_SYMB",
+ "QUOTE_SYMB", "AND", "OR", "XOR", "NOT", "STAR", "DIVIDE", "MODULE", "PLUS",
+ "MINUS", "EQUAL", "NOT_EQUAL", "GREATER", "GREATER_OR_EQUAL", "LESS", "LESS_GREATER",
+ "LESS_OR_EQUAL", "LIKE", "EXISTS", "IN", "TRUE", "FALSE", "DQUOTED_STRING_LITERAL",
+ "SQUOTED_STRING_LITERAL", "INTEGER_LITERAL", "IDENTIFIER", "IDENTIFIER_WITH_NUMBER",
+ "FUNCTION_IDENTIFIER_WITH_UNDERSCORE",
+}
+
+type CESQLParserLexer struct {
+ *antlr.BaseLexer
+ channelNames []string
+ modeNames []string
+ // TODO: EOF string
+}
+
+// NewCESQLParserLexer produces a new lexer instance for the optional input antlr.CharStream.
+//
+// The *CESQLParserLexer instance produced may be reused by calling the SetInputStream method.
+// The initial lexer configuration is expensive to construct, and the object is not thread-safe;
+// however, if used within a Golang sync.Pool, the construction cost amortizes well and the
+// objects can be used in a thread-safe manner.
+func NewCESQLParserLexer(input antlr.CharStream) *CESQLParserLexer {
+ l := new(CESQLParserLexer)
+ lexerDeserializer := antlr.NewATNDeserializer(nil)
+ lexerAtn := lexerDeserializer.DeserializeFromUInt16(serializedLexerAtn)
+ lexerDecisionToDFA := make([]*antlr.DFA, len(lexerAtn.DecisionToState))
+ for index, ds := range lexerAtn.DecisionToState {
+ lexerDecisionToDFA[index] = antlr.NewDFA(ds, index)
+ }
+ l.BaseLexer = antlr.NewBaseLexer(input)
+ l.Interpreter = antlr.NewLexerATNSimulator(l, lexerAtn, lexerDecisionToDFA, antlr.NewPredictionContextCache())
+
+ l.channelNames = lexerChannelNames
+ l.modeNames = lexerModeNames
+ l.RuleNames = lexerRuleNames
+ l.LiteralNames = lexerLiteralNames
+ l.SymbolicNames = lexerSymbolicNames
+ l.GrammarFileName = "CESQLParser.g4"
+ // TODO: l.EOF = antlr.TokenEOF
+
+ return l
+}
+
+// CESQLParserLexer tokens.
+const (
+ CESQLParserLexerSPACE = 1
+ CESQLParserLexerLR_BRACKET = 2
+ CESQLParserLexerRR_BRACKET = 3
+ CESQLParserLexerCOMMA = 4
+ CESQLParserLexerSINGLE_QUOTE_SYMB = 5
+ CESQLParserLexerDOUBLE_QUOTE_SYMB = 6
+ CESQLParserLexerAND = 7
+ CESQLParserLexerOR = 8
+ CESQLParserLexerXOR = 9
+ CESQLParserLexerNOT = 10
+ CESQLParserLexerSTAR = 11
+ CESQLParserLexerDIVIDE = 12
+ CESQLParserLexerMODULE = 13
+ CESQLParserLexerPLUS = 14
+ CESQLParserLexerMINUS = 15
+ CESQLParserLexerEQUAL = 16
+ CESQLParserLexerNOT_EQUAL = 17
+ CESQLParserLexerGREATER = 18
+ CESQLParserLexerGREATER_OR_EQUAL = 19
+ CESQLParserLexerLESS = 20
+ CESQLParserLexerLESS_GREATER = 21
+ CESQLParserLexerLESS_OR_EQUAL = 22
+ CESQLParserLexerLIKE = 23
+ CESQLParserLexerEXISTS = 24
+ CESQLParserLexerIN = 25
+ CESQLParserLexerTRUE = 26
+ CESQLParserLexerFALSE = 27
+ CESQLParserLexerDQUOTED_STRING_LITERAL = 28
+ CESQLParserLexerSQUOTED_STRING_LITERAL = 29
+ CESQLParserLexerINTEGER_LITERAL = 30
+ CESQLParserLexerIDENTIFIER = 31
+ CESQLParserLexerIDENTIFIER_WITH_NUMBER = 32
+ CESQLParserLexerFUNCTION_IDENTIFIER_WITH_UNDERSCORE = 33
+)
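
A minimal sketch of how the generated lexer above can be driven on its own: it tokenizes a sample CESQL expression and prints each token's symbolic name and text. Only the APIs visible in this hunk are used — `gen.NewCESQLParserLexer` plus the `antlr` runtime package the generated code already imports; the sample expression is illustrative.

```go
package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
	"github.com/cloudevents/sdk-go/sql/v2/gen"
)

func main() {
	// Build a character stream and feed it to the generated lexer.
	input := antlr.NewInputStream("subject = 'test' AND EXISTS myext")
	lexer := gen.NewCESQLParserLexer(input)

	// Buffer all tokens from the default channel.
	stream := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	stream.Fill()

	// Print symbolic name and text for each token, stopping at EOF
	// (the EOF token type is negative and has no symbolic name entry).
	names := lexer.GetSymbolicNames()
	for _, tok := range stream.GetAllTokens() {
		tt := tok.GetTokenType()
		if tt == antlr.TokenEOF {
			break
		}
		fmt.Printf("%-25s %q\n", names[tt], tok.GetText())
	}
}
```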
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_parser.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_parser.go
new file mode 100644
index 000000000..71d9837ee
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_parser.go
@@ -0,0 +1,2356 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+// Code generated from CESQLParser.g4 by ANTLR 4.9. DO NOT EDIT.
+
+package gen // CESQLParser
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "github.com/antlr/antlr4/runtime/Go/antlr"
+)
+
+// Suppress unused import errors
+var _ = fmt.Printf
+var _ = reflect.Copy
+var _ = strconv.Itoa
+
+var parserATN = []uint16{
+ 3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 3, 35, 112,
+ 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7,
+ 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 3, 2, 3, 2, 3, 2, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 5, 3, 41, 10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 5, 3, 57, 10, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 5, 3, 63, 10, 3, 3, 3, 3, 3, 7, 3, 67, 10, 3, 12, 3, 14,
+ 3, 70, 11, 3, 3, 4, 3, 4, 3, 4, 3, 4, 5, 4, 76, 10, 4, 3, 5, 3, 5, 3, 6,
+ 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 10, 7,
+ 10, 92, 10, 10, 12, 10, 14, 10, 95, 11, 10, 5, 10, 97, 10, 10, 3, 10, 3,
+ 10, 3, 11, 3, 11, 3, 11, 3, 11, 7, 11, 105, 10, 11, 12, 11, 14, 11, 108,
+ 11, 11, 3, 11, 3, 11, 3, 11, 2, 3, 4, 12, 2, 4, 6, 8, 10, 12, 14, 16, 18,
+ 20, 2, 10, 3, 2, 13, 15, 3, 2, 16, 17, 3, 2, 18, 24, 3, 2, 9, 11, 3, 2,
+ 33, 34, 4, 2, 33, 33, 35, 35, 3, 2, 28, 29, 3, 2, 30, 31, 2, 120, 2, 22,
+ 3, 2, 2, 2, 4, 40, 3, 2, 2, 2, 6, 75, 3, 2, 2, 2, 8, 77, 3, 2, 2, 2, 10,
+ 79, 3, 2, 2, 2, 12, 81, 3, 2, 2, 2, 14, 83, 3, 2, 2, 2, 16, 85, 3, 2, 2,
+ 2, 18, 87, 3, 2, 2, 2, 20, 100, 3, 2, 2, 2, 22, 23, 5, 4, 3, 2, 23, 24,
+ 7, 2, 2, 3, 24, 3, 3, 2, 2, 2, 25, 26, 8, 3, 1, 2, 26, 27, 5, 10, 6, 2,
+ 27, 28, 5, 18, 10, 2, 28, 41, 3, 2, 2, 2, 29, 30, 7, 12, 2, 2, 30, 41,
+ 5, 4, 3, 13, 31, 32, 7, 17, 2, 2, 32, 41, 5, 4, 3, 12, 33, 34, 7, 26, 2,
+ 2, 34, 41, 5, 8, 5, 2, 35, 36, 7, 4, 2, 2, 36, 37, 5, 4, 3, 2, 37, 38,
+ 7, 5, 2, 2, 38, 41, 3, 2, 2, 2, 39, 41, 5, 6, 4, 2, 40, 25, 3, 2, 2, 2,
+ 40, 29, 3, 2, 2, 2, 40, 31, 3, 2, 2, 2, 40, 33, 3, 2, 2, 2, 40, 35, 3,
+ 2, 2, 2, 40, 39, 3, 2, 2, 2, 41, 68, 3, 2, 2, 2, 42, 43, 12, 8, 2, 2, 43,
+ 44, 9, 2, 2, 2, 44, 67, 5, 4, 3, 9, 45, 46, 12, 7, 2, 2, 46, 47, 9, 3,
+ 2, 2, 47, 67, 5, 4, 3, 8, 48, 49, 12, 6, 2, 2, 49, 50, 9, 4, 2, 2, 50,
+ 67, 5, 4, 3, 7, 51, 52, 12, 5, 2, 2, 52, 53, 9, 5, 2, 2, 53, 67, 5, 4,
+ 3, 5, 54, 56, 12, 11, 2, 2, 55, 57, 7, 12, 2, 2, 56, 55, 3, 2, 2, 2, 56,
+ 57, 3, 2, 2, 2, 57, 58, 3, 2, 2, 2, 58, 59, 7, 25, 2, 2, 59, 67, 5, 14,
+ 8, 2, 60, 62, 12, 9, 2, 2, 61, 63, 7, 12, 2, 2, 62, 61, 3, 2, 2, 2, 62,
+ 63, 3, 2, 2, 2, 63, 64, 3, 2, 2, 2, 64, 65, 7, 27, 2, 2, 65, 67, 5, 20,
+ 11, 2, 66, 42, 3, 2, 2, 2, 66, 45, 3, 2, 2, 2, 66, 48, 3, 2, 2, 2, 66,
+ 51, 3, 2, 2, 2, 66, 54, 3, 2, 2, 2, 66, 60, 3, 2, 2, 2, 67, 70, 3, 2, 2,
+ 2, 68, 66, 3, 2, 2, 2, 68, 69, 3, 2, 2, 2, 69, 5, 3, 2, 2, 2, 70, 68, 3,
+ 2, 2, 2, 71, 76, 5, 12, 7, 2, 72, 76, 5, 16, 9, 2, 73, 76, 5, 14, 8, 2,
+ 74, 76, 5, 8, 5, 2, 75, 71, 3, 2, 2, 2, 75, 72, 3, 2, 2, 2, 75, 73, 3,
+ 2, 2, 2, 75, 74, 3, 2, 2, 2, 76, 7, 3, 2, 2, 2, 77, 78, 9, 6, 2, 2, 78,
+ 9, 3, 2, 2, 2, 79, 80, 9, 7, 2, 2, 80, 11, 3, 2, 2, 2, 81, 82, 9, 8, 2,
+ 2, 82, 13, 3, 2, 2, 2, 83, 84, 9, 9, 2, 2, 84, 15, 3, 2, 2, 2, 85, 86,
+ 7, 32, 2, 2, 86, 17, 3, 2, 2, 2, 87, 96, 7, 4, 2, 2, 88, 93, 5, 4, 3, 2,
+ 89, 90, 7, 6, 2, 2, 90, 92, 5, 4, 3, 2, 91, 89, 3, 2, 2, 2, 92, 95, 3,
+ 2, 2, 2, 93, 91, 3, 2, 2, 2, 93, 94, 3, 2, 2, 2, 94, 97, 3, 2, 2, 2, 95,
+ 93, 3, 2, 2, 2, 96, 88, 3, 2, 2, 2, 96, 97, 3, 2, 2, 2, 97, 98, 3, 2, 2,
+ 2, 98, 99, 7, 5, 2, 2, 99, 19, 3, 2, 2, 2, 100, 101, 7, 4, 2, 2, 101, 106,
+ 5, 4, 3, 2, 102, 103, 7, 6, 2, 2, 103, 105, 5, 4, 3, 2, 104, 102, 3, 2,
+ 2, 2, 105, 108, 3, 2, 2, 2, 106, 104, 3, 2, 2, 2, 106, 107, 3, 2, 2, 2,
+ 107, 109, 3, 2, 2, 2, 108, 106, 3, 2, 2, 2, 109, 110, 7, 5, 2, 2, 110,
+ 21, 3, 2, 2, 2, 11, 40, 56, 62, 66, 68, 75, 93, 96, 106,
+}
+var literalNames = []string{
+ "", "", "'('", "')'", "','", "'''", "'\"'", "'AND'", "'OR'", "'XOR'", "'NOT'",
+ "'*'", "'/'", "'%'", "'+'", "'-'", "'='", "'!='", "'>'", "'>='", "'<'",
+ "'<>'", "'<='", "'LIKE'", "'EXISTS'", "'IN'", "'TRUE'", "'FALSE'",
+}
+var symbolicNames = []string{
+ "", "SPACE", "LR_BRACKET", "RR_BRACKET", "COMMA", "SINGLE_QUOTE_SYMB",
+ "DOUBLE_QUOTE_SYMB", "AND", "OR", "XOR", "NOT", "STAR", "DIVIDE", "MODULE",
+ "PLUS", "MINUS", "EQUAL", "NOT_EQUAL", "GREATER", "GREATER_OR_EQUAL", "LESS",
+ "LESS_GREATER", "LESS_OR_EQUAL", "LIKE", "EXISTS", "IN", "TRUE", "FALSE",
+ "DQUOTED_STRING_LITERAL", "SQUOTED_STRING_LITERAL", "INTEGER_LITERAL",
+ "IDENTIFIER", "IDENTIFIER_WITH_NUMBER", "FUNCTION_IDENTIFIER_WITH_UNDERSCORE",
+}
+
+var ruleNames = []string{
+ "cesql", "expression", "atom", "identifier", "functionIdentifier", "booleanLiteral",
+ "stringLiteral", "integerLiteral", "functionParameterList", "setExpression",
+}
+
+type CESQLParserParser struct {
+ *antlr.BaseParser
+}
+
+// NewCESQLParserParser produces a new parser instance for the optional input antlr.TokenStream.
+//
+// The *CESQLParserParser instance produced may be reused by calling the SetInputStream method.
+// The initial parser configuration is expensive to construct, and the object is not thread-safe;
+// however, if used within a Golang sync.Pool, the construction cost amortizes well and the
+// objects can be used in a thread-safe manner.
+func NewCESQLParserParser(input antlr.TokenStream) *CESQLParserParser {
+ this := new(CESQLParserParser)
+ deserializer := antlr.NewATNDeserializer(nil)
+ deserializedATN := deserializer.DeserializeFromUInt16(parserATN)
+ decisionToDFA := make([]*antlr.DFA, len(deserializedATN.DecisionToState))
+ for index, ds := range deserializedATN.DecisionToState {
+ decisionToDFA[index] = antlr.NewDFA(ds, index)
+ }
+ this.BaseParser = antlr.NewBaseParser(input)
+
+ this.Interpreter = antlr.NewParserATNSimulator(this, deserializedATN, decisionToDFA, antlr.NewPredictionContextCache())
+ this.RuleNames = ruleNames
+ this.LiteralNames = literalNames
+ this.SymbolicNames = symbolicNames
+ this.GrammarFileName = "CESQLParser.g4"
+
+ return this
+}
+
+// CESQLParserParser tokens.
+const (
+ CESQLParserParserEOF = antlr.TokenEOF
+ CESQLParserParserSPACE = 1
+ CESQLParserParserLR_BRACKET = 2
+ CESQLParserParserRR_BRACKET = 3
+ CESQLParserParserCOMMA = 4
+ CESQLParserParserSINGLE_QUOTE_SYMB = 5
+ CESQLParserParserDOUBLE_QUOTE_SYMB = 6
+ CESQLParserParserAND = 7
+ CESQLParserParserOR = 8
+ CESQLParserParserXOR = 9
+ CESQLParserParserNOT = 10
+ CESQLParserParserSTAR = 11
+ CESQLParserParserDIVIDE = 12
+ CESQLParserParserMODULE = 13
+ CESQLParserParserPLUS = 14
+ CESQLParserParserMINUS = 15
+ CESQLParserParserEQUAL = 16
+ CESQLParserParserNOT_EQUAL = 17
+ CESQLParserParserGREATER = 18
+ CESQLParserParserGREATER_OR_EQUAL = 19
+ CESQLParserParserLESS = 20
+ CESQLParserParserLESS_GREATER = 21
+ CESQLParserParserLESS_OR_EQUAL = 22
+ CESQLParserParserLIKE = 23
+ CESQLParserParserEXISTS = 24
+ CESQLParserParserIN = 25
+ CESQLParserParserTRUE = 26
+ CESQLParserParserFALSE = 27
+ CESQLParserParserDQUOTED_STRING_LITERAL = 28
+ CESQLParserParserSQUOTED_STRING_LITERAL = 29
+ CESQLParserParserINTEGER_LITERAL = 30
+ CESQLParserParserIDENTIFIER = 31
+ CESQLParserParserIDENTIFIER_WITH_NUMBER = 32
+ CESQLParserParserFUNCTION_IDENTIFIER_WITH_UNDERSCORE = 33
+)
+
+// CESQLParserParser rules.
+const (
+ CESQLParserParserRULE_cesql = 0
+ CESQLParserParserRULE_expression = 1
+ CESQLParserParserRULE_atom = 2
+ CESQLParserParserRULE_identifier = 3
+ CESQLParserParserRULE_functionIdentifier = 4
+ CESQLParserParserRULE_booleanLiteral = 5
+ CESQLParserParserRULE_stringLiteral = 6
+ CESQLParserParserRULE_integerLiteral = 7
+ CESQLParserParserRULE_functionParameterList = 8
+ CESQLParserParserRULE_setExpression = 9
+)
+
+// ICesqlContext is an interface to support dynamic dispatch.
+type ICesqlContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // IsCesqlContext differentiates from other interfaces.
+ IsCesqlContext()
+}
+
+type CesqlContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyCesqlContext() *CesqlContext {
+ var p = new(CesqlContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CESQLParserParserRULE_cesql
+ return p
+}
+
+func (*CesqlContext) IsCesqlContext() {}
+
+func NewCesqlContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *CesqlContext {
+ var p = new(CesqlContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CESQLParserParserRULE_cesql
+
+ return p
+}
+
+func (s *CesqlContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *CesqlContext) Expression() IExpressionContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *CesqlContext) EOF() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserEOF, 0)
+}
+
+func (s *CesqlContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *CesqlContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *CesqlContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitCesql(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CESQLParserParser) Cesql() (localctx ICesqlContext) {
+ localctx = NewCesqlContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 0, CESQLParserParserRULE_cesql)
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(20)
+ p.expression(0)
+ }
+ {
+ p.SetState(21)
+ p.Match(CESQLParserParserEOF)
+ }
+
+ return localctx
+}
+
+// IExpressionContext is an interface to support dynamic dispatch.
+type IExpressionContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // IsExpressionContext differentiates from other interfaces.
+ IsExpressionContext()
+}
+
+type ExpressionContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyExpressionContext() *ExpressionContext {
+ var p = new(ExpressionContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CESQLParserParserRULE_expression
+ return p
+}
+
+func (*ExpressionContext) IsExpressionContext() {}
+
+func NewExpressionContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExpressionContext {
+ var p = new(ExpressionContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CESQLParserParserRULE_expression
+
+ return p
+}
+
+func (s *ExpressionContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ExpressionContext) CopyFrom(ctx *ExpressionContext) {
+ s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext)
+}
+
+func (s *ExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ExpressionContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+type InExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewInExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *InExpressionContext {
+ var p = new(InExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *InExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *InExpressionContext) Expression() IExpressionContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *InExpressionContext) IN() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserIN, 0)
+}
+
+func (s *InExpressionContext) SetExpression() ISetExpressionContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*ISetExpressionContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(ISetExpressionContext)
+}
+
+func (s *InExpressionContext) NOT() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserNOT, 0)
+}
+
+func (s *InExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitInExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type BinaryComparisonExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewBinaryComparisonExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BinaryComparisonExpressionContext {
+ var p = new(BinaryComparisonExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *BinaryComparisonExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BinaryComparisonExpressionContext) AllExpression() []IExpressionContext {
+ var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpressionContext)(nil)).Elem())
+ var tst = make([]IExpressionContext, len(ts))
+
+ for i, t := range ts {
+ if t != nil {
+ tst[i] = t.(IExpressionContext)
+ }
+ }
+
+ return tst
+}
+
+func (s *BinaryComparisonExpressionContext) Expression(i int) IExpressionContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), i)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *BinaryComparisonExpressionContext) EQUAL() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserEQUAL, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) NOT_EQUAL() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserNOT_EQUAL, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) LESS_GREATER() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserLESS_GREATER, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) GREATER_OR_EQUAL() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserGREATER_OR_EQUAL, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) LESS_OR_EQUAL() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserLESS_OR_EQUAL, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) LESS() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserLESS, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) GREATER() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserGREATER, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitBinaryComparisonExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type AtomExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewAtomExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *AtomExpressionContext {
+ var p = new(AtomExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *AtomExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *AtomExpressionContext) Atom() IAtomContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IAtomContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IAtomContext)
+}
+
+func (s *AtomExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitAtomExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type ExistsExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewExistsExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExistsExpressionContext {
+ var p = new(ExistsExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *ExistsExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ExistsExpressionContext) EXISTS() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserEXISTS, 0)
+}
+
+func (s *ExistsExpressionContext) Identifier() IIdentifierContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IIdentifierContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IIdentifierContext)
+}
+
+func (s *ExistsExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitExistsExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type BinaryLogicExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewBinaryLogicExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BinaryLogicExpressionContext {
+ var p = new(BinaryLogicExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *BinaryLogicExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BinaryLogicExpressionContext) AllExpression() []IExpressionContext {
+ var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpressionContext)(nil)).Elem())
+ var tst = make([]IExpressionContext, len(ts))
+
+ for i, t := range ts {
+ if t != nil {
+ tst[i] = t.(IExpressionContext)
+ }
+ }
+
+ return tst
+}
+
+func (s *BinaryLogicExpressionContext) Expression(i int) IExpressionContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), i)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *BinaryLogicExpressionContext) AND() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserAND, 0)
+}
+
+func (s *BinaryLogicExpressionContext) OR() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserOR, 0)
+}
+
+func (s *BinaryLogicExpressionContext) XOR() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserXOR, 0)
+}
+
+func (s *BinaryLogicExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitBinaryLogicExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type LikeExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewLikeExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *LikeExpressionContext {
+ var p = new(LikeExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *LikeExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *LikeExpressionContext) Expression() IExpressionContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *LikeExpressionContext) LIKE() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserLIKE, 0)
+}
+
+func (s *LikeExpressionContext) StringLiteral() IStringLiteralContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IStringLiteralContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IStringLiteralContext)
+}
+
+func (s *LikeExpressionContext) NOT() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserNOT, 0)
+}
+
+func (s *LikeExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitLikeExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type FunctionInvocationExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewFunctionInvocationExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *FunctionInvocationExpressionContext {
+ var p = new(FunctionInvocationExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *FunctionInvocationExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *FunctionInvocationExpressionContext) FunctionIdentifier() IFunctionIdentifierContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IFunctionIdentifierContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IFunctionIdentifierContext)
+}
+
+func (s *FunctionInvocationExpressionContext) FunctionParameterList() IFunctionParameterListContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IFunctionParameterListContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IFunctionParameterListContext)
+}
+
+func (s *FunctionInvocationExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitFunctionInvocationExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type BinaryMultiplicativeExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewBinaryMultiplicativeExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BinaryMultiplicativeExpressionContext {
+ var p = new(BinaryMultiplicativeExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *BinaryMultiplicativeExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BinaryMultiplicativeExpressionContext) AllExpression() []IExpressionContext {
+ var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpressionContext)(nil)).Elem())
+ var tst = make([]IExpressionContext, len(ts))
+
+ for i, t := range ts {
+ if t != nil {
+ tst[i] = t.(IExpressionContext)
+ }
+ }
+
+ return tst
+}
+
+func (s *BinaryMultiplicativeExpressionContext) Expression(i int) IExpressionContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), i)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *BinaryMultiplicativeExpressionContext) STAR() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserSTAR, 0)
+}
+
+func (s *BinaryMultiplicativeExpressionContext) DIVIDE() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserDIVIDE, 0)
+}
+
+func (s *BinaryMultiplicativeExpressionContext) MODULE() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserMODULE, 0)
+}
+
+func (s *BinaryMultiplicativeExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitBinaryMultiplicativeExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type UnaryLogicExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewUnaryLogicExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UnaryLogicExpressionContext {
+ var p = new(UnaryLogicExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *UnaryLogicExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *UnaryLogicExpressionContext) NOT() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserNOT, 0)
+}
+
+func (s *UnaryLogicExpressionContext) Expression() IExpressionContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *UnaryLogicExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitUnaryLogicExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type UnaryNumericExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewUnaryNumericExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UnaryNumericExpressionContext {
+ var p = new(UnaryNumericExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *UnaryNumericExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *UnaryNumericExpressionContext) MINUS() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserMINUS, 0)
+}
+
+func (s *UnaryNumericExpressionContext) Expression() IExpressionContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *UnaryNumericExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitUnaryNumericExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type SubExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewSubExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SubExpressionContext {
+ var p = new(SubExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *SubExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *SubExpressionContext) LR_BRACKET() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserLR_BRACKET, 0)
+}
+
+func (s *SubExpressionContext) Expression() IExpressionContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), 0)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *SubExpressionContext) RR_BRACKET() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserRR_BRACKET, 0)
+}
+
+func (s *SubExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitSubExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type BinaryAdditiveExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewBinaryAdditiveExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BinaryAdditiveExpressionContext {
+ var p = new(BinaryAdditiveExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *BinaryAdditiveExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BinaryAdditiveExpressionContext) AllExpression() []IExpressionContext {
+ var ts = s.GetTypedRuleContexts(reflect.TypeOf((*IExpressionContext)(nil)).Elem())
+ var tst = make([]IExpressionContext, len(ts))
+
+ for i, t := range ts {
+ if t != nil {
+ tst[i] = t.(IExpressionContext)
+ }
+ }
+
+ return tst
+}
+
+func (s *BinaryAdditiveExpressionContext) Expression(i int) IExpressionContext {
+ var t = s.GetTypedRuleContext(reflect.TypeOf((*IExpressionContext)(nil)).Elem(), i)
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *BinaryAdditiveExpressionContext) PLUS() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserPLUS, 0)
+}
+
+func (s *BinaryAdditiveExpressionContext) MINUS() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserMINUS, 0)
+}
+
+func (s *BinaryAdditiveExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitBinaryAdditiveExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CESQLParserParser) Expression() (localctx IExpressionContext) {
+ return p.expression(0)
+}
+
+func (p *CESQLParserParser) expression(_p int) (localctx IExpressionContext) {
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+ _parentState := p.GetState()
+ localctx = NewExpressionContext(p, p.GetParserRuleContext(), _parentState)
+ var _prevctx IExpressionContext = localctx
+ var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning.
+ _startState := 2
+ p.EnterRecursionRule(localctx, 2, CESQLParserParserRULE_expression, _p)
+ var _la int
+
+ defer func() {
+ p.UnrollRecursionContexts(_parentctx)
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ p.SetState(38)
+ p.GetErrorHandler().Sync(p)
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 0, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewFunctionInvocationExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+
+ {
+ p.SetState(24)
+ p.FunctionIdentifier()
+ }
+ {
+ p.SetState(25)
+ p.FunctionParameterList()
+ }
+
+ case 2:
+ localctx = NewUnaryLogicExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+ {
+ p.SetState(27)
+ p.Match(CESQLParserParserNOT)
+ }
+ {
+ p.SetState(28)
+ p.expression(11)
+ }
+
+ case 3:
+ localctx = NewUnaryNumericExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+ {
+ p.SetState(29)
+ p.Match(CESQLParserParserMINUS)
+ }
+ {
+ p.SetState(30)
+ p.expression(10)
+ }
+
+ case 4:
+ localctx = NewExistsExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+ {
+ p.SetState(31)
+ p.Match(CESQLParserParserEXISTS)
+ }
+ {
+ p.SetState(32)
+ p.Identifier()
+ }
+
+ case 5:
+ localctx = NewSubExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+ {
+ p.SetState(33)
+ p.Match(CESQLParserParserLR_BRACKET)
+ }
+ {
+ p.SetState(34)
+ p.expression(0)
+ }
+ {
+ p.SetState(35)
+ p.Match(CESQLParserParserRR_BRACKET)
+ }
+
+ case 6:
+ localctx = NewAtomExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+ {
+ p.SetState(37)
+ p.Atom()
+ }
+
+ }
+ p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
+ p.SetState(66)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 4, p.GetParserRuleContext())
+
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ if p.GetParseListeners() != nil {
+ p.TriggerExitRuleEvent()
+ }
+ _prevctx = localctx
+ p.SetState(64)
+ p.GetErrorHandler().Sync(p)
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewBinaryMultiplicativeExpressionContext(p, NewExpressionContext(p, _parentctx, _parentState))
+ p.PushNewRecursionContext(localctx, _startState, CESQLParserParserRULE_expression)
+ p.SetState(40)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 6)) {
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 6)", ""))
+ }
+ {
+ p.SetState(41)
+ _la = p.GetTokenStream().LA(1)
+
+ if !(((_la)&-(0x1f+1)) == 0 && ((int64(1)< maxArity {
+ maxArity = a
+ }
+ }
+ if maxArity >= function.Arity() {
+ return errors.New("cannot add the variadic function, " +
+ "because there is already another function defined with the same name and same or greater arity")
+ }
+
+ item.variadicFunction = function
+ return nil
+ } else {
+ if _, ok := item.fixedArgsFunctions[function.Arity()]; ok {
+ return errors.New("cannot add the function, " +
+ "because there is already another function defined with the same arity and same name")
+ }
+
+ item.fixedArgsFunctions[function.Arity()] = function
+ return nil
+ }
+}
+
+func (table functionTable) ResolveFunction(name string, args int) cesql.Function {
+ item := table[strings.ToUpper(name)]
+ if item == nil {
+ return nil
+ }
+
+ if fn, ok := item.fixedArgsFunctions[args]; ok {
+ return fn
+ }
+
+ if item.variadicFunction == nil || item.variadicFunction.Arity() > args {
+ return nil
+ }
+
+ return item.variadicFunction
+}
+
+var globalFunctionTable = functionTable{}
+
+func init() {
+ for _, fn := range []cesql.Function{
+ function.IntFunction,
+ function.BoolFunction,
+ function.StringFunction,
+ function.IsBoolFunction,
+ function.IsIntFunction,
+ function.AbsFunction,
+ function.LengthFunction,
+ function.ConcatFunction,
+ function.ConcatWSFunction,
+ function.LowerFunction,
+ function.UpperFunction,
+ function.TrimFunction,
+ function.LeftFunction,
+ function.RightFunction,
+ function.SubstringFunction,
+ function.SubstringWithLengthFunction,
+ } {
+ if err := globalFunctionTable.AddFunction(fn); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func ResolveFunction(name string, args int) cesql.Function {
+ return globalFunctionTable.ResolveFunction(name, args)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/types.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/types.go
new file mode 100644
index 000000000..0c1f20078
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/types.go
@@ -0,0 +1,47 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package v2
+
+type Type uint8
+
+const (
+ StringType Type = iota
+ IntegerType
+ BooleanType
+ AnyType
+)
+
+func TypePtr(t Type) *Type {
+ return &t
+}
+
+func (t Type) IsSameType(val interface{}) bool {
+ return TypeFromVal(val) == t
+}
+
+func (t Type) String() string {
+ switch t {
+ case IntegerType:
+ return "Integer"
+ case BooleanType:
+ return "Boolean"
+ case StringType:
+ return "String"
+ }
+ return "Any"
+}
+
+func TypeFromVal(val interface{}) Type {
+ switch val.(type) {
+ case string:
+ return StringType
+ case int32:
+ return IntegerType
+ case bool:
+ return BooleanType
+ }
+ return AnyType
+}
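
A short usage sketch for the `Type` helpers added in this file; nothing beyond the functions shown above is assumed. `TypeFromVal` maps only `string`, `int32`, and `bool` to concrete CESQL types, and everything else falls back to `AnyType`.

```go
package main

import (
	"fmt"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
)

func main() {
	fmt.Println(cesql.TypeFromVal("abc"))     // String
	fmt.Println(cesql.TypeFromVal(int32(42))) // Integer
	fmt.Println(cesql.TypeFromVal(3.14))      // Any (no concrete mapping for float64)

	fmt.Println(cesql.BooleanType.IsSameType(true)) // true
	fmt.Println(cesql.IntegerType.IsSameType("7"))  // false
}
```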
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/casting.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/casting.go
new file mode 100644
index 000000000..9cf05b004
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/casting.go
@@ -0,0 +1,65 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package utils
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+)
+
+func Cast(val interface{}, target cesql.Type) (interface{}, error) {
+ if target.IsSameType(val) {
+ return val, nil
+ }
+ switch target {
+ case cesql.StringType:
+ switch val.(type) {
+ case int32:
+ return strconv.Itoa(int(val.(int32))), nil
+ case bool:
+ if val.(bool) {
+ return "true", nil
+ } else {
+ return "false", nil
+ }
+ }
+ // Casting to string is always defined
+ return fmt.Sprintf("%v", val), nil
+ case cesql.IntegerType:
+ switch val.(type) {
+ case string:
+ v, err := strconv.Atoi(val.(string))
+ if err != nil {
+ err = fmt.Errorf("cannot cast from String to Integer: %w", err)
+ }
+ return int32(v), err
+ }
+ return 0, fmt.Errorf("undefined cast from %v to %v", cesql.TypeFromVal(val), target)
+ case cesql.BooleanType:
+ switch val.(type) {
+ case string:
+ lowerCase := strings.ToLower(val.(string))
+ if lowerCase == "true" {
+ return true, nil
+ } else if lowerCase == "false" {
+ return false, nil
+ }
+ return false, fmt.Errorf("cannot cast String to Boolean, actual value: %v", val)
+ }
+ return false, fmt.Errorf("undefined cast from %v to %v", cesql.TypeFromVal(val), target)
+ }
+
+ // AnyType doesn't need casting
+ return val, nil
+}
+
+func CanCast(val interface{}, target cesql.Type) bool {
+ _, err := Cast(val, target)
+ return err == nil
+}
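
A small sketch of the casting rules implemented above: strings parse to integers via `strconv.Atoi`, any value formats to a string, and only `"true"`/`"false"` (case-insensitive) cast to booleans. The values below are illustrative.

```go
package main

import (
	"fmt"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/utils"
)

func main() {
	// String -> Integer parses the text; failures come back as errors.
	n, err := utils.Cast("42", cesql.IntegerType)
	fmt.Println(n, err) // 42 <nil>

	// Casting to String is always defined and uses fmt formatting.
	s, _ := utils.Cast(int32(7), cesql.StringType)
	fmt.Println(s) // 7 (as a string)

	// Boolean casts accept only "true"/"false", case-insensitively.
	fmt.Println(utils.CanCast("TRUE", cesql.BooleanType))  // true
	fmt.Println(utils.CanCast("maybe", cesql.BooleanType)) // false
}
```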
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/event.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/event.go
new file mode 100644
index 000000000..8525d4bf4
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/event.go
@@ -0,0 +1,67 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package utils
+
+import (
+ "fmt"
+ "time"
+
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+ "github.com/cloudevents/sdk-go/v2/binding/spec"
+ "github.com/cloudevents/sdk-go/v2/types"
+)
+
+func GetAttribute(event cloudevents.Event, attributeName string) interface{} {
+ var val interface{}
+
+ if a := spec.V1.Attribute(attributeName); a != nil { // Standard attribute
+ val = a.Get(event.Context)
+ } else {
+ val = event.Extensions()[attributeName]
+ }
+
+ if val == nil {
+ return nil
+ }
+
+ // Type coercion
+ switch val.(type) {
+ case bool, int32, string:
+ return val
+ case int8:
+ return int32(val.(int8))
+ case uint8:
+ return int32(val.(uint8))
+ case int16:
+ return int32(val.(int16))
+ case uint16:
+ return int32(val.(uint16))
+ case uint32:
+ return int32(val.(uint32))
+ case int64:
+ return int32(val.(int64))
+ case uint64:
+ return int32(val.(uint64))
+ case time.Time:
+ return val.(time.Time).Format(time.RFC3339Nano)
+ case []byte:
+ return types.FormatBinary(val.([]byte))
+ }
+ return fmt.Sprintf("%v", val)
+}
+
+func ContainsAttribute(event cloudevents.Event, attributeName string) bool {
+ if attributeName == "specversion" || attributeName == "id" || attributeName == "source" || attributeName == "type" {
+ return true
+ }
+
+ if attr := spec.V1.Attribute(attributeName); attr != nil {
+ return attr.Get(event.Context) != nil
+ }
+
+ _, ok := event.Extensions()[attributeName]
+ return ok
+}
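
A minimal sketch of the attribute helpers above, using the core CloudEvents v2 `Event` constructor and setters; the attribute values are illustrative. `GetAttribute` returns context attributes coerced to the CESQL value types (string, int32, bool), and `ContainsAttribute` reports whether a context attribute or extension is present.

```go
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/sql/v2/utils"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	e := cloudevents.NewEvent()
	e.SetID("abc-123")
	e.SetSource("example/source")
	e.SetType("example.event.type")

	fmt.Println(utils.GetAttribute(e, "type"))         // example.event.type
	fmt.Println(utils.ContainsAttribute(e, "subject")) // false: optional attribute not set
	fmt.Println(utils.ContainsAttribute(e, "myext"))   // false: extension not present
}
```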
diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml
deleted file mode 100644
index 50e4afd19..000000000
--- a/vendor/github.com/evanphx/json-patch/.travis.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-language: go
-
-go:
- - 1.14
- - 1.13
-
-install:
- - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
- - go get github.com/jessevdk/go-flags
-
-script:
- - go get
- - go test -cover ./...
- - cd ./v5
- - go get
- - go test -cover ./...
-
-notifications:
- email: false
diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md
index 121b039db..28e351693 100644
--- a/vendor/github.com/evanphx/json-patch/README.md
+++ b/vendor/github.com/evanphx/json-patch/README.md
@@ -39,6 +39,25 @@ go get -u github.com/evanphx/json-patch/v5
which limits the total size increase in bytes caused by "copy" operations in a
patch. It defaults to 0, which means there is no limit.
+These global variables control the behavior of `jsonpatch.Apply`.
+
+An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior
+is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`.
+
+Structure `jsonpatch.ApplyOptions` includes the configuration options above
+and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`.
+
+When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore
+`remove` operations whose `path` points to a non-existent location in the JSON document.
+`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions`
+returning an error when hitting a missing `path` on `remove`.
+
+When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure
+that `add` operations produce all the `path` elements that are missing from the target object.
+
+Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions`
+whose values are populated from the global configuration variables.
+
## Create and apply a merge patch
Given both an original JSON document and a modified JSON document, you can create
a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
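
The README addition above describes the options-based API without showing it in use. A short sketch, assuming the `/v5` module path (where `ApplyOptions` and `ApplyWithOptions` are defined):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	doc := []byte(`{"name": "alice"}`)
	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "remove", "path": "/missing"},
		{"op": "add", "path": "/profile/city", "value": "Berlin"}
	]`))
	if err != nil {
		panic(err)
	}

	opts := jsonpatch.NewApplyOptions()  // seeded from the global configuration variables
	opts.AllowMissingPathOnRemove = true // ignore the remove of a non-existent path
	opts.EnsurePathExistsOnAdd = true    // create /profile before adding /profile/city

	out, err := patch.ApplyWithOptions(doc, opts)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"name":"alice","profile":{"city":"Berlin"}}
}
```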
diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go
index 14e8bb5ce..ad88d4018 100644
--- a/vendor/github.com/evanphx/json-patch/merge.go
+++ b/vendor/github.com/evanphx/json-patch/merge.go
@@ -38,7 +38,10 @@ func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
cur, ok := (*doc)[k]
if !ok || cur == nil {
- pruneNulls(v)
+ if !mergeMerge {
+ pruneNulls(v)
+ }
+
(*doc)[k] = v
} else {
(*doc)[k] = merge(cur, v, mergeMerge)
@@ -79,8 +82,8 @@ func pruneAryNulls(ary *partialArray) *partialArray {
for _, v := range *ary {
if v != nil {
pruneNulls(v)
- newAry = append(newAry, v)
}
+ newAry = append(newAry, v)
}
*ary = newAry
@@ -88,8 +91,8 @@ func pruneAryNulls(ary *partialArray) *partialArray {
return ary
}
-var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
-var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
+var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document")
+var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
// MergeMergePatches merges two merge patches together, such that
@@ -114,19 +117,19 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
patchErr := json.Unmarshal(patchData, patch)
if _, ok := docErr.(*json.SyntaxError); ok {
- return nil, errBadJSONDoc
+ return nil, ErrBadJSONDoc
}
if _, ok := patchErr.(*json.SyntaxError); ok {
- return nil, errBadJSONPatch
+ return nil, ErrBadJSONPatch
}
if docErr == nil && *doc == nil {
- return nil, errBadJSONDoc
+ return nil, ErrBadJSONDoc
}
if patchErr == nil && *patch == nil {
- return nil, errBadJSONPatch
+ return nil, ErrBadJSONPatch
}
if docErr != nil || patchErr != nil {
@@ -142,7 +145,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
patchErr = json.Unmarshal(patchData, patchAry)
if patchErr != nil {
- return nil, errBadJSONPatch
+ return nil, ErrBadJSONPatch
}
pruneAryNulls(patchAry)
@@ -150,7 +153,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
out, patchErr := json.Marshal(patchAry)
if patchErr != nil {
- return nil, errBadJSONPatch
+ return nil, ErrBadJSONPatch
}
return out, nil
@@ -207,12 +210,12 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
err := json.Unmarshal(originalJSON, &originalDoc)
if err != nil {
- return nil, errBadJSONDoc
+ return nil, ErrBadJSONDoc
}
err = json.Unmarshal(modifiedJSON, &modifiedDoc)
if err != nil {
- return nil, errBadJSONDoc
+ return nil, ErrBadJSONDoc
}
dest, err := getDiff(originalDoc, modifiedDoc)
@@ -233,17 +236,17 @@ func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
err := json.Unmarshal(originalJSON, &originalDocs)
if err != nil {
- return nil, errBadJSONDoc
+ return nil, ErrBadJSONDoc
}
err = json.Unmarshal(modifiedJSON, &modifiedDocs)
if err != nil {
- return nil, errBadJSONDoc
+ return nil, ErrBadJSONDoc
}
total := len(originalDocs)
if len(modifiedDocs) != total {
- return nil, errBadJSONDoc
+ return nil, ErrBadJSONDoc
}
result := []json.RawMessage{}
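
Exporting `ErrBadJSONDoc` and `ErrBadJSONPatch` lets callers distinguish malformed input from other failures. A small sketch of matching the new sentinels, assuming the root module path of this vendored package:

```go
package main

import (
	"errors"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"title": "hello"`) // deliberately malformed JSON
	mergePatch := []byte(`{"title": "goodbye"}`)

	if _, err := jsonpatch.MergePatch(doc, mergePatch); errors.Is(err, jsonpatch.ErrBadJSONDoc) {
		fmt.Println("the target document is not valid JSON")
	}
}
```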
diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go
index f185a45b2..182985490 100644
--- a/vendor/github.com/evanphx/json-patch/patch.go
+++ b/vendor/github.com/evanphx/json-patch/patch.go
@@ -721,6 +721,10 @@ func (p Patch) Apply(doc []byte) ([]byte, error) {
// ApplyIndent mutates a JSON document according to the patch, and returns the new
// document indented.
func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
+ if len(doc) == 0 {
+ return doc, nil
+ }
+
var pd container
if doc[0] == '[' {
pd = &partialArray{}
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go
index 6ff062f9b..b83acdbc6 100644
--- a/vendor/github.com/google/btree/btree.go
+++ b/vendor/github.com/google/btree/btree.go
@@ -796,7 +796,7 @@ func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
}
// DescendGreaterThan calls the iterator for every value in the tree within
-// the range (pivot, last], until iterator returns false.
+// the range [last, pivot), until iterator returns false.
func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
diff --git a/vendor/github.com/google/btree/go.mod b/vendor/github.com/google/btree/go.mod
new file mode 100644
index 000000000..fe4d5ca17
--- /dev/null
+++ b/vendor/github.com/google/btree/go.mod
@@ -0,0 +1,17 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+module github.com/google/btree
+
+go 1.12
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
index 6b6a8e283..5aab58ebf 100644
--- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
+++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
@@ -14,17 +14,16 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.24.0
-// protoc v3.12.0
+// protoc-gen-go v1.26.0
+// protoc v3.15.5
// source: extensions/extension.proto
package gnostic_extension_v1
import (
- proto "github.com/golang/protobuf/proto"
- any "github.com/golang/protobuf/ptypes/any"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
sync "sync"
)
@@ -36,10 +35,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
-// This is a compile-time assertion that a sufficiently up-to-date version
-// of the legacy proto package is being used.
-const _ = proto.ProtoPackageIsVersion4
-
// The version number of Gnostic.
type Version struct {
state protoimpl.MessageState
@@ -191,7 +186,7 @@ type ExtensionHandlerResponse struct {
// status code.
Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"`
// text output
- Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+ Value *anypb.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
}
func (x *ExtensionHandlerResponse) Reset() {
@@ -240,7 +235,7 @@ func (x *ExtensionHandlerResponse) GetErrors() []string {
return nil
}
-func (x *ExtensionHandlerResponse) GetValue() *any.Any {
+func (x *ExtensionHandlerResponse) GetValue() *anypb.Any {
if x != nil {
return x.Value
}
@@ -350,12 +345,13 @@ var file_extensions_extension_proto_rawDesc = []byte{
0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61,
0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4b, 0x0a, 0x0e, 0x6f,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4d, 0x0a, 0x0e, 0x6f,
0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47,
0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50,
- 0x01, 0x5a, 0x1f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x67, 0x6e,
- 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f,
- 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x01, 0x5a, 0x21, 0x2e, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b,
+ 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
}
var (
@@ -376,7 +372,7 @@ var file_extensions_extension_proto_goTypes = []interface{}{
(*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest
(*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse
(*Wrapper)(nil), // 3: gnostic.extension.v1.Wrapper
- (*any.Any)(nil), // 4: google.protobuf.Any
+ (*anypb.Any)(nil), // 4: google.protobuf.Any
}
var file_extensions_extension_proto_depIdxs = []int32{
3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto
index 8ac1faffc..875137c1a 100644
--- a/vendor/github.com/googleapis/gnostic/extensions/extension.proto
+++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto
@@ -38,10 +38,11 @@ option java_package = "org.gnostic.v1";
// hopefully unique enough to not conflict with things that may come along in
// the future. 'GPB' is reserved for the protocol buffer implementation itself.
//
-option objc_class_prefix = "GNX"; // "Gnostic Extension"
+// "Gnostic Extension"
+option objc_class_prefix = "GNX";
// The Go package name.
-option go_package = "extensions;gnostic_extension_v1";
+option go_package = "./extensions;gnostic_extension_v1";
// The version number of Gnostic.
message Version {
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go
index eb93b65e8..727d7f4ad 100644
--- a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.go
@@ -7585,7 +7585,10 @@ func (m *NamedAny) ToRawInfo() *yaml.Node {
info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
}
- // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+ if m.Value != nil {
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
+ info.Content = append(info.Content, m.Value.ToRawInfo())
+ }
return info
}
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
index 4320dc376..8a5f302f3 100644
--- a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.pb.go
@@ -16,17 +16,16 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.24.0
-// protoc v3.12.0
+// protoc-gen-go v1.26.0
+// protoc v3.15.5
// source: openapiv2/OpenAPIv2.proto
package openapi_v2
import (
- proto "github.com/golang/protobuf/proto"
- any "github.com/golang/protobuf/ptypes/any"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
sync "sync"
)
@@ -38,10 +37,6 @@ const (
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
-// This is a compile-time assertion that a sufficiently up-to-date version
-// of the legacy proto package is being used.
-const _ = proto.ProtoPackageIsVersion4
-
type AdditionalPropertiesItem struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -127,8 +122,8 @@ type Any struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
- Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"`
+ Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"`
}
func (x *Any) Reset() {
@@ -163,7 +158,7 @@ func (*Any) Descriptor() ([]byte, []int) {
return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{1}
}
-func (x *Any) GetValue() *any.Any {
+func (x *Any) GetValue() *anypb.Any {
if x != nil {
return x.Value
}
@@ -6341,11 +6336,11 @@ var file_openapiv2_OpenAPIv2_proto_rawDesc = []byte{
0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e,
0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64,
0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e,
- 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3c, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x14, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
- 0x32, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x4f,
- 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0xa2, 0x02,
+ 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -6422,7 +6417,7 @@ var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{
(*TypeItem)(nil), // 57: openapi.v2.TypeItem
(*VendorExtension)(nil), // 58: openapi.v2.VendorExtension
(*Xml)(nil), // 59: openapi.v2.Xml
- (*any.Any)(nil), // 60: google.protobuf.Any
+ (*anypb.Any)(nil), // 60: google.protobuf.Any
}
var file_openapiv2_OpenAPIv2_proto_depIdxs = []int32{
50, // 0: openapi.v2.AdditionalPropertiesItem.schema:type_name -> openapi.v2.Schema
diff --git a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto
index 00ac1b0a0..1c59b2f4a 100644
--- a/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto
+++ b/vendor/github.com/googleapis/gnostic/openapiv2/OpenAPIv2.proto
@@ -42,7 +42,7 @@ option java_package = "org.openapi_v2";
option objc_class_prefix = "OAS";
// The Go package name.
-option go_package = "openapiv2;openapi_v2";
+option go_package = "./openapiv2;openapi_v2";
message AdditionalPropertiesItem {
oneof oneof {
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index c67e9b7f5..d219dc4c7 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -719,7 +719,15 @@ func (sc *serverConn) canonicalHeader(v string) string {
sc.canonHeader = make(map[string]string)
}
cv = http.CanonicalHeaderKey(v)
- sc.canonHeader[v] = cv
+ // maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
+ // entries in the canonHeader cache. This should be larger than the number
+ // of unique, uncommon header keys likely to be sent by the peer, while not
+ // so high as to permit unreasonable memory usage if the peer sends an unbounded
+ // number of unique header keys.
+ const maxCachedCanonicalHeaders = 32
+ if len(sc.canonHeader) < maxCachedCanonicalHeaders {
+ sc.canonHeader[v] = cv
+ }
return cv
}
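
The cap on `sc.canonHeader` is an instance of a general defensive pattern: memoize expensive lookups only up to a fixed size so untrusted input cannot grow the cache without bound. A simplified, hypothetical illustration of that pattern (not the actual x/net/http2 internals):

```go
// boundedCache memoizes computed values but stops growing once it holds
// maxEntries, so a peer supplying endless unique keys cannot exhaust memory.
type boundedCache struct {
	entries    map[string]string
	maxEntries int
}

func (c *boundedCache) get(key string, compute func(string) string) string {
	if v, ok := c.entries[key]; ok {
		return v
	}
	v := compute(key)
	if c.entries == nil {
		c.entries = make(map[string]string)
	}
	if len(c.entries) < c.maxEntries {
		c.entries[key] = v // cache only while under the cap; otherwise recompute each time
	}
	return v
}
```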
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index f24d2b1e7..c7cd00173 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -32,7 +32,8 @@ type WriteScheduler interface {
// Pop dequeues the next frame to write. Returns false if no frames can
// be written. Frames with a given wr.StreamID() are Pop'd in the same
- // order they are Push'd. No frames should be discarded except by CloseStream.
+ // order they are Push'd, except RST_STREAM frames. No frames should be
+ // discarded except by CloseStream.
Pop() (wr FrameWriteRequest, ok bool)
}
@@ -52,6 +53,7 @@ type FrameWriteRequest struct {
// stream is the stream on which this frame will be written.
// nil for non-stream frames like PING and SETTINGS.
+ // nil for RST_STREAM streams, which use the StreamError.StreamID field instead.
stream *stream
// done, if non-nil, must be a buffered channel with space for
diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go
index 9a7b9e581..f2e55e05c 100644
--- a/vendor/golang.org/x/net/http2/writesched_random.go
+++ b/vendor/golang.org/x/net/http2/writesched_random.go
@@ -45,11 +45,11 @@ func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityP
}
func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
- id := wr.StreamID()
- if id == 0 {
+ if wr.isControl() {
ws.zero.push(wr)
return
}
+ id := wr.StreamID()
q, ok := ws.sq[id]
if !ok {
q = ws.queuePool.get()
@@ -59,7 +59,7 @@ func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
}
func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
- // Control frames first.
+ // Control and RST_STREAM frames first.
if !ws.zero.empty() {
return ws.zero.shift(), true
}
diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto
index 539c8c9e2..c4506eb92 100644
--- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto
@@ -86,9 +86,12 @@ message StorageVersionCondition {
// A list of StorageVersions.
message StorageVersionList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+ // Items holds a list of StorageVersion
repeated StorageVersion items = 2;
}
diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go
index 880091b6f..bfa249e13 100644
--- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go
+++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go
@@ -121,7 +121,10 @@ type StorageVersionCondition struct {
// A list of StorageVersions.
type StorageVersionList struct {
metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- Items []StorageVersion `json:"items" protobuf:"bytes,2,rep,name=items"`
+ // Items holds a list of StorageVersion
+ Items []StorageVersion `json:"items" protobuf:"bytes,2,rep,name=items"`
}
diff --git a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go
index b05c28595..297ed08a7 100644
--- a/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apiserverinternal/v1alpha1/types_swagger_doc_generated.go
@@ -64,7 +64,9 @@ func (StorageVersionCondition) SwaggerDoc() map[string]string {
}
var map_StorageVersionList = map[string]string{
- "": "A list of StorageVersions.",
+ "": "A list of StorageVersions.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items holds a list of StorageVersion",
}
func (StorageVersionList) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go
index 19fe45638..0a15aff4d 100644
--- a/vendor/k8s.io/api/apps/v1/generated.pb.go
+++ b/vendor/k8s.io/api/apps/v1/generated.pb.go
@@ -868,134 +868,135 @@ func init() {
}
var fileDescriptor_e1014cab6f31e43b = []byte{
- // 2031 bytes of a gzipped FileDescriptorProto
+ // 2047 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47,
0x15, 0x77, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19,
0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78,
0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea,
- 0x61, 0x47, 0x5c, 0x10, 0x12, 0x37, 0x0e, 0xfc, 0x27, 0x08, 0x21, 0xb8, 0xa1, 0x08, 0x71, 0xd9,
- 0x0b, 0x52, 0xc4, 0x85, 0x9c, 0x2c, 0x76, 0x72, 0x42, 0x28, 0x47, 0x2e, 0xb9, 0x80, 0xaa, 0xba,
- 0xfa, 0xbb, 0xda, 0x33, 0xf6, 0x26, 0xce, 0x87, 0x72, 0xf3, 0x54, 0xfd, 0xde, 0xaf, 0xde, 0xab,
- 0x7a, 0x55, 0xef, 0xd7, 0x55, 0x06, 0xf7, 0x8e, 0xbf, 0xeb, 0xa9, 0xba, 0xdd, 0x3e, 0xf6, 0x0f,
- 0x89, 0x6b, 0x11, 0x4a, 0xbc, 0xf6, 0x90, 0x58, 0x9a, 0xed, 0xb6, 0x45, 0x07, 0x76, 0xf4, 0x36,
- 0x76, 0x1c, 0xaf, 0x3d, 0xbc, 0xdd, 0x1e, 0x10, 0x8b, 0xb8, 0x98, 0x12, 0x4d, 0x75, 0x5c, 0x9b,
- 0xda, 0x10, 0x06, 0x18, 0x15, 0x3b, 0xba, 0xca, 0x30, 0xea, 0xf0, 0xf6, 0xd5, 0x5b, 0x03, 0x9d,
- 0x1e, 0xf9, 0x87, 0x6a, 0xdf, 0x36, 0xdb, 0x03, 0x7b, 0x60, 0xb7, 0x39, 0xf4, 0xd0, 0x7f, 0xc4,
- 0x7f, 0xf1, 0x1f, 0xfc, 0xaf, 0x80, 0xe2, 0x6a, 0x2b, 0x31, 0x4c, 0xdf, 0x76, 0x89, 0x64, 0x98,
- 0xab, 0x77, 0x63, 0x8c, 0x89, 0xfb, 0x47, 0xba, 0x45, 0xdc, 0x51, 0xdb, 0x39, 0x1e, 0xb0, 0x06,
- 0xaf, 0x6d, 0x12, 0x8a, 0x65, 0x56, 0xed, 0x22, 0x2b, 0xd7, 0xb7, 0xa8, 0x6e, 0x92, 0x9c, 0xc1,
- 0x6b, 0x93, 0x0c, 0xbc, 0xfe, 0x11, 0x31, 0x71, 0xce, 0xee, 0x95, 0x22, 0x3b, 0x9f, 0xea, 0x46,
- 0x5b, 0xb7, 0xa8, 0x47, 0xdd, 0xac, 0x51, 0xeb, 0xbf, 0x0a, 0x80, 0x5d, 0xdb, 0xa2, 0xae, 0x6d,
- 0x18, 0xc4, 0x45, 0x64, 0xa8, 0x7b, 0xba, 0x6d, 0xc1, 0x9f, 0x83, 0x1a, 0x8b, 0x47, 0xc3, 0x14,
- 0xd7, 0x95, 0xeb, 0xca, 0xc6, 0xc2, 0x9d, 0xef, 0xa8, 0xf1, 0x24, 0x47, 0xf4, 0xaa, 0x73, 0x3c,
- 0x60, 0x0d, 0x9e, 0xca, 0xd0, 0xea, 0xf0, 0xb6, 0xba, 0x7b, 0xf8, 0x2e, 0xe9, 0xd3, 0x1e, 0xa1,
- 0xb8, 0x03, 0x9f, 0x9c, 0x34, 0x67, 0xc6, 0x27, 0x4d, 0x10, 0xb7, 0xa1, 0x88, 0x15, 0xee, 0x82,
- 0x0a, 0x67, 0x2f, 0x71, 0xf6, 0x5b, 0x85, 0xec, 0x22, 0x68, 0x15, 0xe1, 0x5f, 0xbc, 0xf1, 0x98,
- 0x12, 0x8b, 0xb9, 0xd7, 0xb9, 0x24, 0xa8, 0x2b, 0x5b, 0x98, 0x62, 0xc4, 0x89, 0xe0, 0xcb, 0xa0,
- 0xe6, 0x0a, 0xf7, 0xeb, 0xe5, 0xeb, 0xca, 0x46, 0xb9, 0x73, 0x59, 0xa0, 0x6a, 0x61, 0x58, 0x28,
- 0x42, 0xb4, 0xfe, 0xa6, 0x80, 0xb5, 0x7c, 0xdc, 0xdb, 0xba, 0x47, 0xe1, 0x3b, 0xb9, 0xd8, 0xd5,
- 0xe9, 0x62, 0x67, 0xd6, 0x3c, 0xf2, 0x68, 0xe0, 0xb0, 0x25, 0x11, 0xf7, 0xdb, 0xa0, 0xaa, 0x53,
- 0x62, 0x7a, 0xf5, 0xd2, 0xf5, 0xf2, 0xc6, 0xc2, 0x9d, 0x1b, 0x6a, 0x3e, 0x77, 0xd5, 0xbc, 0x63,
- 0x9d, 0x45, 0x41, 0x59, 0x7d, 0x8b, 0x19, 0xa3, 0x80, 0xa3, 0xf5, 0x3f, 0x05, 0xcc, 0x6f, 0x61,
- 0x62, 0xda, 0xd6, 0x3e, 0xa1, 0x17, 0xb0, 0x68, 0x5d, 0x50, 0xf1, 0x1c, 0xd2, 0x17, 0x8b, 0xf6,
- 0x0d, 0x99, 0xef, 0x91, 0x3b, 0xfb, 0x0e, 0xe9, 0xc7, 0x0b, 0xc5, 0x7e, 0x21, 0x6e, 0x0c, 0xdf,
- 0x06, 0xb3, 0x1e, 0xc5, 0xd4, 0xf7, 0xf8, 0x32, 0x2d, 0xdc, 0xf9, 0xe6, 0xe9, 0x34, 0x1c, 0xda,
- 0x59, 0x12, 0x44, 0xb3, 0xc1, 0x6f, 0x24, 0x28, 0x5a, 0xff, 0x2e, 0x01, 0x18, 0x61, 0xbb, 0xb6,
- 0xa5, 0xe9, 0x94, 0xe5, 0xef, 0xeb, 0xa0, 0x42, 0x47, 0x0e, 0xe1, 0xd3, 0x30, 0xdf, 0xb9, 0x11,
- 0x7a, 0x71, 0x7f, 0xe4, 0x90, 0x8f, 0x4f, 0x9a, 0x6b, 0x79, 0x0b, 0xd6, 0x83, 0xb8, 0x0d, 0xdc,
- 0x8e, 0xfc, 0x2b, 0x71, 0xeb, 0xbb, 0xe9, 0xa1, 0x3f, 0x3e, 0x69, 0x4a, 0x0e, 0x0b, 0x35, 0x62,
- 0x4a, 0x3b, 0x08, 0x87, 0x00, 0x1a, 0xd8, 0xa3, 0xf7, 0x5d, 0x6c, 0x79, 0xc1, 0x48, 0xba, 0x49,
- 0x44, 0xe4, 0x2f, 0x4d, 0xb7, 0x3c, 0xcc, 0xa2, 0x73, 0x55, 0x78, 0x01, 0xb7, 0x73, 0x6c, 0x48,
- 0x32, 0x02, 0xbc, 0x01, 0x66, 0x5d, 0x82, 0x3d, 0xdb, 0xaa, 0x57, 0x78, 0x14, 0xd1, 0x04, 0x22,
- 0xde, 0x8a, 0x44, 0x2f, 0x7c, 0x11, 0xcc, 0x99, 0xc4, 0xf3, 0xf0, 0x80, 0xd4, 0xab, 0x1c, 0xb8,
- 0x2c, 0x80, 0x73, 0xbd, 0xa0, 0x19, 0x85, 0xfd, 0xad, 0x3f, 0x28, 0x60, 0x31, 0x9a, 0xb9, 0x0b,
- 0xd8, 0x2a, 0x9d, 0xf4, 0x56, 0x79, 0xfe, 0xd4, 0x3c, 0x29, 0xd8, 0x21, 0xef, 0x95, 0x13, 0x3e,
- 0xb3, 0x24, 0x84, 0x3f, 0x03, 0x35, 0x8f, 0x18, 0xa4, 0x4f, 0x6d, 0x57, 0xf8, 0xfc, 0xca, 0x94,
- 0x3e, 0xe3, 0x43, 0x62, 0xec, 0x0b, 0xd3, 0xce, 0x25, 0xe6, 0x74, 0xf8, 0x0b, 0x45, 0x94, 0xf0,
- 0x27, 0xa0, 0x46, 0x89, 0xe9, 0x18, 0x98, 0x12, 0xb1, 0x4d, 0x52, 0xf9, 0xcd, 0xd2, 0x85, 0x91,
- 0xed, 0xd9, 0xda, 0x7d, 0x01, 0xe3, 0x1b, 0x25, 0x9a, 0x87, 0xb0, 0x15, 0x45, 0x34, 0xf0, 0x18,
- 0x2c, 0xf9, 0x8e, 0xc6, 0x90, 0x94, 0x1d, 0xdd, 0x83, 0x91, 0x48, 0x9f, 0x9b, 0xa7, 0x4e, 0xc8,
- 0x41, 0xca, 0xa4, 0xb3, 0x26, 0x06, 0x58, 0x4a, 0xb7, 0xa3, 0x0c, 0x35, 0xdc, 0x04, 0xcb, 0xa6,
- 0x6e, 0x21, 0x82, 0xb5, 0xd1, 0x3e, 0xe9, 0xdb, 0x96, 0xe6, 0xf1, 0x04, 0xaa, 0x76, 0xd6, 0x05,
- 0xc1, 0x72, 0x2f, 0xdd, 0x8d, 0xb2, 0x78, 0xb8, 0x0d, 0x56, 0xc3, 0x73, 0xf6, 0x47, 0xba, 0x47,
- 0x6d, 0x77, 0xb4, 0xad, 0x9b, 0x3a, 0xad, 0xcf, 0x72, 0x9e, 0xfa, 0xf8, 0xa4, 0xb9, 0x8a, 0x24,
- 0xfd, 0x48, 0x6a, 0xd5, 0xfa, 0xed, 0x2c, 0x58, 0xce, 0x9c, 0x06, 0xf0, 0x01, 0x58, 0xeb, 0xfb,
- 0xae, 0x4b, 0x2c, 0xba, 0xe3, 0x9b, 0x87, 0xc4, 0xdd, 0xef, 0x1f, 0x11, 0xcd, 0x37, 0x88, 0xc6,
- 0x57, 0xb4, 0xda, 0x69, 0x08, 0x5f, 0xd7, 0xba, 0x52, 0x14, 0x2a, 0xb0, 0x86, 0x3f, 0x06, 0xd0,
- 0xe2, 0x4d, 0x3d, 0xdd, 0xf3, 0x22, 0xce, 0x12, 0xe7, 0x8c, 0x36, 0xe0, 0x4e, 0x0e, 0x81, 0x24,
- 0x56, 0xcc, 0x47, 0x8d, 0x78, 0xba, 0x4b, 0xb4, 0xac, 0x8f, 0xe5, 0xb4, 0x8f, 0x5b, 0x52, 0x14,
- 0x2a, 0xb0, 0x86, 0xaf, 0x82, 0x85, 0x60, 0x34, 0x3e, 0xe7, 0x62, 0x71, 0x56, 0x04, 0xd9, 0xc2,
- 0x4e, 0xdc, 0x85, 0x92, 0x38, 0x16, 0x9a, 0x7d, 0xe8, 0x11, 0x77, 0x48, 0xb4, 0x37, 0x03, 0x0d,
- 0xc0, 0x0a, 0x65, 0x95, 0x17, 0xca, 0x28, 0xb4, 0xdd, 0x1c, 0x02, 0x49, 0xac, 0x58, 0x68, 0x41,
- 0xd6, 0xe4, 0x42, 0x9b, 0x4d, 0x87, 0x76, 0x20, 0x45, 0xa1, 0x02, 0x6b, 0x96, 0x7b, 0x81, 0xcb,
- 0x9b, 0x43, 0xac, 0x1b, 0xf8, 0xd0, 0x20, 0xf5, 0xb9, 0x74, 0xee, 0xed, 0xa4, 0xbb, 0x51, 0x16,
- 0x0f, 0xdf, 0x04, 0x57, 0x82, 0xa6, 0x03, 0x0b, 0x47, 0x24, 0x35, 0x4e, 0xf2, 0x9c, 0x20, 0xb9,
- 0xb2, 0x93, 0x05, 0xa0, 0xbc, 0x0d, 0x7c, 0x1d, 0x2c, 0xf5, 0x6d, 0xc3, 0xe0, 0xf9, 0xd8, 0xb5,
- 0x7d, 0x8b, 0xd6, 0xe7, 0x39, 0x0b, 0x64, 0x7b, 0xa8, 0x9b, 0xea, 0x41, 0x19, 0x24, 0x7c, 0x08,
- 0x40, 0x3f, 0x2c, 0x07, 0x5e, 0x1d, 0x14, 0x17, 0xfa, 0x7c, 0x1d, 0x8a, 0x0b, 0x70, 0xd4, 0xe4,
- 0xa1, 0x04, 0x5b, 0xeb, 0x3d, 0x05, 0xac, 0x17, 0xec, 0x71, 0xf8, 0x83, 0x54, 0xd5, 0xbb, 0x99,
- 0xa9, 0x7a, 0xd7, 0x0a, 0xcc, 0x12, 0xa5, 0xaf, 0x0f, 0x16, 0x99, 0xee, 0xd0, 0xad, 0x41, 0x00,
- 0x11, 0x27, 0xd8, 0x4b, 0x32, 0xdf, 0x51, 0x12, 0x18, 0x1f, 0xc3, 0x57, 0xc6, 0x27, 0xcd, 0xc5,
- 0x54, 0x1f, 0x4a, 0x73, 0xb6, 0x7e, 0x5d, 0x02, 0x60, 0x8b, 0x38, 0x86, 0x3d, 0x32, 0x89, 0x75,
- 0x11, 0xaa, 0x65, 0x2b, 0xa5, 0x5a, 0x5a, 0xd2, 0x85, 0x88, 0xfc, 0x29, 0x94, 0x2d, 0xdb, 0x19,
- 0xd9, 0xf2, 0xad, 0x09, 0x3c, 0xa7, 0xeb, 0x96, 0x7f, 0x96, 0xc1, 0x4a, 0x0c, 0x8e, 0x85, 0xcb,
- 0xbd, 0xd4, 0x12, 0xbe, 0x90, 0x59, 0xc2, 0x75, 0x89, 0xc9, 0xa7, 0xa6, 0x5c, 0xde, 0x05, 0x4b,
- 0x4c, 0x57, 0x04, 0xab, 0xc6, 0x55, 0xcb, 0xec, 0x99, 0x55, 0x4b, 0x54, 0x75, 0xb6, 0x53, 0x4c,
- 0x28, 0xc3, 0x5c, 0xa0, 0x92, 0xe6, 0xbe, 0x88, 0x2a, 0xe9, 0x8f, 0x0a, 0x58, 0x8a, 0x97, 0xe9,
- 0x02, 0x64, 0x52, 0x37, 0x2d, 0x93, 0x1a, 0xa7, 0xe7, 0x65, 0x81, 0x4e, 0xfa, 0x47, 0x25, 0xe9,
- 0x35, 0x17, 0x4a, 0x1b, 0xec, 0x83, 0xca, 0x31, 0xf4, 0x3e, 0xf6, 0x44, 0x59, 0xbd, 0x14, 0x7c,
- 0x4c, 0x05, 0x6d, 0x28, 0xea, 0x4d, 0x49, 0xaa, 0xd2, 0xa7, 0x2b, 0xa9, 0xca, 0x9f, 0x8c, 0xa4,
- 0xba, 0x0f, 0x6a, 0x5e, 0x28, 0xa6, 0x2a, 0x9c, 0xf2, 0xc6, 0xa4, 0xed, 0x2c, 0x74, 0x54, 0xc4,
- 0x1a, 0x29, 0xa8, 0x88, 0x49, 0xa6, 0x9d, 0xaa, 0x9f, 0xa5, 0x76, 0x62, 0xe9, 0xed, 0x60, 0xdf,
- 0x23, 0x1a, 0xdf, 0x4a, 0xb5, 0x38, 0xbd, 0xf7, 0x78, 0x2b, 0x12, 0xbd, 0xf0, 0x00, 0xac, 0x3b,
- 0xae, 0x3d, 0x70, 0x89, 0xe7, 0x6d, 0x11, 0xac, 0x19, 0xba, 0x45, 0xc2, 0x00, 0x82, 0xaa, 0x77,
- 0x6d, 0x7c, 0xd2, 0x5c, 0xdf, 0x93, 0x43, 0x50, 0x91, 0x6d, 0xeb, 0x2f, 0x15, 0x70, 0x39, 0x7b,
- 0x22, 0x16, 0x08, 0x11, 0xe5, 0x5c, 0x42, 0xe4, 0xe5, 0x44, 0x8a, 0x06, 0x2a, 0x2d, 0xf1, 0xcd,
- 0x9f, 0x4b, 0xd3, 0x4d, 0xb0, 0x2c, 0x84, 0x47, 0xd8, 0x29, 0xa4, 0x58, 0xb4, 0x3c, 0x07, 0xe9,
- 0x6e, 0x94, 0xc5, 0xc3, 0x7b, 0x60, 0xd1, 0xe5, 0xda, 0x2a, 0x24, 0x08, 0xf4, 0xc9, 0xd7, 0x04,
- 0xc1, 0x22, 0x4a, 0x76, 0xa2, 0x34, 0x96, 0x69, 0x93, 0x58, 0x72, 0x84, 0x04, 0x95, 0xb4, 0x36,
- 0xd9, 0xcc, 0x02, 0x50, 0xde, 0x06, 0xf6, 0xc0, 0x8a, 0x6f, 0xe5, 0xa9, 0x82, 0x5c, 0xbb, 0x26,
- 0xa8, 0x56, 0x0e, 0xf2, 0x10, 0x24, 0xb3, 0x83, 0x3f, 0x4d, 0xc9, 0x95, 0x59, 0x7e, 0x8a, 0xbc,
- 0x70, 0xfa, 0x76, 0x98, 0x5a, 0xaf, 0x48, 0x74, 0x54, 0x6d, 0x5a, 0x1d, 0xd5, 0xfa, 0xb3, 0x02,
- 0x60, 0x7e, 0x0b, 0x4e, 0xfc, 0xb8, 0xcf, 0x59, 0x24, 0x4a, 0xa4, 0x26, 0x57, 0x38, 0x37, 0x27,
- 0x2b, 0x9c, 0xf8, 0x04, 0x9d, 0x4e, 0xe2, 0x88, 0xe9, 0xbd, 0x98, 0x8b, 0x99, 0x29, 0x24, 0x4e,
- 0xec, 0xcf, 0xb3, 0x49, 0x9c, 0x04, 0xcf, 0xe9, 0x12, 0xe7, 0x3f, 0x25, 0xb0, 0x12, 0x83, 0xa7,
- 0x96, 0x38, 0x12, 0x93, 0xaf, 0x2e, 0x67, 0xa6, 0x93, 0x1d, 0xf1, 0xd4, 0x7d, 0x4e, 0x64, 0x47,
- 0xec, 0x50, 0x81, 0xec, 0xf8, 0x7d, 0x29, 0xe9, 0xf5, 0x19, 0x65, 0xc7, 0x27, 0x70, 0x55, 0xf1,
- 0x85, 0x53, 0x2e, 0xad, 0xbf, 0x96, 0xc1, 0xe5, 0xec, 0x16, 0x4c, 0xd5, 0x41, 0x65, 0x62, 0x1d,
- 0xdc, 0x03, 0xab, 0x8f, 0x7c, 0xc3, 0x18, 0xf1, 0x18, 0x12, 0xc5, 0x30, 0xa8, 0xa0, 0x5f, 0x17,
- 0x96, 0xab, 0x3f, 0x94, 0x60, 0x90, 0xd4, 0x32, 0x5f, 0x16, 0x2b, 0xcf, 0x5a, 0x16, 0xab, 0xe7,
- 0x28, 0x8b, 0x72, 0x65, 0x51, 0x3e, 0x97, 0xb2, 0x98, 0xba, 0x26, 0x4a, 0x8e, 0xab, 0x89, 0xdf,
- 0xf0, 0x63, 0x05, 0xac, 0xc9, 0x3f, 0x9f, 0xa1, 0x01, 0x96, 0x4c, 0xfc, 0x38, 0x79, 0x79, 0x31,
- 0xa9, 0x60, 0xf8, 0x54, 0x37, 0xd4, 0xe0, 0x75, 0x47, 0x7d, 0xcb, 0xa2, 0xbb, 0xee, 0x3e, 0x75,
- 0x75, 0x6b, 0x10, 0x14, 0xd8, 0x5e, 0x8a, 0x0b, 0x65, 0xb8, 0xe1, 0x43, 0x50, 0x33, 0xf1, 0xe3,
- 0x7d, 0xdf, 0x1d, 0x84, 0x85, 0xf0, 0xec, 0xe3, 0xf0, 0xdc, 0xef, 0x09, 0x16, 0x14, 0xf1, 0xb5,
- 0x3e, 0x54, 0xc0, 0x7a, 0x41, 0x05, 0xfd, 0x12, 0x45, 0xb9, 0x0b, 0xae, 0xa7, 0x82, 0x64, 0x1b,
- 0x92, 0x3c, 0xf2, 0x0d, 0xbe, 0x37, 0x85, 0x5e, 0xb9, 0x09, 0xe6, 0x1d, 0xec, 0x52, 0x3d, 0x12,
- 0xba, 0xd5, 0xce, 0xe2, 0xf8, 0xa4, 0x39, 0xbf, 0x17, 0x36, 0xa2, 0xb8, 0xbf, 0xf5, 0x9b, 0x12,
- 0x58, 0x48, 0x90, 0x5c, 0x80, 0x76, 0x78, 0x23, 0xa5, 0x1d, 0xa4, 0xaf, 0x31, 0xc9, 0xa8, 0x8a,
- 0xc4, 0x43, 0x2f, 0x23, 0x1e, 0xbe, 0x3d, 0x89, 0xe8, 0x74, 0xf5, 0xf0, 0x51, 0x09, 0xac, 0x26,
- 0xd0, 0xb1, 0x7c, 0xf8, 0x5e, 0x4a, 0x3e, 0x6c, 0x64, 0xe4, 0x43, 0x5d, 0x66, 0xf3, 0x95, 0x7e,
- 0x98, 0xac, 0x1f, 0xfe, 0xa4, 0x80, 0xe5, 0xc4, 0xdc, 0x5d, 0x80, 0x80, 0xd8, 0x4a, 0x0b, 0x88,
- 0xe6, 0x84, 0x7c, 0x29, 0x50, 0x10, 0x4f, 0xaa, 0x29, 0xbf, 0xbf, 0xf4, 0x37, 0x17, 0xbf, 0x04,
- 0xab, 0x43, 0xdb, 0xf0, 0x4d, 0xd2, 0x35, 0xb0, 0x6e, 0x86, 0x00, 0x56, 0x71, 0xd9, 0x24, 0xbe,
- 0x28, 0xa5, 0x27, 0xae, 0xa7, 0x7b, 0x94, 0x58, 0xf4, 0x41, 0x6c, 0x19, 0xd7, 0xf9, 0x07, 0x12,
- 0x3a, 0x24, 0x1d, 0x04, 0xbe, 0x0a, 0x16, 0x58, 0xa5, 0xd4, 0xfb, 0x64, 0x07, 0x9b, 0x61, 0x4e,
- 0x45, 0x6f, 0x0f, 0xfb, 0x71, 0x17, 0x4a, 0xe2, 0xe0, 0x11, 0x58, 0x71, 0x6c, 0xad, 0x87, 0x2d,
- 0x3c, 0x20, 0xec, 0xfc, 0xdf, 0xb3, 0x0d, 0xbd, 0x3f, 0xe2, 0x77, 0x1a, 0xf3, 0x9d, 0xd7, 0xc2,
- 0xef, 0xd5, 0xbd, 0x3c, 0x84, 0x7d, 0x0f, 0x48, 0x9a, 0xf9, 0x7e, 0x96, 0x51, 0x42, 0x33, 0xf7,
- 0x54, 0x36, 0x97, 0xfb, 0xff, 0x02, 0x59, 0x72, 0x9d, 0xf3, 0xb1, 0xac, 0xe8, 0xb6, 0xa6, 0x76,
- 0xae, 0x97, 0xae, 0x8f, 0x2a, 0xe0, 0x4a, 0xee, 0x80, 0xfc, 0x0c, 0xef, 0x4b, 0x72, 0xaa, 0xae,
- 0x7c, 0x06, 0x55, 0xb7, 0x09, 0x96, 0xc5, 0x23, 0x5b, 0x46, 0x14, 0x46, 0xe2, 0xbc, 0x9b, 0xee,
- 0x46, 0x59, 0xbc, 0xec, 0xbe, 0xa6, 0x7a, 0xc6, 0xfb, 0x9a, 0xa4, 0x17, 0xe2, 0x7f, 0x43, 0x82,
- 0xac, 0xcb, 0x7b, 0x21, 0xfe, 0x45, 0x24, 0x8b, 0x87, 0xdf, 0x0f, 0x53, 0x2a, 0x62, 0x98, 0xe3,
- 0x0c, 0x99, 0x1c, 0x89, 0x08, 0x32, 0xe8, 0x67, 0x7a, 0x48, 0x7a, 0x47, 0xf2, 0x90, 0xb4, 0x31,
- 0x21, 0x95, 0xa7, 0x97, 0xa1, 0x7f, 0x57, 0xc0, 0x73, 0x85, 0x7b, 0x00, 0x6e, 0xa6, 0xea, 0xec,
- 0xad, 0x4c, 0x9d, 0x7d, 0xbe, 0xd0, 0x30, 0x51, 0x6c, 0x4d, 0xf9, 0x65, 0xcb, 0xdd, 0x89, 0x97,
- 0x2d, 0x12, 0x15, 0x35, 0xf9, 0xd6, 0xa5, 0xb3, 0xf1, 0xe4, 0x69, 0x63, 0xe6, 0xfd, 0xa7, 0x8d,
- 0x99, 0x0f, 0x9e, 0x36, 0x66, 0x7e, 0x35, 0x6e, 0x28, 0x4f, 0xc6, 0x0d, 0xe5, 0xfd, 0x71, 0x43,
- 0xf9, 0x60, 0xdc, 0x50, 0xfe, 0x35, 0x6e, 0x28, 0xbf, 0xfb, 0xb0, 0x31, 0xf3, 0xb0, 0x34, 0xbc,
- 0xfd, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xc9, 0x16, 0xd9, 0x6e, 0x26, 0x00, 0x00,
+ 0x61, 0x47, 0x5c, 0x10, 0x12, 0x27, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xc8, 0x0d, 0x45, 0x88, 0xcb,
+ 0x5e, 0x90, 0x22, 0x2e, 0xe4, 0x64, 0xb1, 0x93, 0x13, 0x42, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea,
+ 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x38, 0x24, 0xca, 0x6d, 0xba, 0xea, 0xf7, 0x7e, 0xf5, 0x5e,
+ 0xd5, 0xab, 0x7a, 0xbf, 0xae, 0x1e, 0x70, 0xef, 0xf8, 0xdb, 0x9e, 0xaa, 0xdb, 0xed, 0x63, 0xff,
+ 0x90, 0xb8, 0x16, 0xa1, 0xc4, 0x6b, 0x0f, 0x89, 0xa5, 0xd9, 0x6e, 0x5b, 0x74, 0x60, 0x47, 0x6f,
+ 0x63, 0xc7, 0xf1, 0xda, 0xc3, 0xdb, 0xed, 0x01, 0xb1, 0x88, 0x8b, 0x29, 0xd1, 0x54, 0xc7, 0xb5,
+ 0xa9, 0x0d, 0x61, 0x80, 0x51, 0xb1, 0xa3, 0xab, 0x0c, 0xa3, 0x0e, 0x6f, 0x5f, 0xbd, 0x35, 0xd0,
+ 0xe9, 0x91, 0x7f, 0xa8, 0xf6, 0x6d, 0xb3, 0x3d, 0xb0, 0x07, 0x76, 0x9b, 0x43, 0x0f, 0xfd, 0x47,
+ 0xfc, 0x89, 0x3f, 0xf0, 0x5f, 0x01, 0xc5, 0xd5, 0x56, 0x62, 0x98, 0xbe, 0xed, 0x12, 0xc9, 0x30,
+ 0x57, 0xef, 0xc6, 0x18, 0x13, 0xf7, 0x8f, 0x74, 0x8b, 0xb8, 0xa3, 0xb6, 0x73, 0x3c, 0x60, 0x0d,
+ 0x5e, 0xdb, 0x24, 0x14, 0xcb, 0xac, 0xda, 0x45, 0x56, 0xae, 0x6f, 0x51, 0xdd, 0x24, 0x39, 0x83,
+ 0xd7, 0x26, 0x19, 0x78, 0xfd, 0x23, 0x62, 0xe2, 0x9c, 0xdd, 0x2b, 0x45, 0x76, 0x3e, 0xd5, 0x8d,
+ 0xb6, 0x6e, 0x51, 0x8f, 0xba, 0x59, 0xa3, 0xd6, 0x7f, 0x14, 0x00, 0xbb, 0xb6, 0x45, 0x5d, 0xdb,
+ 0x30, 0x88, 0x8b, 0xc8, 0x50, 0xf7, 0x74, 0xdb, 0x82, 0x3f, 0x05, 0x35, 0x16, 0x8f, 0x86, 0x29,
+ 0xae, 0x2b, 0xd7, 0x95, 0x8d, 0x85, 0x3b, 0xdf, 0x52, 0xe3, 0x49, 0x8e, 0xe8, 0x55, 0xe7, 0x78,
+ 0xc0, 0x1a, 0x3c, 0x95, 0xa1, 0xd5, 0xe1, 0x6d, 0x75, 0xf7, 0xf0, 0x5d, 0xd2, 0xa7, 0x3d, 0x42,
+ 0x71, 0x07, 0x3e, 0x39, 0x69, 0xce, 0x8c, 0x4f, 0x9a, 0x20, 0x6e, 0x43, 0x11, 0x2b, 0xdc, 0x05,
+ 0x15, 0xce, 0x5e, 0xe2, 0xec, 0xb7, 0x0a, 0xd9, 0x45, 0xd0, 0x2a, 0xc2, 0x3f, 0x7b, 0xe3, 0x31,
+ 0x25, 0x16, 0x73, 0xaf, 0x73, 0x49, 0x50, 0x57, 0xb6, 0x30, 0xc5, 0x88, 0x13, 0xc1, 0x97, 0x41,
+ 0xcd, 0x15, 0xee, 0xd7, 0xcb, 0xd7, 0x95, 0x8d, 0x72, 0xe7, 0xb2, 0x40, 0xd5, 0xc2, 0xb0, 0x50,
+ 0x84, 0x68, 0xfd, 0x45, 0x01, 0x6b, 0xf9, 0xb8, 0xb7, 0x75, 0x8f, 0xc2, 0x77, 0x72, 0xb1, 0xab,
+ 0xd3, 0xc5, 0xce, 0xac, 0x79, 0xe4, 0xd1, 0xc0, 0x61, 0x4b, 0x22, 0xee, 0xb7, 0x41, 0x55, 0xa7,
+ 0xc4, 0xf4, 0xea, 0xa5, 0xeb, 0xe5, 0x8d, 0x85, 0x3b, 0x37, 0xd4, 0x7c, 0xee, 0xaa, 0x79, 0xc7,
+ 0x3a, 0x8b, 0x82, 0xb2, 0xfa, 0x16, 0x33, 0x46, 0x01, 0x47, 0xeb, 0xbf, 0x0a, 0x98, 0xdf, 0xc2,
+ 0xc4, 0xb4, 0xad, 0x7d, 0x42, 0x2f, 0x60, 0xd1, 0xba, 0xa0, 0xe2, 0x39, 0xa4, 0x2f, 0x16, 0xed,
+ 0x6b, 0x32, 0xdf, 0x23, 0x77, 0xf6, 0x1d, 0xd2, 0x8f, 0x17, 0x8a, 0x3d, 0x21, 0x6e, 0x0c, 0xdf,
+ 0x06, 0xb3, 0x1e, 0xc5, 0xd4, 0xf7, 0xf8, 0x32, 0x2d, 0xdc, 0xf9, 0xfa, 0xe9, 0x34, 0x1c, 0xda,
+ 0x59, 0x12, 0x44, 0xb3, 0xc1, 0x33, 0x12, 0x14, 0xad, 0x7f, 0x96, 0x00, 0x8c, 0xb0, 0x5d, 0xdb,
+ 0xd2, 0x74, 0xca, 0xf2, 0xf7, 0x75, 0x50, 0xa1, 0x23, 0x87, 0xf0, 0x69, 0x98, 0xef, 0xdc, 0x08,
+ 0xbd, 0xb8, 0x3f, 0x72, 0xc8, 0xc7, 0x27, 0xcd, 0xb5, 0xbc, 0x05, 0xeb, 0x41, 0xdc, 0x06, 0x6e,
+ 0x47, 0xfe, 0x95, 0xb8, 0xf5, 0xdd, 0xf4, 0xd0, 0x1f, 0x9f, 0x34, 0x25, 0x87, 0x85, 0x1a, 0x31,
+ 0xa5, 0x1d, 0x84, 0x43, 0x00, 0x0d, 0xec, 0xd1, 0xfb, 0x2e, 0xb6, 0xbc, 0x60, 0x24, 0xdd, 0x24,
+ 0x22, 0xf2, 0x97, 0xa6, 0x5b, 0x1e, 0x66, 0xd1, 0xb9, 0x2a, 0xbc, 0x80, 0xdb, 0x39, 0x36, 0x24,
+ 0x19, 0x01, 0xde, 0x00, 0xb3, 0x2e, 0xc1, 0x9e, 0x6d, 0xd5, 0x2b, 0x3c, 0x8a, 0x68, 0x02, 0x11,
+ 0x6f, 0x45, 0xa2, 0x17, 0xbe, 0x08, 0xe6, 0x4c, 0xe2, 0x79, 0x78, 0x40, 0xea, 0x55, 0x0e, 0x5c,
+ 0x16, 0xc0, 0xb9, 0x5e, 0xd0, 0x8c, 0xc2, 0xfe, 0xd6, 0xef, 0x15, 0xb0, 0x18, 0xcd, 0xdc, 0x05,
+ 0x6c, 0x95, 0x4e, 0x7a, 0xab, 0x3c, 0x7f, 0x6a, 0x9e, 0x14, 0xec, 0x90, 0xf7, 0xcb, 0x09, 0x9f,
+ 0x59, 0x12, 0xc2, 0x9f, 0x80, 0x9a, 0x47, 0x0c, 0xd2, 0xa7, 0xb6, 0x2b, 0x7c, 0x7e, 0x65, 0x4a,
+ 0x9f, 0xf1, 0x21, 0x31, 0xf6, 0x85, 0x69, 0xe7, 0x12, 0x73, 0x3a, 0x7c, 0x42, 0x11, 0x25, 0xfc,
+ 0x11, 0xa8, 0x51, 0x62, 0x3a, 0x06, 0xa6, 0x44, 0x6c, 0x93, 0x54, 0x7e, 0xb3, 0x74, 0x61, 0x64,
+ 0x7b, 0xb6, 0x76, 0x5f, 0xc0, 0xf8, 0x46, 0x89, 0xe6, 0x21, 0x6c, 0x45, 0x11, 0x0d, 0x3c, 0x06,
+ 0x4b, 0xbe, 0xa3, 0x31, 0x24, 0x65, 0x47, 0xf7, 0x60, 0x24, 0xd2, 0xe7, 0xe6, 0xa9, 0x13, 0x72,
+ 0x90, 0x32, 0xe9, 0xac, 0x89, 0x01, 0x96, 0xd2, 0xed, 0x28, 0x43, 0x0d, 0x37, 0xc1, 0xb2, 0xa9,
+ 0x5b, 0x88, 0x60, 0x6d, 0xb4, 0x4f, 0xfa, 0xb6, 0xa5, 0x79, 0x3c, 0x81, 0xaa, 0x9d, 0x75, 0x41,
+ 0xb0, 0xdc, 0x4b, 0x77, 0xa3, 0x2c, 0x1e, 0x6e, 0x83, 0xd5, 0xf0, 0x9c, 0xfd, 0x81, 0xee, 0x51,
+ 0xdb, 0x1d, 0x6d, 0xeb, 0xa6, 0x4e, 0xeb, 0xb3, 0x9c, 0xa7, 0x3e, 0x3e, 0x69, 0xae, 0x22, 0x49,
+ 0x3f, 0x92, 0x5a, 0xb5, 0x7e, 0x33, 0x0b, 0x96, 0x33, 0xa7, 0x01, 0x7c, 0x00, 0xd6, 0xfa, 0xbe,
+ 0xeb, 0x12, 0x8b, 0xee, 0xf8, 0xe6, 0x21, 0x71, 0xf7, 0xfb, 0x47, 0x44, 0xf3, 0x0d, 0xa2, 0xf1,
+ 0x15, 0xad, 0x76, 0x1a, 0xc2, 0xd7, 0xb5, 0xae, 0x14, 0x85, 0x0a, 0xac, 0xe1, 0x0f, 0x01, 0xb4,
+ 0x78, 0x53, 0x4f, 0xf7, 0xbc, 0x88, 0xb3, 0xc4, 0x39, 0xa3, 0x0d, 0xb8, 0x93, 0x43, 0x20, 0x89,
+ 0x15, 0xf3, 0x51, 0x23, 0x9e, 0xee, 0x12, 0x2d, 0xeb, 0x63, 0x39, 0xed, 0xe3, 0x96, 0x14, 0x85,
+ 0x0a, 0xac, 0xe1, 0xab, 0x60, 0x21, 0x18, 0x8d, 0xcf, 0xb9, 0x58, 0x9c, 0x15, 0x41, 0xb6, 0xb0,
+ 0x13, 0x77, 0xa1, 0x24, 0x8e, 0x85, 0x66, 0x1f, 0x7a, 0xc4, 0x1d, 0x12, 0xed, 0xcd, 0x40, 0x03,
+ 0xb0, 0x42, 0x59, 0xe5, 0x85, 0x32, 0x0a, 0x6d, 0x37, 0x87, 0x40, 0x12, 0x2b, 0x16, 0x5a, 0x90,
+ 0x35, 0xb9, 0xd0, 0x66, 0xd3, 0xa1, 0x1d, 0x48, 0x51, 0xa8, 0xc0, 0x9a, 0xe5, 0x5e, 0xe0, 0xf2,
+ 0xe6, 0x10, 0xeb, 0x06, 0x3e, 0x34, 0x48, 0x7d, 0x2e, 0x9d, 0x7b, 0x3b, 0xe9, 0x6e, 0x94, 0xc5,
+ 0xc3, 0x37, 0xc1, 0x95, 0xa0, 0xe9, 0xc0, 0xc2, 0x11, 0x49, 0x8d, 0x93, 0x3c, 0x27, 0x48, 0xae,
+ 0xec, 0x64, 0x01, 0x28, 0x6f, 0x03, 0x5f, 0x07, 0x4b, 0x7d, 0xdb, 0x30, 0x78, 0x3e, 0x76, 0x6d,
+ 0xdf, 0xa2, 0xf5, 0x79, 0xce, 0x02, 0xd9, 0x1e, 0xea, 0xa6, 0x7a, 0x50, 0x06, 0x09, 0x1f, 0x02,
+ 0xd0, 0x0f, 0xcb, 0x81, 0x57, 0x07, 0xc5, 0x85, 0x3e, 0x5f, 0x87, 0xe2, 0x02, 0x1c, 0x35, 0x79,
+ 0x28, 0xc1, 0xd6, 0x7a, 0x5f, 0x01, 0xeb, 0x05, 0x7b, 0x1c, 0x7e, 0x2f, 0x55, 0xf5, 0x6e, 0x66,
+ 0xaa, 0xde, 0xb5, 0x02, 0xb3, 0x44, 0xe9, 0xeb, 0x83, 0x45, 0xa6, 0x3b, 0x74, 0x6b, 0x10, 0x40,
+ 0xc4, 0x09, 0xf6, 0x92, 0xcc, 0x77, 0x94, 0x04, 0xc6, 0xc7, 0xf0, 0x95, 0xf1, 0x49, 0x73, 0x31,
+ 0xd5, 0x87, 0xd2, 0x9c, 0xad, 0x5f, 0x96, 0x00, 0xd8, 0x22, 0x8e, 0x61, 0x8f, 0x4c, 0x62, 0x5d,
+ 0x84, 0x6a, 0xd9, 0x4a, 0xa9, 0x96, 0x96, 0x74, 0x21, 0x22, 0x7f, 0x0a, 0x65, 0xcb, 0x76, 0x46,
+ 0xb6, 0x7c, 0x63, 0x02, 0xcf, 0xe9, 0xba, 0xe5, 0xef, 0x65, 0xb0, 0x12, 0x83, 0x63, 0xe1, 0x72,
+ 0x2f, 0xb5, 0x84, 0x2f, 0x64, 0x96, 0x70, 0x5d, 0x62, 0xf2, 0xa9, 0x29, 0x97, 0x77, 0xc1, 0x12,
+ 0xd3, 0x15, 0xc1, 0xaa, 0x71, 0xd5, 0x32, 0x7b, 0x66, 0xd5, 0x12, 0x55, 0x9d, 0xed, 0x14, 0x13,
+ 0xca, 0x30, 0x17, 0xa8, 0xa4, 0xb9, 0xcf, 0xa3, 0x4a, 0xfa, 0x83, 0x02, 0x96, 0xe2, 0x65, 0xba,
+ 0x00, 0x99, 0xd4, 0x4d, 0xcb, 0xa4, 0xc6, 0xe9, 0x79, 0x59, 0xa0, 0x93, 0xfe, 0x56, 0x49, 0x7a,
+ 0xcd, 0x85, 0xd2, 0x06, 0x7b, 0xa1, 0x72, 0x0c, 0xbd, 0x8f, 0x3d, 0x51, 0x56, 0x2f, 0x05, 0x2f,
+ 0x53, 0x41, 0x1b, 0x8a, 0x7a, 0x53, 0x92, 0xaa, 0xf4, 0xe9, 0x4a, 0xaa, 0xf2, 0x27, 0x23, 0xa9,
+ 0xee, 0x83, 0x9a, 0x17, 0x8a, 0xa9, 0x0a, 0xa7, 0xbc, 0x31, 0x69, 0x3b, 0x0b, 0x1d, 0x15, 0xb1,
+ 0x46, 0x0a, 0x2a, 0x62, 0x92, 0x69, 0xa7, 0xea, 0x67, 0xa9, 0x9d, 0x58, 0x7a, 0x3b, 0xd8, 0xf7,
+ 0x88, 0xc6, 0xb7, 0x52, 0x2d, 0x4e, 0xef, 0x3d, 0xde, 0x8a, 0x44, 0x2f, 0x3c, 0x00, 0xeb, 0x8e,
+ 0x6b, 0x0f, 0x5c, 0xe2, 0x79, 0x5b, 0x04, 0x6b, 0x86, 0x6e, 0x91, 0x30, 0x80, 0xa0, 0xea, 0x5d,
+ 0x1b, 0x9f, 0x34, 0xd7, 0xf7, 0xe4, 0x10, 0x54, 0x64, 0xdb, 0xfa, 0x53, 0x05, 0x5c, 0xce, 0x9e,
+ 0x88, 0x05, 0x42, 0x44, 0x39, 0x97, 0x10, 0x79, 0x39, 0x91, 0xa2, 0x81, 0x4a, 0x4b, 0xbc, 0xf3,
+ 0xe7, 0xd2, 0x74, 0x13, 0x2c, 0x0b, 0xe1, 0x11, 0x76, 0x0a, 0x29, 0x16, 0x2d, 0xcf, 0x41, 0xba,
+ 0x1b, 0x65, 0xf1, 0xf0, 0x1e, 0x58, 0x74, 0xb9, 0xb6, 0x0a, 0x09, 0x02, 0x7d, 0xf2, 0x15, 0x41,
+ 0xb0, 0x88, 0x92, 0x9d, 0x28, 0x8d, 0x65, 0xda, 0x24, 0x96, 0x1c, 0x21, 0x41, 0x25, 0xad, 0x4d,
+ 0x36, 0xb3, 0x00, 0x94, 0xb7, 0x81, 0x3d, 0xb0, 0xe2, 0x5b, 0x79, 0xaa, 0x20, 0xd7, 0xae, 0x09,
+ 0xaa, 0x95, 0x83, 0x3c, 0x04, 0xc9, 0xec, 0xe0, 0x8f, 0x53, 0x72, 0x65, 0x96, 0x9f, 0x22, 0x2f,
+ 0x9c, 0xbe, 0x1d, 0xa6, 0xd6, 0x2b, 0x12, 0x1d, 0x55, 0x9b, 0x56, 0x47, 0xb5, 0xde, 0x53, 0x00,
+ 0xcc, 0x6f, 0xc1, 0x89, 0x2f, 0xf7, 0x39, 0x8b, 0x44, 0x89, 0xd4, 0xe4, 0x0a, 0xe7, 0xe6, 0x64,
+ 0x85, 0x13, 0x9f, 0xa0, 0xd3, 0x49, 0x1c, 0x31, 0xbd, 0x17, 0x73, 0x31, 0x33, 0x85, 0xc4, 0x89,
+ 0xfd, 0x79, 0x36, 0x89, 0x93, 0xe0, 0x39, 0x5d, 0xe2, 0xfc, 0xab, 0x04, 0x56, 0x62, 0xf0, 0xd4,
+ 0x12, 0x47, 0x62, 0xf2, 0xe5, 0xe5, 0xcc, 0x74, 0xb2, 0x23, 0x9e, 0xba, 0xff, 0x13, 0xd9, 0x11,
+ 0x3b, 0x54, 0x20, 0x3b, 0x7e, 0x57, 0x4a, 0x7a, 0x7d, 0x46, 0xd9, 0xf1, 0x09, 0x5c, 0x55, 0x7c,
+ 0xee, 0x94, 0x4b, 0xeb, 0xcf, 0x65, 0x70, 0x39, 0xbb, 0x05, 0x53, 0x75, 0x50, 0x99, 0x58, 0x07,
+ 0xf7, 0xc0, 0xea, 0x23, 0xdf, 0x30, 0x46, 0x3c, 0x86, 0x44, 0x31, 0x0c, 0x2a, 0xe8, 0x57, 0x85,
+ 0xe5, 0xea, 0xf7, 0x25, 0x18, 0x24, 0xb5, 0xcc, 0x97, 0xc5, 0xca, 0xb3, 0x96, 0xc5, 0xea, 0x39,
+ 0xca, 0xa2, 0x5c, 0x59, 0x94, 0xcf, 0xa5, 0x2c, 0xa6, 0xae, 0x89, 0x92, 0xe3, 0x6a, 0xe2, 0x3b,
+ 0xfc, 0x58, 0x01, 0x6b, 0xf2, 0xd7, 0x67, 0x68, 0x80, 0x25, 0x13, 0x3f, 0x4e, 0x5e, 0x5e, 0x4c,
+ 0x2a, 0x18, 0x3e, 0xd5, 0x0d, 0x35, 0xf8, 0xba, 0xa3, 0xbe, 0x65, 0xd1, 0x5d, 0x77, 0x9f, 0xba,
+ 0xba, 0x35, 0x08, 0x0a, 0x6c, 0x2f, 0xc5, 0x85, 0x32, 0xdc, 0xf0, 0x21, 0xa8, 0x99, 0xf8, 0xf1,
+ 0xbe, 0xef, 0x0e, 0xc2, 0x42, 0x78, 0xf6, 0x71, 0x78, 0xee, 0xf7, 0x04, 0x0b, 0x8a, 0xf8, 0x5a,
+ 0x1f, 0x29, 0x60, 0xbd, 0xa0, 0x82, 0x7e, 0x81, 0xa2, 0xdc, 0x05, 0xd7, 0x53, 0x41, 0xb2, 0x0d,
+ 0x49, 0x1e, 0xf9, 0x06, 0xdf, 0x9b, 0x42, 0xaf, 0xdc, 0x04, 0xf3, 0x0e, 0x76, 0xa9, 0x1e, 0x09,
+ 0xdd, 0x6a, 0x67, 0x71, 0x7c, 0xd2, 0x9c, 0xdf, 0x0b, 0x1b, 0x51, 0xdc, 0xdf, 0xfa, 0x55, 0x09,
+ 0x2c, 0x24, 0x48, 0x2e, 0x40, 0x3b, 0xbc, 0x91, 0xd2, 0x0e, 0xd2, 0xaf, 0x31, 0xc9, 0xa8, 0x8a,
+ 0xc4, 0x43, 0x2f, 0x23, 0x1e, 0xbe, 0x39, 0x89, 0xe8, 0x74, 0xf5, 0xf0, 0xef, 0x12, 0x58, 0x4d,
+ 0xa0, 0x63, 0xf9, 0xf0, 0x9d, 0x94, 0x7c, 0xd8, 0xc8, 0xc8, 0x87, 0xba, 0xcc, 0xe6, 0x4b, 0xfd,
+ 0x30, 0x59, 0x3f, 0xfc, 0x51, 0x01, 0xcb, 0x89, 0xb9, 0xbb, 0x00, 0x01, 0xb1, 0x95, 0x16, 0x10,
+ 0xcd, 0x09, 0xf9, 0x52, 0xa0, 0x20, 0x7e, 0x3d, 0x9b, 0xf2, 0xfb, 0x0b, 0x7f, 0x73, 0xf1, 0x73,
+ 0xb0, 0x3a, 0xb4, 0x0d, 0xdf, 0x24, 0x5d, 0x03, 0xeb, 0x66, 0x08, 0x60, 0x15, 0x97, 0x4d, 0xe2,
+ 0x8b, 0x52, 0x7a, 0xe2, 0x7a, 0xba, 0x47, 0x89, 0x45, 0x1f, 0xc4, 0x96, 0x71, 0x9d, 0x7f, 0x20,
+ 0xa1, 0x43, 0xd2, 0x41, 0xe0, 0xab, 0x60, 0x81, 0x55, 0x4a, 0xbd, 0x4f, 0x76, 0xb0, 0x19, 0xe6,
+ 0x54, 0xf4, 0xed, 0x61, 0x3f, 0xee, 0x42, 0x49, 0x1c, 0x3c, 0x02, 0x2b, 0x8e, 0xad, 0xf5, 0xb0,
+ 0x85, 0x07, 0x84, 0x9d, 0xff, 0x7b, 0xb6, 0xa1, 0xf7, 0x47, 0xfc, 0x4e, 0x63, 0xbe, 0xf3, 0x5a,
+ 0xf8, 0xbe, 0xba, 0x97, 0x87, 0xb0, 0xf7, 0x01, 0x49, 0x33, 0xdf, 0xcf, 0x32, 0x4a, 0x68, 0xe6,
+ 0x3e, 0x95, 0xcd, 0xe5, 0xfe, 0x5f, 0x20, 0x4b, 0xae, 0x73, 0x7e, 0x2c, 0x2b, 0xba, 0xad, 0xa9,
+ 0x9d, 0xeb, 0xb6, 0x46, 0xa2, 0x67, 0xe7, 0xcf, 0xa6, 0x67, 0x5b, 0xef, 0x55, 0xc1, 0x95, 0xdc,
+ 0x19, 0xfb, 0x19, 0x5e, 0xb9, 0xe4, 0x84, 0x61, 0xf9, 0x0c, 0xc2, 0x70, 0x13, 0x2c, 0x8b, 0xef,
+ 0x74, 0x19, 0x5d, 0x19, 0xcd, 0x47, 0x37, 0xdd, 0x8d, 0xb2, 0x78, 0xd9, 0x95, 0x4f, 0xf5, 0x8c,
+ 0x57, 0x3e, 0x49, 0x2f, 0xc4, 0xdf, 0x4b, 0x82, 0xc4, 0xcd, 0x7b, 0x21, 0xfe, 0x65, 0x92, 0xc5,
+ 0xc3, 0xef, 0x86, 0x59, 0x19, 0x31, 0xcc, 0x71, 0x86, 0x4c, 0x9a, 0x45, 0x04, 0x19, 0xf4, 0x33,
+ 0x7d, 0x8b, 0x7a, 0x47, 0xf2, 0x2d, 0x6a, 0x63, 0xc2, 0x6e, 0x98, 0xfe, 0x76, 0x47, 0xaa, 0xdd,
+ 0x17, 0xce, 0xae, 0xdd, 0x5b, 0x7f, 0x55, 0xc0, 0x73, 0x85, 0xfb, 0x11, 0x6e, 0xa6, 0x6a, 0xfe,
+ 0xad, 0x4c, 0xcd, 0x7f, 0xbe, 0xd0, 0x30, 0x51, 0xf8, 0x4d, 0xf9, 0xc5, 0xcf, 0xdd, 0x89, 0x17,
+ 0x3f, 0x12, 0x45, 0x37, 0xf9, 0x06, 0xa8, 0xb3, 0xf1, 0xe4, 0x69, 0x63, 0xe6, 0x83, 0xa7, 0x8d,
+ 0x99, 0x0f, 0x9f, 0x36, 0x66, 0x7e, 0x31, 0x6e, 0x28, 0x4f, 0xc6, 0x0d, 0xe5, 0x83, 0x71, 0x43,
+ 0xf9, 0x70, 0xdc, 0x50, 0xfe, 0x31, 0x6e, 0x28, 0xbf, 0xfd, 0xa8, 0x31, 0xf3, 0xb0, 0x34, 0xbc,
+ 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x69, 0x8a, 0x39, 0xfa, 0x26, 0x00, 0x00,
}
func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
@@ -2310,6 +2311,9 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ i--
+ dAtA[i] = 0x48
if m.RevisionHistoryLimit != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
i--
@@ -2399,6 +2403,9 @@ func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+ i--
+ dAtA[i] = 0x58
if len(m.Conditions) > 0 {
for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -2978,6 +2985,7 @@ func (m *StatefulSetSpec) Size() (n int) {
if m.RevisionHistoryLimit != nil {
n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
}
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
return n
}
@@ -3005,6 +3013,7 @@ func (m *StatefulSetStatus) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ n += 1 + sovGenerated(uint64(m.AvailableReplicas))
return n
}
@@ -3408,6 +3417,7 @@ func (this *StatefulSetSpec) String() string {
`PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`,
`UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`,
`RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
`}`,
}, "")
return s
@@ -3431,6 +3441,7 @@ func (this *StatefulSetStatus) String() string {
`UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`,
`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
+ `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
`}`,
}, "")
return s
@@ -7719,6 +7730,25 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error {
}
}
m.RevisionHistoryLimit = &v
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -7982,6 +8012,25 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+ }
+ m.AvailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AvailableReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto
index d4689d88b..6e593a379 100644
--- a/vendor/k8s.io/api/apps/v1/generated.proto
+++ b/vendor/k8s.io/api/apps/v1/generated.proto
@@ -219,7 +219,8 @@ message DaemonSetUpdateStrategy {
// Deployment enables declarative updates for Pods and ReplicaSets.
message Deployment {
- // Standard object metadata.
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -366,7 +367,8 @@ message DeploymentStrategy {
message ReplicaSet {
// If the Labels of a ReplicaSet are empty, they are defaulted to
// be the same as the Pod(s) that the ReplicaSet manages.
- // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -479,7 +481,7 @@ message RollingUpdateDaemonSet {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
- // number is calculated from percentage by rounding down to a minimum of one.
+ // number is calculated from percentage by rounding up.
// This cannot be 0 if MaxSurge is 0
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
@@ -511,7 +513,7 @@ message RollingUpdateDaemonSet {
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
// cause evictions during disruption.
- // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.
+ // This is a beta field and is enabled/disabled by the DaemonSetUpdateSurge feature gate.
// +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
}
@@ -562,6 +564,8 @@ message RollingUpdateStatefulSetStrategy {
// The StatefulSet guarantees that a given network identity will always
// map to the same storage identity.
message StatefulSet {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -598,9 +602,12 @@ message StatefulSetCondition {
// StatefulSetList is a collection of StatefulSets.
message StatefulSetList {
+ // Standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+ // Items is the list of stateful sets.
repeated StatefulSet items = 2;
}
@@ -663,6 +670,13 @@ message StatefulSetSpec {
// consists of all revisions not represented by a currently applied
// StatefulSetSpec version. The default value is 10.
optional int32 revisionHistoryLimit = 8;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // +optional
+ optional int32 minReadySeconds = 9;
}
// StatefulSetStatus represents the current state of a StatefulSet.
@@ -705,6 +719,12 @@ message StatefulSetStatus {
// +patchMergeKey=type
// +patchStrategy=merge
repeated StatefulSetCondition conditions = 10;
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // Remove omitempty when graduating to beta
+ // +optional
+ optional int32 availableReplicas = 11;
}
// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go
index 48299f18e..51c9fa43d 100644
--- a/vendor/k8s.io/api/apps/v1/types.go
+++ b/vendor/k8s.io/api/apps/v1/types.go
@@ -34,6 +34,7 @@ const (
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// StatefulSet represents a set of pods with consistent identities.
@@ -44,6 +45,8 @@ const (
// map to the same storage identity.
type StatefulSet struct {
metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -172,6 +175,13 @@ type StatefulSetSpec struct {
// consists of all revisions not represented by a currently applied
// StatefulSetSpec version. The default value is 10.
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"`
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"`
}
// StatefulSetStatus represents the current state of a StatefulSet.
@@ -214,6 +224,12 @@ type StatefulSetStatus struct {
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // Remove omitempty when graduating to beta
+ // +optional
+ AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,11,opt,name=availableReplicas"`
}
type StatefulSetConditionType string
@@ -240,20 +256,26 @@ type StatefulSetCondition struct {
// StatefulSetList is a collection of StatefulSets.
type StatefulSetList struct {
metav1.TypeMeta `json:",inline"`
+ // Standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+
+ // Items is the list of stateful sets.
+ Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Deployment enables declarative updates for Pods and ReplicaSets.
type Deployment struct {
metav1.TypeMeta `json:",inline"`
- // Standard object metadata.
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -488,7 +510,7 @@ type RollingUpdateDaemonSet struct {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
- // number is calculated from percentage by rounding down to a minimum of one.
+ // number is calculated from percentage by rounding up.
// This cannot be 0 if MaxSurge is 0
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
@@ -520,7 +542,7 @@ type RollingUpdateDaemonSet struct {
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
// cause evictions during disruption.
- // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.
+ // This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.
// +optional
MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
}
@@ -682,6 +704,7 @@ type DaemonSetList struct {
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
+// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
@@ -690,7 +713,8 @@ type ReplicaSet struct {
// If the Labels of a ReplicaSet are empty, they are defaulted to
// be the same as the Pod(s) that the ReplicaSet manages.
- // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
index b9783ad20..39c733adb 100644
--- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
@@ -125,7 +125,7 @@ func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string {
var map_Deployment = map[string]string{
"": "Deployment enables declarative updates for Pods and ReplicaSets.",
- "metadata": "Standard object metadata.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"spec": "Specification of the desired behavior of the Deployment.",
"status": "Most recently observed status of the Deployment.",
}
@@ -262,8 +262,8 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
var map_RollingUpdateDaemonSet = map[string]string{
"": "Spec to control the desired behavior of daemon set rolling update.",
- "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding down to a minimum of one. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
- "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.",
+ "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
+ "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.",
}
func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
@@ -290,9 +290,10 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {
}
var map_StatefulSet = map[string]string{
- "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
- "spec": "Spec defines the desired identities of pods in this set.",
- "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.",
+ "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec defines the desired identities of pods in this set.",
+ "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.",
}
func (StatefulSet) SwaggerDoc() map[string]string {
@@ -313,7 +314,9 @@ func (StatefulSetCondition) SwaggerDoc() map[string]string {
}
var map_StatefulSetList = map[string]string{
- "": "StatefulSetList is a collection of StatefulSets.",
+ "": "StatefulSetList is a collection of StatefulSets.",
+ "metadata": "Standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is the list of stateful sets.",
}
func (StatefulSetList) SwaggerDoc() map[string]string {
@@ -330,6 +333,7 @@ var map_StatefulSetSpec = map[string]string{
"podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.",
"updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
+ "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.",
}
func (StatefulSetSpec) SwaggerDoc() map[string]string {
@@ -347,6 +351,7 @@ var map_StatefulSetStatus = map[string]string{
"updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)",
"collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
"conditions": "Represents the latest available observations of a statefulset's current state.",
+ "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset. This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. Remove omitempty when graduating to beta",
}
func (StatefulSetStatus) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
index 6bc56e382..79e39e582 100644
--- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go
@@ -668,123 +668,124 @@ func init() {
}
var fileDescriptor_2a07313e8f66e805 = []byte{
- // 1855 bytes of a gzipped FileDescriptorProto
+ // 1869 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47,
- 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0x35,
+ 0x15, 0x77, 0x8f, 0x3d, 0xf6, 0xcc, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0x35,
0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91,
0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0xab,
- 0x87, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82,
- 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f,
- 0x77, 0xdb, 0x6d, 0xa4, 0xb5, 0x04, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf,
- 0xc0, 0x0f, 0xce, 0xde, 0xf3, 0x54, 0xdd, 0xee, 0x9d, 0xf9, 0x03, 0xea, 0x5a, 0x94, 0x51, 0xaf,
- 0x37, 0xa6, 0xd6, 0xd0, 0x76, 0x7b, 0x52, 0x40, 0x1c, 0xbd, 0x47, 0x1c, 0xc7, 0xeb, 0x8d, 0x6f,
- 0x0f, 0x28, 0x23, 0xb7, 0x7b, 0x23, 0x6a, 0x51, 0x97, 0x30, 0x3a, 0x54, 0x1d, 0xd7, 0x66, 0x36,
- 0x5a, 0x0b, 0x14, 0x55, 0xe2, 0xe8, 0x2a, 0x57, 0x54, 0xa5, 0xe2, 0xfa, 0xf6, 0x48, 0x67, 0x8f,
- 0xfc, 0x81, 0xaa, 0xd9, 0x66, 0x6f, 0x64, 0x8f, 0xec, 0x9e, 0xd0, 0x1f, 0xf8, 0x0f, 0xc5, 0x97,
- 0xf8, 0x10, 0xbf, 0x02, 0x9c, 0xf5, 0x6e, 0xc2, 0xa1, 0x66, 0xbb, 0xb4, 0x37, 0xce, 0xf9, 0x5a,
- 0x7f, 0x27, 0xd6, 0x31, 0x89, 0xf6, 0x48, 0xb7, 0xa8, 0x3b, 0xe9, 0x39, 0x67, 0x23, 0xbe, 0xe0,
- 0xf5, 0x4c, 0xca, 0x48, 0x91, 0x55, 0xaf, 0xcc, 0xca, 0xf5, 0x2d, 0xa6, 0x9b, 0x34, 0x67, 0xf0,
- 0xee, 0x65, 0x06, 0x9e, 0xf6, 0x88, 0x9a, 0x24, 0x67, 0xf7, 0x76, 0x99, 0x9d, 0xcf, 0x74, 0xa3,
- 0xa7, 0x5b, 0xcc, 0x63, 0x6e, 0xd6, 0xa8, 0xfb, 0x2f, 0x05, 0xd0, 0x9e, 0x6d, 0x31, 0xd7, 0x36,
- 0x0c, 0xea, 0x62, 0x3a, 0xd6, 0x3d, 0xdd, 0xb6, 0xd0, 0xe7, 0xd0, 0xe4, 0xe7, 0x19, 0x12, 0x46,
- 0xda, 0xca, 0xa6, 0xb2, 0xb5, 0xb8, 0xf3, 0x96, 0x1a, 0xdf, 0x74, 0x04, 0xaf, 0x3a, 0x67, 0x23,
- 0xbe, 0xe0, 0xa9, 0x5c, 0x5b, 0x1d, 0xdf, 0x56, 0x0f, 0x07, 0x5f, 0x50, 0x8d, 0x1d, 0x50, 0x46,
- 0xfa, 0xe8, 0xc9, 0xf9, 0xc6, 0xcc, 0xf4, 0x7c, 0x03, 0xe2, 0x35, 0x1c, 0xa1, 0xa2, 0x43, 0xa8,
- 0x0b, 0xf4, 0x9a, 0x40, 0xdf, 0x2e, 0x45, 0x97, 0x87, 0x56, 0x31, 0xf9, 0xc5, 0xfd, 0xc7, 0x8c,
- 0x5a, 0x7c, 0x7b, 0xfd, 0x17, 0x24, 0x74, 0xfd, 0x1e, 0x61, 0x04, 0x0b, 0x20, 0xf4, 0x26, 0x34,
- 0x5d, 0xb9, 0xfd, 0xf6, 0xec, 0xa6, 0xb2, 0x35, 0xdb, 0xbf, 0x21, 0xb5, 0x9a, 0xe1, 0xb1, 0x70,
- 0xa4, 0xd1, 0x7d, 0xa2, 0xc0, 0xad, 0xfc, 0xb9, 0xf7, 0x75, 0x8f, 0xa1, 0x9f, 0xe5, 0xce, 0xae,
- 0x56, 0x3b, 0x3b, 0xb7, 0x16, 0x27, 0x8f, 0x1c, 0x87, 0x2b, 0x89, 0x73, 0x1f, 0x41, 0x43, 0x67,
- 0xd4, 0xf4, 0xda, 0xb5, 0xcd, 0xd9, 0xad, 0xc5, 0x9d, 0x37, 0xd4, 0x92, 0x00, 0x56, 0xf3, 0xbb,
- 0xeb, 0x2f, 0x49, 0xdc, 0xc6, 0x03, 0x8e, 0x80, 0x03, 0xa0, 0xee, 0xaf, 0x6b, 0x00, 0xf7, 0xa8,
- 0x63, 0xd8, 0x13, 0x93, 0x5a, 0xec, 0x1a, 0x9e, 0xee, 0x01, 0xd4, 0x3d, 0x87, 0x6a, 0xf2, 0xe9,
- 0x5e, 0x2d, 0x3d, 0x41, 0xbc, 0xa9, 0x63, 0x87, 0x6a, 0xf1, 0xa3, 0xf1, 0x2f, 0x2c, 0x20, 0xd0,
- 0x27, 0x30, 0xe7, 0x31, 0xc2, 0x7c, 0x4f, 0x3c, 0xd9, 0xe2, 0xce, 0x6b, 0x55, 0xc0, 0x84, 0x41,
- 0xbf, 0x25, 0xe1, 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xba, 0x7f, 0x9d, 0x85, 0x95, 0x58, 0x79, 0xcf,
- 0xb6, 0x86, 0x3a, 0xe3, 0x21, 0x7d, 0x17, 0xea, 0x6c, 0xe2, 0x50, 0x71, 0x27, 0x0b, 0xfd, 0x57,
- 0xc3, 0xcd, 0x9c, 0x4c, 0x1c, 0xfa, 0xec, 0x7c, 0x63, 0xad, 0xc0, 0x84, 0x8b, 0xb0, 0x30, 0x42,
- 0xfb, 0xd1, 0x3e, 0x6b, 0xc2, 0xfc, 0x9d, 0xb4, 0xf3, 0x67, 0xe7, 0x1b, 0x05, 0x05, 0x44, 0x8d,
- 0x90, 0xd2, 0x5b, 0x44, 0x5f, 0x40, 0xcb, 0x20, 0x1e, 0x3b, 0x75, 0x86, 0x84, 0xd1, 0x13, 0xdd,
- 0xa4, 0xed, 0x39, 0x71, 0xfa, 0xd7, 0xab, 0x3d, 0x14, 0xb7, 0xe8, 0xdf, 0x92, 0x3b, 0x68, 0xed,
- 0xa7, 0x90, 0x70, 0x06, 0x19, 0x8d, 0x01, 0xf1, 0x95, 0x13, 0x97, 0x58, 0x5e, 0x70, 0x2a, 0xee,
- 0x6f, 0xfe, 0xca, 0xfe, 0xd6, 0xa5, 0x3f, 0xb4, 0x9f, 0x43, 0xc3, 0x05, 0x1e, 0xd0, 0x2b, 0x30,
- 0xe7, 0x52, 0xe2, 0xd9, 0x56, 0xbb, 0x2e, 0x6e, 0x2c, 0x7a, 0x2e, 0x2c, 0x56, 0xb1, 0x94, 0xa2,
- 0xd7, 0x60, 0xde, 0xa4, 0x9e, 0x47, 0x46, 0xb4, 0xdd, 0x10, 0x8a, 0xcb, 0x52, 0x71, 0xfe, 0x20,
- 0x58, 0xc6, 0xa1, 0xbc, 0xfb, 0x7b, 0x05, 0x5a, 0xf1, 0x33, 0x5d, 0x43, 0xae, 0x7e, 0x94, 0xce,
- 0xd5, 0xef, 0x56, 0x08, 0xce, 0x92, 0x1c, 0xfd, 0x7b, 0x0d, 0x50, 0xac, 0x84, 0x6d, 0xc3, 0x18,
- 0x10, 0xed, 0x0c, 0x6d, 0x42, 0xdd, 0x22, 0x66, 0x18, 0x93, 0x51, 0x82, 0xfc, 0x88, 0x98, 0x14,
- 0x0b, 0x09, 0xfa, 0x52, 0x01, 0xe4, 0x8b, 0xd7, 0x1c, 0xee, 0x5a, 0x96, 0xcd, 0x08, 0xbf, 0xe0,
- 0x70, 0x43, 0x7b, 0x15, 0x36, 0x14, 0xfa, 0x52, 0x4f, 0x73, 0x28, 0xf7, 0x2d, 0xe6, 0x4e, 0xe2,
- 0x87, 0xcd, 0x2b, 0xe0, 0x02, 0xd7, 0xe8, 0xa7, 0x00, 0xae, 0xc4, 0x3c, 0xb1, 0x65, 0xda, 0x96,
- 0xd7, 0x80, 0xd0, 0xfd, 0x9e, 0x6d, 0x3d, 0xd4, 0x47, 0x71, 0x61, 0xc1, 0x11, 0x04, 0x4e, 0xc0,
- 0xad, 0xdf, 0x87, 0xb5, 0x92, 0x7d, 0xa2, 0x1b, 0x30, 0x7b, 0x46, 0x27, 0xc1, 0x55, 0x61, 0xfe,
- 0x13, 0xad, 0x42, 0x63, 0x4c, 0x0c, 0x9f, 0x06, 0x39, 0x89, 0x83, 0x8f, 0x3b, 0xb5, 0xf7, 0x94,
- 0xee, 0xef, 0x1a, 0xc9, 0x48, 0xe1, 0xf5, 0x06, 0x6d, 0xf1, 0xf6, 0xe0, 0x18, 0xba, 0x46, 0x3c,
- 0x81, 0xd1, 0xe8, 0xbf, 0x10, 0xb4, 0x86, 0x60, 0x0d, 0x47, 0x52, 0xf4, 0x73, 0x68, 0x7a, 0xd4,
- 0xa0, 0x1a, 0xb3, 0x5d, 0x59, 0xe2, 0xde, 0xae, 0x18, 0x53, 0x64, 0x40, 0x8d, 0x63, 0x69, 0x1a,
- 0xc0, 0x87, 0x5f, 0x38, 0x82, 0x44, 0x9f, 0x40, 0x93, 0x51, 0xd3, 0x31, 0x08, 0xa3, 0xf2, 0xf6,
- 0x52, 0x71, 0xc5, 0x6b, 0x07, 0x07, 0x3b, 0xb2, 0x87, 0x27, 0x52, 0x4d, 0x54, 0xcf, 0x28, 0x4e,
- 0xc3, 0x55, 0x1c, 0xc1, 0xa0, 0x9f, 0x40, 0xd3, 0x63, 0xbc, 0xab, 0x8f, 0x26, 0x22, 0xdb, 0x2e,
- 0x6a, 0x2b, 0xc9, 0x3a, 0x1a, 0x98, 0xc4, 0xd0, 0xe1, 0x0a, 0x8e, 0xe0, 0xd0, 0x2e, 0x2c, 0x9b,
- 0xba, 0x85, 0x29, 0x19, 0x4e, 0x8e, 0xa9, 0x66, 0x5b, 0x43, 0x4f, 0xa4, 0x69, 0xa3, 0xbf, 0x26,
- 0x8d, 0x96, 0x0f, 0xd2, 0x62, 0x9c, 0xd5, 0x47, 0xfb, 0xb0, 0x1a, 0xb6, 0xdd, 0x8f, 0x74, 0x8f,
- 0xd9, 0xee, 0x64, 0x5f, 0x37, 0x75, 0x26, 0x6a, 0x5e, 0xa3, 0xdf, 0x9e, 0x9e, 0x6f, 0xac, 0xe2,
- 0x02, 0x39, 0x2e, 0xb4, 0xe2, 0x75, 0xc5, 0x21, 0xbe, 0x47, 0x87, 0xa2, 0x86, 0x35, 0xe3, 0xba,
- 0x72, 0x24, 0x56, 0xb1, 0x94, 0xa2, 0x1f, 0xa7, 0xc2, 0xb4, 0x79, 0xb5, 0x30, 0x6d, 0x95, 0x87,
- 0x28, 0x3a, 0x85, 0x35, 0xc7, 0xb5, 0x47, 0x2e, 0xf5, 0xbc, 0x7b, 0x94, 0x0c, 0x0d, 0xdd, 0xa2,
- 0xe1, 0xcd, 0x2c, 0x88, 0x13, 0xbd, 0x34, 0x3d, 0xdf, 0x58, 0x3b, 0x2a, 0x56, 0xc1, 0x65, 0xb6,
- 0xdd, 0x3f, 0xd5, 0xe1, 0x46, 0xb6, 0xc7, 0xa1, 0x8f, 0x01, 0xd9, 0x03, 0x8f, 0xba, 0x63, 0x3a,
- 0xfc, 0x30, 0x18, 0xdc, 0xf8, 0x74, 0xa3, 0x88, 0xe9, 0x26, 0xca, 0xdb, 0xc3, 0x9c, 0x06, 0x2e,
- 0xb0, 0x0a, 0xe6, 0x23, 0x99, 0x00, 0x35, 0xb1, 0xd1, 0xc4, 0x7c, 0x94, 0x4b, 0x82, 0x5d, 0x58,
- 0x96, 0xb9, 0x1f, 0x0a, 0x45, 0xb0, 0x26, 0xde, 0xfd, 0x34, 0x2d, 0xc6, 0x59, 0x7d, 0x74, 0x17,
- 0x96, 0x5c, 0x1e, 0x07, 0x11, 0xc0, 0xbc, 0x00, 0xf8, 0x96, 0x04, 0x58, 0xc2, 0x49, 0x21, 0x4e,
- 0xeb, 0xa2, 0x0f, 0xe1, 0x26, 0x19, 0x13, 0xdd, 0x20, 0x03, 0x83, 0x46, 0x00, 0x75, 0x01, 0xf0,
- 0xa2, 0x04, 0xb8, 0xb9, 0x9b, 0x55, 0xc0, 0x79, 0x1b, 0x74, 0x00, 0x2b, 0xbe, 0x95, 0x87, 0x0a,
- 0x82, 0xf8, 0x25, 0x09, 0xb5, 0x72, 0x9a, 0x57, 0xc1, 0x45, 0x76, 0xe8, 0x73, 0x00, 0x2d, 0xec,
- 0xea, 0x5e, 0x7b, 0x4e, 0x94, 0xe1, 0x37, 0x2b, 0x24, 0x5b, 0x34, 0x0a, 0xc4, 0x25, 0x30, 0x5a,
- 0xf2, 0x70, 0x02, 0x13, 0xdd, 0x81, 0x96, 0x66, 0x1b, 0x86, 0x88, 0xfc, 0x3d, 0xdb, 0xb7, 0x98,
- 0x08, 0xde, 0x46, 0x1f, 0xf1, 0x66, 0xbf, 0x97, 0x92, 0xe0, 0x8c, 0x66, 0xf7, 0x8f, 0x4a, 0xb2,
- 0xcd, 0x84, 0xe9, 0x8c, 0xee, 0xa4, 0x46, 0x9f, 0x57, 0x32, 0xa3, 0xcf, 0xad, 0xbc, 0x45, 0x62,
- 0xf2, 0xd1, 0x61, 0x89, 0x07, 0xbf, 0x6e, 0x8d, 0x82, 0x07, 0x97, 0x25, 0xf1, 0xad, 0x0b, 0x53,
- 0x29, 0xd2, 0x4e, 0x34, 0xc6, 0x9b, 0xe2, 0xcd, 0x93, 0x42, 0x9c, 0x46, 0xee, 0x7e, 0x00, 0xad,
- 0x74, 0x1e, 0xa6, 0x66, 0x7a, 0xe5, 0xd2, 0x99, 0xfe, 0x1b, 0x05, 0xd6, 0x4a, 0xbc, 0x23, 0x03,
- 0x5a, 0x26, 0x79, 0x9c, 0x78, 0xe6, 0x4b, 0x67, 0x63, 0xce, 0x9a, 0xd4, 0x80, 0x35, 0xa9, 0x0f,
- 0x2c, 0x76, 0xe8, 0x1e, 0x33, 0x57, 0xb7, 0x46, 0xc1, 0x3b, 0x1c, 0xa4, 0xb0, 0x70, 0x06, 0x1b,
- 0x7d, 0x06, 0x4d, 0x93, 0x3c, 0x3e, 0xf6, 0xdd, 0x51, 0xd1, 0x7d, 0x55, 0xf3, 0x23, 0xfa, 0xc7,
- 0x81, 0x44, 0xc1, 0x11, 0x5e, 0xf7, 0x10, 0x36, 0x53, 0x87, 0xe4, 0xa5, 0x82, 0x3e, 0xf4, 0x8d,
- 0x63, 0x1a, 0x3f, 0xf8, 0x1b, 0xb0, 0xe0, 0x10, 0x97, 0xe9, 0x51, 0xb9, 0x68, 0xf4, 0x97, 0xa6,
- 0xe7, 0x1b, 0x0b, 0x47, 0xe1, 0x22, 0x8e, 0xe5, 0xdd, 0x7f, 0x2b, 0xd0, 0x38, 0xd6, 0x88, 0x41,
- 0xaf, 0x81, 0x3a, 0xdc, 0x4b, 0x51, 0x87, 0x6e, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd,
- 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49,
- 0xe5, 0xb2, 0x2a, 0xd9, 0xfd, 0x6d, 0x0d, 0x16, 0x13, 0x2e, 0xae, 0x66, 0xcd, 0xaf, 0x3b, 0x31,
- 0x68, 0xf0, 0x4a, 0xb2, 0x53, 0xe5, 0x20, 0x6a, 0x38, 0x54, 0x04, 0xf3, 0x5b, 0xdc, 0xbd, 0xf3,
- 0xb3, 0xc6, 0x07, 0xd0, 0x62, 0xc4, 0x1d, 0x51, 0x16, 0xca, 0xc4, 0x85, 0x2d, 0xc4, 0xe4, 0xe1,
- 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e,
- 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c,
- 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 0xed, 0xe2, 0x48, 0xfb, 0x47, 0x0d, 0x56, 0x13,
- 0xda, 0x31, 0x37, 0xfd, 0x7e, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91,
- 0xd3, 0x62, 0xc2, 0x38, 0xfb, 0xbf, 0x48, 0x18, 0xff, 0xa0, 0xc0, 0x72, 0xe2, 0xee, 0xae, 0x81,
- 0x31, 0x3e, 0x48, 0x33, 0xc6, 0x97, 0xab, 0x04, 0x4d, 0x09, 0x65, 0xfc, 0x73, 0x23, 0xb5, 0xf9,
- 0xff, 0x7b, 0x12, 0xf3, 0x4b, 0x58, 0x1d, 0xdb, 0x86, 0x6f, 0xd2, 0x3d, 0x83, 0xe8, 0x66, 0xa8,
- 0xc0, 0x87, 0xbe, 0xd9, 0xec, 0x1f, 0x43, 0x11, 0x3c, 0x75, 0x3d, 0xdd, 0x63, 0xd4, 0x62, 0x9f,
- 0xc6, 0x96, 0xfd, 0x6f, 0x4b, 0x27, 0xab, 0x9f, 0x16, 0xc0, 0xe1, 0x42, 0x27, 0xe8, 0x7b, 0xb0,
- 0xc8, 0x07, 0x66, 0x5d, 0xa3, 0x9c, 0x7b, 0xcb, 0xc0, 0x5a, 0x91, 0x40, 0x8b, 0xc7, 0xb1, 0x08,
- 0x27, 0xf5, 0xd0, 0x23, 0x58, 0x71, 0xec, 0xe1, 0x01, 0xb1, 0xc8, 0x88, 0xf2, 0x31, 0xe3, 0xc8,
- 0x36, 0x74, 0x6d, 0x22, 0x98, 0xcd, 0x42, 0xff, 0xdd, 0x70, 0xb8, 0x3c, 0xca, 0xab, 0x3c, 0xe3,
- 0x14, 0x21, 0xbf, 0x2c, 0x92, 0xba, 0x08, 0x12, 0xb9, 0xd0, 0xf2, 0x65, 0xbb, 0x97, 0x44, 0x2f,
- 0xf8, 0x0b, 0x67, 0xa7, 0x4a, 0x84, 0x9d, 0xa6, 0x2c, 0xe3, 0xea, 0x9f, 0x5e, 0xc7, 0x19, 0x0f,
- 0xa5, 0xc4, 0xad, 0xf9, 0xdf, 0x10, 0xb7, 0xee, 0x3f, 0xeb, 0x70, 0x33, 0x57, 0x2a, 0xd1, 0x0f,
- 0x2f, 0x60, 0x38, 0xb7, 0x9e, 0x1b, 0xbb, 0xc9, 0x51, 0x93, 0xd9, 0x2b, 0x50, 0x93, 0x5d, 0x58,
- 0xd6, 0x7c, 0xd7, 0xa5, 0x16, 0xcb, 0x10, 0x93, 0x88, 0x1a, 0xed, 0xa5, 0xc5, 0x38, 0xab, 0x5f,
- 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, 0xbb, 0xfc, 0x2e, 0xe4,
- 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, 0x83, 0xd3, 0x94, 0x14,
- 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x47, 0x81, 0xc8, 0xf1, 0xed,
- 0x2a, 0xb1, 0x5c, 0x99, 0x48, 0x75, 0xff, 0xa2, 0xc0, 0x8b, 0xa5, 0x49, 0x80, 0x76, 0x53, 0x2d,
- 0x77, 0x3b, 0xd3, 0x72, 0xbf, 0x53, 0x6a, 0x98, 0xe8, 0xbb, 0x6e, 0x31, 0x35, 0x7a, 0xbf, 0x1a,
- 0x35, 0x2a, 0x98, 0xdb, 0x2f, 0xe7, 0x48, 0xfd, 0xed, 0x27, 0x4f, 0x3b, 0x33, 0x5f, 0x3d, 0xed,
- 0xcc, 0x7c, 0xfd, 0xb4, 0x33, 0xf3, 0xab, 0x69, 0x47, 0x79, 0x32, 0xed, 0x28, 0x5f, 0x4d, 0x3b,
- 0xca, 0xd7, 0xd3, 0x8e, 0xf2, 0xb7, 0x69, 0x47, 0xf9, 0xcd, 0x37, 0x9d, 0x99, 0xcf, 0xe6, 0xa5,
- 0xc7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x99, 0x8d, 0x1e, 0xaf, 0x61, 0x1b, 0x00, 0x00,
+ 0x87, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x4a, 0xce, 0xfc, 0x15, 0xdc, 0x40, 0x70, 0xe3, 0xb4, 0xc7,
+ 0x88, 0x0b, 0x39, 0x59, 0xec, 0xe4, 0x0a, 0x47, 0x2e, 0x2b, 0x21, 0xa1, 0xaa, 0xae, 0xfe, 0xee,
+ 0xb6, 0xdb, 0x48, 0x6b, 0x09, 0x6e, 0xd3, 0xf5, 0xde, 0xfb, 0xbd, 0x57, 0x55, 0xef, 0xbd, 0x7a,
+ 0xbf, 0x81, 0x1f, 0x9d, 0xbd, 0xe7, 0xa9, 0xba, 0xdd, 0x3b, 0xf3, 0x07, 0xd4, 0xb5, 0x28, 0xa3,
+ 0x5e, 0x6f, 0x4c, 0xad, 0xa1, 0xed, 0xf6, 0xa4, 0x80, 0x38, 0x7a, 0x8f, 0x38, 0x8e, 0xd7, 0x1b,
+ 0xdf, 0x1e, 0x50, 0x46, 0x6e, 0xf7, 0x46, 0xd4, 0xa2, 0x2e, 0x61, 0x74, 0xa8, 0x3a, 0xae, 0xcd,
+ 0x6c, 0xb4, 0x16, 0x28, 0xaa, 0xc4, 0xd1, 0x55, 0xae, 0xa8, 0x4a, 0xc5, 0xf5, 0xed, 0x91, 0xce,
+ 0x1e, 0xf9, 0x03, 0x55, 0xb3, 0xcd, 0xde, 0xc8, 0x1e, 0xd9, 0x3d, 0xa1, 0x3f, 0xf0, 0x1f, 0x8a,
+ 0x2f, 0xf1, 0x21, 0x7e, 0x05, 0x38, 0xeb, 0xdd, 0x84, 0x43, 0xcd, 0x76, 0x69, 0x6f, 0x9c, 0xf3,
+ 0xb5, 0xfe, 0x4e, 0xac, 0x63, 0x12, 0xed, 0x91, 0x6e, 0x51, 0x77, 0xd2, 0x73, 0xce, 0x46, 0x7c,
+ 0xc1, 0xeb, 0x99, 0x94, 0x91, 0x22, 0xab, 0x5e, 0x99, 0x95, 0xeb, 0x5b, 0x4c, 0x37, 0x69, 0xce,
+ 0xe0, 0xdd, 0xcb, 0x0c, 0x3c, 0xed, 0x11, 0x35, 0x49, 0xce, 0xee, 0xed, 0x32, 0x3b, 0x9f, 0xe9,
+ 0x46, 0x4f, 0xb7, 0x98, 0xc7, 0xdc, 0xac, 0x51, 0xf7, 0x5f, 0x0a, 0xa0, 0x3d, 0xdb, 0x62, 0xae,
+ 0x6d, 0x18, 0xd4, 0xc5, 0x74, 0xac, 0x7b, 0xba, 0x6d, 0xa1, 0xcf, 0xa1, 0xc1, 0xf7, 0x33, 0x24,
+ 0x8c, 0xb4, 0x95, 0x4d, 0x65, 0x6b, 0x71, 0xe7, 0x2d, 0x35, 0x3e, 0xe9, 0x08, 0x5e, 0x75, 0xce,
+ 0x46, 0x7c, 0xc1, 0x53, 0xb9, 0xb6, 0x3a, 0xbe, 0xad, 0x1e, 0x0e, 0xbe, 0xa0, 0x1a, 0x3b, 0xa0,
+ 0x8c, 0xf4, 0xd1, 0x93, 0xf3, 0x8d, 0x99, 0xe9, 0xf9, 0x06, 0xc4, 0x6b, 0x38, 0x42, 0x45, 0x87,
+ 0x30, 0x27, 0xd0, 0x6b, 0x02, 0x7d, 0xbb, 0x14, 0x5d, 0x6e, 0x5a, 0xc5, 0xe4, 0x57, 0xf7, 0x1f,
+ 0x33, 0x6a, 0xf1, 0xf0, 0xfa, 0x2f, 0x48, 0xe8, 0xb9, 0x7b, 0x84, 0x11, 0x2c, 0x80, 0xd0, 0x9b,
+ 0xd0, 0x70, 0x65, 0xf8, 0xed, 0xd9, 0x4d, 0x65, 0x6b, 0xb6, 0x7f, 0x43, 0x6a, 0x35, 0xc2, 0x6d,
+ 0xe1, 0x48, 0xa3, 0xfb, 0x44, 0x81, 0x5b, 0xf9, 0x7d, 0xef, 0xeb, 0x1e, 0x43, 0xbf, 0xc8, 0xed,
+ 0x5d, 0xad, 0xb6, 0x77, 0x6e, 0x2d, 0x76, 0x1e, 0x39, 0x0e, 0x57, 0x12, 0xfb, 0x3e, 0x82, 0xba,
+ 0xce, 0xa8, 0xe9, 0xb5, 0x6b, 0x9b, 0xb3, 0x5b, 0x8b, 0x3b, 0x6f, 0xa8, 0x25, 0x09, 0xac, 0xe6,
+ 0xa3, 0xeb, 0x2f, 0x49, 0xdc, 0xfa, 0x03, 0x8e, 0x80, 0x03, 0xa0, 0xee, 0x6f, 0x6b, 0x00, 0xf7,
+ 0xa8, 0x63, 0xd8, 0x13, 0x93, 0x5a, 0xec, 0x1a, 0xae, 0xee, 0x01, 0xcc, 0x79, 0x0e, 0xd5, 0xe4,
+ 0xd5, 0xbd, 0x5a, 0xba, 0x83, 0x38, 0xa8, 0x63, 0x87, 0x6a, 0xf1, 0xa5, 0xf1, 0x2f, 0x2c, 0x20,
+ 0xd0, 0x27, 0x30, 0xef, 0x31, 0xc2, 0x7c, 0x4f, 0x5c, 0xd9, 0xe2, 0xce, 0x6b, 0x55, 0xc0, 0x84,
+ 0x41, 0xbf, 0x25, 0xe1, 0xe6, 0x83, 0x6f, 0x2c, 0x81, 0xba, 0x7f, 0x9b, 0x85, 0x95, 0x58, 0x79,
+ 0xcf, 0xb6, 0x86, 0x3a, 0xe3, 0x29, 0x7d, 0x17, 0xe6, 0xd8, 0xc4, 0xa1, 0xe2, 0x4c, 0x9a, 0xfd,
+ 0x57, 0xc3, 0x60, 0x4e, 0x26, 0x0e, 0x7d, 0x76, 0xbe, 0xb1, 0x56, 0x60, 0xc2, 0x45, 0x58, 0x18,
+ 0xa1, 0xfd, 0x28, 0xce, 0x9a, 0x30, 0x7f, 0x27, 0xed, 0xfc, 0xd9, 0xf9, 0x46, 0x41, 0x03, 0x51,
+ 0x23, 0xa4, 0x74, 0x88, 0xe8, 0x0b, 0x68, 0x19, 0xc4, 0x63, 0xa7, 0xce, 0x90, 0x30, 0x7a, 0xa2,
+ 0x9b, 0xb4, 0x3d, 0x2f, 0x76, 0xff, 0x7a, 0xb5, 0x8b, 0xe2, 0x16, 0xfd, 0x5b, 0x32, 0x82, 0xd6,
+ 0x7e, 0x0a, 0x09, 0x67, 0x90, 0xd1, 0x18, 0x10, 0x5f, 0x39, 0x71, 0x89, 0xe5, 0x05, 0xbb, 0xe2,
+ 0xfe, 0x16, 0xae, 0xec, 0x6f, 0x5d, 0xfa, 0x43, 0xfb, 0x39, 0x34, 0x5c, 0xe0, 0x01, 0xbd, 0x02,
+ 0xf3, 0x2e, 0x25, 0x9e, 0x6d, 0xb5, 0xe7, 0xc4, 0x89, 0x45, 0xd7, 0x85, 0xc5, 0x2a, 0x96, 0x52,
+ 0xf4, 0x1a, 0x2c, 0x98, 0xd4, 0xf3, 0xc8, 0x88, 0xb6, 0xeb, 0x42, 0x71, 0x59, 0x2a, 0x2e, 0x1c,
+ 0x04, 0xcb, 0x38, 0x94, 0x77, 0xff, 0xa8, 0x40, 0x2b, 0xbe, 0xa6, 0x6b, 0xa8, 0xd5, 0x8f, 0xd2,
+ 0xb5, 0xfa, 0xfd, 0x0a, 0xc9, 0x59, 0x52, 0xa3, 0xff, 0xa8, 0x01, 0x8a, 0x95, 0xb0, 0x6d, 0x18,
+ 0x03, 0xa2, 0x9d, 0xa1, 0x4d, 0x98, 0xb3, 0x88, 0x19, 0xe6, 0x64, 0x54, 0x20, 0x3f, 0x21, 0x26,
+ 0xc5, 0x42, 0x82, 0xbe, 0x54, 0x00, 0xf9, 0xe2, 0x36, 0x87, 0xbb, 0x96, 0x65, 0x33, 0xc2, 0x0f,
+ 0x38, 0x0c, 0x68, 0xaf, 0x42, 0x40, 0xa1, 0x2f, 0xf5, 0x34, 0x87, 0x72, 0xdf, 0x62, 0xee, 0x24,
+ 0xbe, 0xd8, 0xbc, 0x02, 0x2e, 0x70, 0x8d, 0x7e, 0x0e, 0xe0, 0x4a, 0xcc, 0x13, 0x5b, 0x96, 0x6d,
+ 0x79, 0x0f, 0x08, 0xdd, 0xef, 0xd9, 0xd6, 0x43, 0x7d, 0x14, 0x37, 0x16, 0x1c, 0x41, 0xe0, 0x04,
+ 0xdc, 0xfa, 0x7d, 0x58, 0x2b, 0x89, 0x13, 0xdd, 0x80, 0xd9, 0x33, 0x3a, 0x09, 0x8e, 0x0a, 0xf3,
+ 0x9f, 0x68, 0x15, 0xea, 0x63, 0x62, 0xf8, 0x34, 0xa8, 0x49, 0x1c, 0x7c, 0xdc, 0xa9, 0xbd, 0xa7,
+ 0x74, 0x7f, 0x5f, 0x4f, 0x66, 0x0a, 0xef, 0x37, 0x68, 0x8b, 0x3f, 0x0f, 0x8e, 0xa1, 0x6b, 0xc4,
+ 0x13, 0x18, 0xf5, 0xfe, 0x0b, 0xc1, 0xd3, 0x10, 0xac, 0xe1, 0x48, 0x8a, 0x7e, 0x09, 0x0d, 0x8f,
+ 0x1a, 0x54, 0x63, 0xb6, 0x2b, 0x5b, 0xdc, 0xdb, 0x15, 0x73, 0x8a, 0x0c, 0xa8, 0x71, 0x2c, 0x4d,
+ 0x03, 0xf8, 0xf0, 0x0b, 0x47, 0x90, 0xe8, 0x13, 0x68, 0x30, 0x6a, 0x3a, 0x06, 0x61, 0x54, 0x9e,
+ 0x5e, 0x2a, 0xaf, 0x78, 0xef, 0xe0, 0x60, 0x47, 0xf6, 0xf0, 0x44, 0xaa, 0x89, 0xee, 0x19, 0xe5,
+ 0x69, 0xb8, 0x8a, 0x23, 0x18, 0xf4, 0x33, 0x68, 0x78, 0x8c, 0xbf, 0xea, 0xa3, 0x89, 0xa8, 0xb6,
+ 0x8b, 0x9e, 0x95, 0x64, 0x1f, 0x0d, 0x4c, 0x62, 0xe8, 0x70, 0x05, 0x47, 0x70, 0x68, 0x17, 0x96,
+ 0x4d, 0xdd, 0xc2, 0x94, 0x0c, 0x27, 0xc7, 0x54, 0xb3, 0xad, 0xa1, 0x27, 0xca, 0xb4, 0xde, 0x5f,
+ 0x93, 0x46, 0xcb, 0x07, 0x69, 0x31, 0xce, 0xea, 0xa3, 0x7d, 0x58, 0x0d, 0x9f, 0xdd, 0x8f, 0x74,
+ 0x8f, 0xd9, 0xee, 0x64, 0x5f, 0x37, 0x75, 0x26, 0x7a, 0x5e, 0xbd, 0xdf, 0x9e, 0x9e, 0x6f, 0xac,
+ 0xe2, 0x02, 0x39, 0x2e, 0xb4, 0xe2, 0x7d, 0xc5, 0x21, 0xbe, 0x47, 0x87, 0xa2, 0x87, 0x35, 0xe2,
+ 0xbe, 0x72, 0x24, 0x56, 0xb1, 0x94, 0xa2, 0x9f, 0xa6, 0xd2, 0xb4, 0x71, 0xb5, 0x34, 0x6d, 0x95,
+ 0xa7, 0x28, 0x3a, 0x85, 0x35, 0xc7, 0xb5, 0x47, 0x2e, 0xf5, 0xbc, 0x7b, 0x94, 0x0c, 0x0d, 0xdd,
+ 0xa2, 0xe1, 0xc9, 0x34, 0xc5, 0x8e, 0x5e, 0x9a, 0x9e, 0x6f, 0xac, 0x1d, 0x15, 0xab, 0xe0, 0x32,
+ 0xdb, 0xee, 0x5f, 0xe6, 0xe0, 0x46, 0xf6, 0x8d, 0x43, 0x1f, 0x03, 0xb2, 0x07, 0x1e, 0x75, 0xc7,
+ 0x74, 0xf8, 0x61, 0x30, 0xb8, 0xf1, 0xe9, 0x46, 0x11, 0xd3, 0x4d, 0x54, 0xb7, 0x87, 0x39, 0x0d,
+ 0x5c, 0x60, 0x15, 0xcc, 0x47, 0xb2, 0x00, 0x6a, 0x22, 0xd0, 0xc4, 0x7c, 0x94, 0x2b, 0x82, 0x5d,
+ 0x58, 0x96, 0xb5, 0x1f, 0x0a, 0x45, 0xb2, 0x26, 0xee, 0xfd, 0x34, 0x2d, 0xc6, 0x59, 0x7d, 0x74,
+ 0x17, 0x96, 0x5c, 0x9e, 0x07, 0x11, 0xc0, 0x82, 0x00, 0xf8, 0x8e, 0x04, 0x58, 0xc2, 0x49, 0x21,
+ 0x4e, 0xeb, 0xa2, 0x0f, 0xe1, 0x26, 0x19, 0x13, 0xdd, 0x20, 0x03, 0x83, 0x46, 0x00, 0x73, 0x02,
+ 0xe0, 0x45, 0x09, 0x70, 0x73, 0x37, 0xab, 0x80, 0xf3, 0x36, 0xe8, 0x00, 0x56, 0x7c, 0x2b, 0x0f,
+ 0x15, 0x24, 0xf1, 0x4b, 0x12, 0x6a, 0xe5, 0x34, 0xaf, 0x82, 0x8b, 0xec, 0xd0, 0xe7, 0x00, 0x5a,
+ 0xf8, 0xaa, 0x7b, 0xed, 0x79, 0xd1, 0x86, 0xdf, 0xac, 0x50, 0x6c, 0xd1, 0x28, 0x10, 0xb7, 0xc0,
+ 0x68, 0xc9, 0xc3, 0x09, 0x4c, 0x74, 0x07, 0x5a, 0x9a, 0x6d, 0x18, 0x22, 0xf3, 0xf7, 0x6c, 0xdf,
+ 0x62, 0x22, 0x79, 0xeb, 0x7d, 0xc4, 0x1f, 0xfb, 0xbd, 0x94, 0x04, 0x67, 0x34, 0xbb, 0x7f, 0x56,
+ 0x92, 0xcf, 0x4c, 0x58, 0xce, 0xe8, 0x4e, 0x6a, 0xf4, 0x79, 0x25, 0x33, 0xfa, 0xdc, 0xca, 0x5b,
+ 0x24, 0x26, 0x1f, 0x1d, 0x96, 0x78, 0xf2, 0xeb, 0xd6, 0x28, 0xb8, 0x70, 0xd9, 0x12, 0xdf, 0xba,
+ 0xb0, 0x94, 0x22, 0xed, 0xc4, 0xc3, 0x78, 0x53, 0xdc, 0x79, 0x52, 0x88, 0xd3, 0xc8, 0xdd, 0x0f,
+ 0xa0, 0x95, 0xae, 0xc3, 0xd4, 0x4c, 0xaf, 0x5c, 0x3a, 0xd3, 0x7f, 0xab, 0xc0, 0x5a, 0x89, 0x77,
+ 0x64, 0x40, 0xcb, 0x24, 0x8f, 0x13, 0xd7, 0x7c, 0xe9, 0x6c, 0xcc, 0x59, 0x93, 0x1a, 0xb0, 0x26,
+ 0xf5, 0x81, 0xc5, 0x0e, 0xdd, 0x63, 0xe6, 0xea, 0xd6, 0x28, 0xb8, 0x87, 0x83, 0x14, 0x16, 0xce,
+ 0x60, 0xa3, 0xcf, 0xa0, 0x61, 0x92, 0xc7, 0xc7, 0xbe, 0x3b, 0x2a, 0x3a, 0xaf, 0x6a, 0x7e, 0xc4,
+ 0xfb, 0x71, 0x20, 0x51, 0x70, 0x84, 0xd7, 0x3d, 0x84, 0xcd, 0xd4, 0x26, 0x79, 0xab, 0xa0, 0x0f,
+ 0x7d, 0xe3, 0x98, 0xc6, 0x17, 0xfe, 0x06, 0x34, 0x1d, 0xe2, 0x32, 0x3d, 0x6a, 0x17, 0xf5, 0xfe,
+ 0xd2, 0xf4, 0x7c, 0xa3, 0x79, 0x14, 0x2e, 0xe2, 0x58, 0xde, 0xfd, 0xb7, 0x02, 0xf5, 0x63, 0x8d,
+ 0x18, 0xf4, 0x1a, 0xa8, 0xc3, 0xbd, 0x14, 0x75, 0xe8, 0x96, 0x26, 0x91, 0x88, 0xa7, 0x94, 0x35,
+ 0xec, 0x67, 0x58, 0xc3, 0xcb, 0x97, 0xe0, 0x5c, 0x4c, 0x18, 0xde, 0x87, 0x66, 0xe4, 0x2e, 0xd5,
+ 0x25, 0x95, 0xcb, 0xba, 0x64, 0xf7, 0x77, 0x35, 0x58, 0x4c, 0xb8, 0xb8, 0x9a, 0x35, 0x3f, 0xee,
+ 0xc4, 0xa0, 0xc1, 0x3b, 0xc9, 0x4e, 0x95, 0x8d, 0xa8, 0xe1, 0x50, 0x11, 0xcc, 0x6f, 0xf1, 0xeb,
+ 0x9d, 0x9f, 0x35, 0x3e, 0x80, 0x16, 0x23, 0xee, 0x88, 0xb2, 0x50, 0x26, 0x0e, 0xac, 0x19, 0x93,
+ 0x87, 0x93, 0x94, 0x14, 0x67, 0xb4, 0xd7, 0xef, 0xc2, 0x52, 0xca, 0xd9, 0x95, 0x86, 0xb0, 0x2f,
+ 0xf9, 0xe1, 0xc4, 0xc9, 0x79, 0x0d, 0xd9, 0xf5, 0x71, 0x2a, 0xbb, 0xb6, 0xca, 0x0f, 0x33, 0x51,
+ 0x32, 0x65, 0x39, 0x86, 0x33, 0x39, 0xf6, 0x7a, 0x25, 0xb4, 0x8b, 0x33, 0xed, 0x9f, 0x35, 0x58,
+ 0x4d, 0x68, 0xc7, 0xdc, 0xf4, 0x87, 0xa9, 0x06, 0xbd, 0x95, 0x69, 0xd0, 0xed, 0x22, 0x9b, 0xe7,
+ 0x46, 0x4e, 0x8b, 0x09, 0xe3, 0xec, 0xff, 0x22, 0x61, 0xfc, 0x93, 0x02, 0xcb, 0x89, 0xb3, 0xbb,
+ 0x06, 0xc6, 0xf8, 0x20, 0xcd, 0x18, 0x5f, 0xae, 0x92, 0x34, 0x25, 0x94, 0xf1, 0xab, 0xf9, 0x54,
+ 0xf0, 0xff, 0xf7, 0x24, 0xe6, 0xd7, 0xb0, 0x3a, 0xb6, 0x0d, 0xdf, 0xa4, 0x7b, 0x06, 0xd1, 0xcd,
+ 0x50, 0x81, 0x0f, 0x7d, 0xb3, 0xd9, 0x3f, 0x86, 0x22, 0x78, 0xea, 0x7a, 0xba, 0xc7, 0xa8, 0xc5,
+ 0x3e, 0x8d, 0x2d, 0xfb, 0xdf, 0x95, 0x4e, 0x56, 0x3f, 0x2d, 0x80, 0xc3, 0x85, 0x4e, 0xd0, 0x0f,
+ 0x60, 0x91, 0x0f, 0xcc, 0xba, 0x46, 0x39, 0xf7, 0x96, 0x89, 0xb5, 0x22, 0x81, 0x16, 0x8f, 0x63,
+ 0x11, 0x4e, 0xea, 0xa1, 0x47, 0xb0, 0xe2, 0xd8, 0xc3, 0x03, 0x62, 0x91, 0x11, 0xe5, 0x63, 0xc6,
+ 0x91, 0x6d, 0xe8, 0xda, 0x44, 0x30, 0x9b, 0x66, 0xff, 0xdd, 0x70, 0xb8, 0x3c, 0xca, 0xab, 0x3c,
+ 0xe3, 0x14, 0x21, 0xbf, 0x2c, 0x8a, 0xba, 0x08, 0x12, 0xb9, 0xd0, 0xf2, 0xe5, 0x73, 0x2f, 0x89,
+ 0x5e, 0xf0, 0x17, 0xce, 0x4e, 0x95, 0x0c, 0x3b, 0x4d, 0x59, 0xc6, 0xdd, 0x3f, 0xbd, 0x8e, 0x33,
+ 0x1e, 0x4a, 0x89, 0x5b, 0xe3, 0xbf, 0x22, 0x6e, 0x05, 0x4c, 0xb2, 0x79, 0x35, 0x26, 0xd9, 0xfd,
+ 0x43, 0x1d, 0x6e, 0xe6, 0xba, 0x2d, 0xfa, 0xf1, 0x05, 0x24, 0xe9, 0xd6, 0x73, 0x23, 0x48, 0x39,
+ 0x76, 0x33, 0x7b, 0x05, 0x76, 0xb3, 0x0b, 0xcb, 0x9a, 0xef, 0xba, 0xd4, 0x62, 0x19, 0x6e, 0x13,
+ 0x9d, 0xc5, 0x5e, 0x5a, 0x8c, 0xb3, 0xfa, 0x45, 0x04, 0xad, 0x7e, 0x45, 0x82, 0x96, 0x8c, 0x42,
+ 0x0e, 0xd9, 0x41, 0xe6, 0xe6, 0xa3, 0x90, 0xb3, 0x76, 0x56, 0x9f, 0x0f, 0x18, 0x01, 0x6a, 0x84,
+ 0xb0, 0x90, 0x1e, 0x30, 0x4e, 0x53, 0x52, 0x9c, 0xd1, 0x2e, 0x20, 0x3b, 0xcd, 0xaa, 0x64, 0x07,
+ 0x91, 0x14, 0x15, 0x03, 0xd1, 0x26, 0xb6, 0xab, 0x94, 0x43, 0x75, 0x2e, 0x56, 0xc8, 0x42, 0x17,
+ 0xaf, 0xce, 0x42, 0xbb, 0x7f, 0x55, 0xe0, 0xc5, 0xd2, 0x82, 0x44, 0xbb, 0xa9, 0xe7, 0x7f, 0x3b,
+ 0xf3, 0xfc, 0x7f, 0xaf, 0xd4, 0x30, 0x31, 0x03, 0xb8, 0xc5, 0x34, 0xed, 0xfd, 0x6a, 0x34, 0xad,
+ 0x80, 0x43, 0x5c, 0xce, 0xd7, 0xfa, 0xdb, 0x4f, 0x9e, 0x76, 0x66, 0xbe, 0x7e, 0xda, 0x99, 0xf9,
+ 0xe6, 0x69, 0x67, 0xe6, 0x37, 0xd3, 0x8e, 0xf2, 0x64, 0xda, 0x51, 0xbe, 0x9e, 0x76, 0x94, 0x6f,
+ 0xa6, 0x1d, 0xe5, 0xef, 0xd3, 0x8e, 0xf2, 0xd5, 0xb7, 0x9d, 0x99, 0xcf, 0x16, 0xa4, 0xc7, 0xff,
+ 0x04, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x02, 0xfa, 0x27, 0xed, 0x1b, 0x00, 0x00,
}
func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
@@ -1706,6 +1707,9 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ i--
+ dAtA[i] = 0x48
if m.RevisionHistoryLimit != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
i--
@@ -1795,6 +1799,9 @@ func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+ i--
+ dAtA[i] = 0x58
if len(m.Conditions) > 0 {
for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -2236,6 +2243,7 @@ func (m *StatefulSetSpec) Size() (n int) {
if m.RevisionHistoryLimit != nil {
n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
}
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
return n
}
@@ -2265,6 +2273,7 @@ func (m *StatefulSetStatus) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ n += 1 + sovGenerated(uint64(m.AvailableReplicas))
return n
}
@@ -2568,6 +2577,7 @@ func (this *StatefulSetSpec) String() string {
`PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`,
`UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`,
`RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
`}`,
}, "")
return s
@@ -2591,6 +2601,7 @@ func (this *StatefulSetStatus) String() string {
`UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`,
`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
+ `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
`}`,
}, "")
return s
@@ -5694,6 +5705,25 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error {
}
}
m.RevisionHistoryLimit = &v
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -5958,6 +5988,25 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+ }
+ m.AvailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AvailableReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto
index 888f3e79e..128efa9ca 100644
--- a/vendor/k8s.io/api/apps/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto
@@ -427,6 +427,13 @@ message StatefulSetSpec {
// consists of all revisions not represented by a currently applied
// StatefulSetSpec version. The default value is 10.
optional int32 revisionHistoryLimit = 8;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // +optional
+ optional int32 minReadySeconds = 9;
}
// StatefulSetStatus represents the current state of a StatefulSet.
@@ -469,6 +476,12 @@ message StatefulSetStatus {
// +patchMergeKey=type
// +patchStrategy=merge
repeated StatefulSetCondition conditions = 10;
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // Remove omitempty when graduating to beta
+ // +optional
+ optional int32 availableReplicas = 11;
}
// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go
index 9f822faee..be638bb0f 100644
--- a/vendor/k8s.io/api/apps/v1beta1/types.go
+++ b/vendor/k8s.io/api/apps/v1beta1/types.go
@@ -218,6 +218,13 @@ type StatefulSetSpec struct {
// consists of all revisions not represented by a currently applied
// StatefulSetSpec version. The default value is 10.
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"`
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"`
}
// StatefulSetStatus represents the current state of a StatefulSet.
@@ -260,6 +267,12 @@ type StatefulSetStatus struct {
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // Remove omitempty when graduating to beta
+ // +optional
+ AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,11,opt,name=availableReplicas"`
}
type StatefulSetConditionType string
diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
index 504b85863..51e08b575 100644
--- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go
@@ -237,6 +237,7 @@ var map_StatefulSetSpec = map[string]string{
"podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.",
"updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
+ "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.",
}
func (StatefulSetSpec) SwaggerDoc() map[string]string {
@@ -254,6 +255,7 @@ var map_StatefulSetStatus = map[string]string{
"updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)",
"collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
"conditions": "Represents the latest available observations of a statefulset's current state.",
+ "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. Remove omitempty when graduating to beta",
}
func (StatefulSetStatus) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
index b2e5c2e97..e03a95acd 100644
--- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
+++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
@@ -957,143 +957,144 @@ func init() {
}
var fileDescriptor_42fe616264472f7e = []byte{
- // 2169 bytes of a gzipped FileDescriptorProto
+ // 2182 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1c, 0xb7,
- 0xf9, 0xd6, 0xec, 0x87, 0xb4, 0xa2, 0x2c, 0xc9, 0xa6, 0xf4, 0x93, 0x36, 0xf2, 0xaf, 0x2b, 0x63,
- 0x13, 0x38, 0x4a, 0x6c, 0xcd, 0xda, 0xca, 0x07, 0x12, 0xbb, 0x68, 0xab, 0x95, 0x52, 0xdb, 0x81,
- 0xbe, 0x42, 0x59, 0x06, 0x1a, 0xb4, 0xa8, 0xa9, 0x5d, 0x7a, 0x35, 0xd1, 0x7c, 0x61, 0x86, 0xb3,
- 0xf5, 0xa2, 0x97, 0x5e, 0x0b, 0x14, 0x68, 0x7b, 0xed, 0x3f, 0xd1, 0x5b, 0x51, 0xb4, 0xb7, 0x22,
- 0x28, 0x7c, 0x29, 0x10, 0xf4, 0x92, 0x9c, 0x84, 0x7a, 0x73, 0x2a, 0x8a, 0x5e, 0x0a, 0xf4, 0x12,
- 0xa0, 0x40, 0x41, 0x0e, 0xe7, 0x83, 0xf3, 0xe1, 0x1d, 0x29, 0x8e, 0xd2, 0x04, 0xb9, 0x69, 0xc9,
- 0xe7, 0x7d, 0xf8, 0xbe, 0xe4, 0x4b, 0xbe, 0x0f, 0x39, 0x02, 0xdf, 0x3b, 0x7e, 0xcb, 0x55, 0x35,
- 0xab, 0x75, 0xec, 0x1d, 0x12, 0xc7, 0x24, 0x94, 0xb8, 0xad, 0x3e, 0x31, 0xbb, 0x96, 0xd3, 0x12,
- 0x1d, 0xd8, 0xd6, 0x5a, 0xd8, 0xb6, 0xdd, 0x56, 0xff, 0xe6, 0x21, 0xa1, 0x78, 0xad, 0xd5, 0x23,
- 0x26, 0x71, 0x30, 0x25, 0x5d, 0xd5, 0x76, 0x2c, 0x6a, 0xc1, 0x45, 0x1f, 0xa8, 0x62, 0x5b, 0x53,
- 0x19, 0x50, 0x15, 0xc0, 0xa5, 0xd5, 0x9e, 0x46, 0x8f, 0xbc, 0x43, 0xb5, 0x63, 0x19, 0xad, 0x9e,
- 0xd5, 0xb3, 0x5a, 0x1c, 0x7f, 0xe8, 0x3d, 0xe2, 0xbf, 0xf8, 0x0f, 0xfe, 0x97, 0xcf, 0xb3, 0xd4,
- 0x8c, 0x0d, 0xd8, 0xb1, 0x1c, 0xd2, 0xea, 0xdf, 0x4c, 0x8e, 0xb5, 0xf4, 0x7a, 0x84, 0x31, 0x70,
- 0xe7, 0x48, 0x33, 0x89, 0x33, 0x68, 0xd9, 0xc7, 0x3d, 0xd6, 0xe0, 0xb6, 0x0c, 0x42, 0x71, 0x96,
- 0x55, 0x2b, 0xcf, 0xca, 0xf1, 0x4c, 0xaa, 0x19, 0x24, 0x65, 0xf0, 0xe6, 0x28, 0x03, 0xb7, 0x73,
- 0x44, 0x0c, 0x9c, 0xb2, 0x7b, 0x2d, 0xcf, 0xce, 0xa3, 0x9a, 0xde, 0xd2, 0x4c, 0xea, 0x52, 0x27,
- 0x69, 0xd4, 0xfc, 0xb7, 0x02, 0xe0, 0x86, 0x65, 0x52, 0xc7, 0xd2, 0x75, 0xe2, 0x20, 0xd2, 0xd7,
- 0x5c, 0xcd, 0x32, 0xe1, 0x43, 0x50, 0x63, 0xf1, 0x74, 0x31, 0xc5, 0x75, 0xe5, 0x8a, 0xb2, 0x32,
- 0xb5, 0x76, 0x43, 0x8d, 0x66, 0x3a, 0xa4, 0x57, 0xed, 0xe3, 0x1e, 0x6b, 0x70, 0x55, 0x86, 0x56,
- 0xfb, 0x37, 0xd5, 0xdd, 0xc3, 0x0f, 0x48, 0x87, 0x6e, 0x13, 0x8a, 0xdb, 0xf0, 0xc9, 0xc9, 0xf2,
- 0xd8, 0xf0, 0x64, 0x19, 0x44, 0x6d, 0x28, 0x64, 0x85, 0xbb, 0xa0, 0xc2, 0xd9, 0x4b, 0x9c, 0x7d,
- 0x35, 0x97, 0x5d, 0x04, 0xad, 0x22, 0xfc, 0x93, 0x77, 0x1e, 0x53, 0x62, 0x32, 0xf7, 0xda, 0x17,
- 0x04, 0x75, 0x65, 0x13, 0x53, 0x8c, 0x38, 0x11, 0xbc, 0x0e, 0x6a, 0x8e, 0x70, 0xbf, 0x5e, 0xbe,
- 0xa2, 0xac, 0x94, 0xdb, 0x17, 0x05, 0xaa, 0x16, 0x84, 0x85, 0x42, 0x44, 0xf3, 0x89, 0x02, 0x16,
- 0xd2, 0x71, 0x6f, 0x69, 0x2e, 0x85, 0x3f, 0x4c, 0xc5, 0xae, 0x16, 0x8b, 0x9d, 0x59, 0xf3, 0xc8,
- 0xc3, 0x81, 0x83, 0x96, 0x58, 0xdc, 0x7b, 0xa0, 0xaa, 0x51, 0x62, 0xb8, 0xf5, 0xd2, 0x95, 0xf2,
- 0xca, 0xd4, 0xda, 0x35, 0x35, 0x27, 0x81, 0xd5, 0xb4, 0x77, 0xed, 0x69, 0xc1, 0x5b, 0xbd, 0xc7,
- 0x18, 0x90, 0x4f, 0xd4, 0xfc, 0x79, 0x09, 0x4c, 0x6e, 0x62, 0x62, 0x58, 0xe6, 0x3e, 0xa1, 0xe7,
- 0xb0, 0x72, 0x77, 0x41, 0xc5, 0xb5, 0x49, 0x47, 0xac, 0xdc, 0xd5, 0xdc, 0x00, 0x42, 0x9f, 0xf6,
- 0x6d, 0xd2, 0x89, 0x96, 0x8c, 0xfd, 0x42, 0x9c, 0x01, 0xee, 0x81, 0x71, 0x97, 0x62, 0xea, 0xb9,
- 0x7c, 0xc1, 0xa6, 0xd6, 0x56, 0x0a, 0x70, 0x71, 0x7c, 0x7b, 0x46, 0xb0, 0x8d, 0xfb, 0xbf, 0x91,
- 0xe0, 0x69, 0xfe, 0xbd, 0x04, 0x60, 0x88, 0xdd, 0xb0, 0xcc, 0xae, 0x46, 0x59, 0x3a, 0xdf, 0x02,
- 0x15, 0x3a, 0xb0, 0x09, 0x9f, 0x90, 0xc9, 0xf6, 0xd5, 0xc0, 0x95, 0xfb, 0x03, 0x9b, 0x7c, 0x76,
- 0xb2, 0xbc, 0x90, 0xb6, 0x60, 0x3d, 0x88, 0xdb, 0xc0, 0xad, 0xd0, 0xc9, 0x12, 0xb7, 0x7e, 0x5d,
- 0x1e, 0xfa, 0xb3, 0x93, 0xe5, 0x8c, 0xb3, 0x43, 0x0d, 0x99, 0x64, 0x07, 0x61, 0x1f, 0x40, 0x1d,
- 0xbb, 0xf4, 0xbe, 0x83, 0x4d, 0xd7, 0x1f, 0x49, 0x33, 0x88, 0x08, 0xff, 0xd5, 0x62, 0x0b, 0xc5,
- 0x2c, 0xda, 0x4b, 0xc2, 0x0b, 0xb8, 0x95, 0x62, 0x43, 0x19, 0x23, 0xc0, 0xab, 0x60, 0xdc, 0x21,
- 0xd8, 0xb5, 0xcc, 0x7a, 0x85, 0x47, 0x11, 0x4e, 0x20, 0xe2, 0xad, 0x48, 0xf4, 0xc2, 0x57, 0xc0,
- 0x84, 0x41, 0x5c, 0x17, 0xf7, 0x48, 0xbd, 0xca, 0x81, 0xb3, 0x02, 0x38, 0xb1, 0xed, 0x37, 0xa3,
- 0xa0, 0xbf, 0xf9, 0x3b, 0x05, 0x4c, 0x87, 0x33, 0x77, 0x0e, 0x3b, 0xe7, 0x8e, 0xbc, 0x73, 0x9a,
- 0xa3, 0x93, 0x25, 0x67, 0xc3, 0x7c, 0x58, 0x8e, 0x39, 0xce, 0xd2, 0x11, 0xfe, 0x08, 0xd4, 0x5c,
- 0xa2, 0x93, 0x0e, 0xb5, 0x1c, 0xe1, 0xf8, 0x6b, 0x05, 0x1d, 0xc7, 0x87, 0x44, 0xdf, 0x17, 0xa6,
- 0xed, 0x0b, 0xcc, 0xf3, 0xe0, 0x17, 0x0a, 0x29, 0xe1, 0x7b, 0xa0, 0x46, 0x89, 0x61, 0xeb, 0x98,
- 0x12, 0xb1, 0x6b, 0x5e, 0x8c, 0x3b, 0xcf, 0x72, 0x86, 0x91, 0xed, 0x59, 0xdd, 0xfb, 0x02, 0xc6,
- 0xb7, 0x4c, 0x38, 0x19, 0x41, 0x2b, 0x0a, 0x69, 0xa0, 0x0d, 0x66, 0x3c, 0xbb, 0xcb, 0x90, 0x94,
- 0x1d, 0xe7, 0xbd, 0x81, 0xc8, 0xa1, 0x1b, 0xa3, 0x67, 0xe5, 0x40, 0xb2, 0x6b, 0x2f, 0x88, 0x51,
- 0x66, 0xe4, 0x76, 0x94, 0xe0, 0x87, 0xeb, 0x60, 0xd6, 0xd0, 0x4c, 0x44, 0x70, 0x77, 0xb0, 0x4f,
- 0x3a, 0x96, 0xd9, 0x75, 0x79, 0x2a, 0x55, 0xdb, 0x8b, 0x82, 0x60, 0x76, 0x5b, 0xee, 0x46, 0x49,
- 0x3c, 0xdc, 0x02, 0xf3, 0xc1, 0x01, 0x7c, 0x57, 0x73, 0xa9, 0xe5, 0x0c, 0xb6, 0x34, 0x43, 0xa3,
- 0xf5, 0x71, 0xce, 0x53, 0x1f, 0x9e, 0x2c, 0xcf, 0xa3, 0x8c, 0x7e, 0x94, 0x69, 0xd5, 0xfc, 0xf5,
- 0x38, 0x98, 0x4d, 0x9c, 0x0b, 0xf0, 0x01, 0x58, 0xe8, 0x78, 0x8e, 0x43, 0x4c, 0xba, 0xe3, 0x19,
- 0x87, 0xc4, 0xd9, 0xef, 0x1c, 0x91, 0xae, 0xa7, 0x93, 0x2e, 0x5f, 0xd6, 0x6a, 0xbb, 0x21, 0x7c,
- 0x5d, 0xd8, 0xc8, 0x44, 0xa1, 0x1c, 0x6b, 0xf8, 0x2e, 0x80, 0x26, 0x6f, 0xda, 0xd6, 0x5c, 0x37,
- 0xe4, 0x2c, 0x71, 0xce, 0x70, 0x2b, 0xee, 0xa4, 0x10, 0x28, 0xc3, 0x8a, 0xf9, 0xd8, 0x25, 0xae,
- 0xe6, 0x90, 0x6e, 0xd2, 0xc7, 0xb2, 0xec, 0xe3, 0x66, 0x26, 0x0a, 0xe5, 0x58, 0xc3, 0x37, 0xc0,
- 0x94, 0x3f, 0x1a, 0x9f, 0x73, 0xb1, 0x38, 0x73, 0x82, 0x6c, 0x6a, 0x27, 0xea, 0x42, 0x71, 0x1c,
- 0x0b, 0xcd, 0x3a, 0x74, 0x89, 0xd3, 0x27, 0xdd, 0x3b, 0xbe, 0x38, 0x60, 0x15, 0xb4, 0xca, 0x2b,
- 0x68, 0x18, 0xda, 0x6e, 0x0a, 0x81, 0x32, 0xac, 0x58, 0x68, 0x7e, 0xd6, 0xa4, 0x42, 0x1b, 0x97,
- 0x43, 0x3b, 0xc8, 0x44, 0xa1, 0x1c, 0x6b, 0x96, 0x7b, 0xbe, 0xcb, 0xeb, 0x7d, 0xac, 0xe9, 0xf8,
- 0x50, 0x27, 0xf5, 0x09, 0x39, 0xf7, 0x76, 0xe4, 0x6e, 0x94, 0xc4, 0xc3, 0x3b, 0xe0, 0x92, 0xdf,
- 0x74, 0x60, 0xe2, 0x90, 0xa4, 0xc6, 0x49, 0x5e, 0x10, 0x24, 0x97, 0x76, 0x92, 0x00, 0x94, 0xb6,
- 0x81, 0xb7, 0xc0, 0x4c, 0xc7, 0xd2, 0x75, 0x9e, 0x8f, 0x1b, 0x96, 0x67, 0xd2, 0xfa, 0x24, 0x67,
- 0x81, 0x6c, 0x0f, 0x6d, 0x48, 0x3d, 0x28, 0x81, 0x84, 0x3f, 0x06, 0xa0, 0x13, 0x14, 0x06, 0xb7,
- 0x0e, 0x46, 0x28, 0x80, 0x74, 0x59, 0x8a, 0x2a, 0x73, 0xd8, 0xe4, 0xa2, 0x18, 0x65, 0xf3, 0x43,
- 0x05, 0x2c, 0xe6, 0x6c, 0x74, 0xf8, 0x5d, 0xa9, 0x08, 0x5e, 0x4b, 0x14, 0xc1, 0xcb, 0x39, 0x66,
- 0xb1, 0x4a, 0x78, 0x04, 0xa6, 0x99, 0x20, 0xd1, 0xcc, 0x9e, 0x0f, 0x11, 0x67, 0x59, 0x2b, 0x37,
- 0x00, 0x14, 0x47, 0x47, 0xa7, 0xf2, 0xa5, 0xe1, 0xc9, 0xf2, 0xb4, 0xd4, 0x87, 0x64, 0xe2, 0xe6,
- 0x2f, 0x4a, 0x00, 0x6c, 0x12, 0x5b, 0xb7, 0x06, 0x06, 0x31, 0xcf, 0x43, 0xd3, 0xdc, 0x93, 0x34,
- 0xcd, 0xcb, 0xf9, 0x4b, 0x12, 0x3a, 0x95, 0x2b, 0x6a, 0xde, 0x4b, 0x88, 0x9a, 0x57, 0x8a, 0x90,
- 0x3d, 0x5b, 0xd5, 0x7c, 0x5c, 0x06, 0x73, 0x11, 0x38, 0x92, 0x35, 0xb7, 0xa5, 0x15, 0x7d, 0x39,
- 0xb1, 0xa2, 0x8b, 0x19, 0x26, 0x5f, 0x98, 0xae, 0xf9, 0x00, 0xcc, 0x30, 0xd5, 0xe1, 0xaf, 0x1f,
- 0xd7, 0x34, 0xe3, 0xa7, 0xd6, 0x34, 0x61, 0x25, 0xda, 0x92, 0x98, 0x50, 0x82, 0x39, 0x47, 0x43,
- 0x4d, 0x7c, 0x15, 0x35, 0xd4, 0xef, 0x15, 0x30, 0x13, 0x2d, 0xd3, 0x39, 0x88, 0xa8, 0xbb, 0xb2,
- 0x88, 0x7a, 0xb1, 0x40, 0x72, 0xe6, 0xa8, 0xa8, 0x8f, 0x2b, 0x71, 0xd7, 0xb9, 0x8c, 0x5a, 0x61,
- 0x57, 0x30, 0x5b, 0xd7, 0x3a, 0xd8, 0x15, 0xf5, 0xf6, 0x82, 0x7f, 0xfd, 0xf2, 0xdb, 0x50, 0xd8,
- 0x2b, 0x09, 0xae, 0xd2, 0x17, 0x2b, 0xb8, 0xca, 0xcf, 0x47, 0x70, 0xfd, 0x00, 0xd4, 0xdc, 0x40,
- 0x6a, 0x55, 0x38, 0xe5, 0xb5, 0x42, 0x1b, 0x5b, 0xa8, 0xac, 0x90, 0x3a, 0xd4, 0x57, 0x21, 0x5d,
- 0x96, 0xb2, 0xaa, 0x7e, 0x99, 0xca, 0x8a, 0x25, 0xba, 0x8d, 0x3d, 0x97, 0x74, 0xf9, 0xa6, 0xaa,
- 0x45, 0x89, 0xbe, 0xc7, 0x5b, 0x91, 0xe8, 0x85, 0x07, 0x60, 0xd1, 0x76, 0xac, 0x9e, 0x43, 0x5c,
- 0x77, 0x93, 0xe0, 0xae, 0xae, 0x99, 0x24, 0x08, 0xc0, 0xaf, 0x89, 0x97, 0x87, 0x27, 0xcb, 0x8b,
- 0x7b, 0xd9, 0x10, 0x94, 0x67, 0xdb, 0xfc, 0x53, 0x05, 0x5c, 0x4c, 0x9e, 0x8d, 0x39, 0x32, 0x45,
- 0x39, 0x93, 0x4c, 0xb9, 0x1e, 0xcb, 0x53, 0x5f, 0xc3, 0xc5, 0x9e, 0x0a, 0x52, 0xb9, 0xba, 0x0e,
- 0x66, 0x85, 0x2c, 0x09, 0x3a, 0x85, 0x50, 0x0b, 0x97, 0xe7, 0x40, 0xee, 0x46, 0x49, 0x3c, 0xbc,
- 0x0d, 0xa6, 0x1d, 0xae, 0xbc, 0x02, 0x02, 0x5f, 0xbd, 0xfc, 0x9f, 0x20, 0x98, 0x46, 0xf1, 0x4e,
- 0x24, 0x63, 0x99, 0x72, 0x89, 0x04, 0x49, 0x40, 0x50, 0x91, 0x95, 0xcb, 0x7a, 0x12, 0x80, 0xd2,
- 0x36, 0x70, 0x1b, 0xcc, 0x79, 0x66, 0x9a, 0xca, 0xcf, 0xb5, 0xcb, 0x82, 0x6a, 0xee, 0x20, 0x0d,
- 0x41, 0x59, 0x76, 0xf0, 0xa1, 0x24, 0x66, 0xc6, 0xf9, 0x79, 0x72, 0xbd, 0xc0, 0x9e, 0x28, 0xac,
- 0x66, 0x32, 0xa4, 0x56, 0xad, 0xa8, 0xd4, 0x6a, 0xfe, 0x51, 0x01, 0x30, 0xbd, 0x0f, 0x47, 0xbe,
- 0x04, 0xa4, 0x2c, 0x62, 0x15, 0x53, 0xcb, 0xd6, 0x3f, 0x37, 0x0a, 0xea, 0x9f, 0xe8, 0x40, 0x2d,
- 0x26, 0x80, 0xc4, 0x44, 0x9f, 0xcf, 0xa3, 0x4e, 0x51, 0x01, 0x14, 0x39, 0xf5, 0x1c, 0x04, 0x50,
- 0x8c, 0xec, 0xd9, 0x02, 0xe8, 0x1f, 0x25, 0x30, 0x17, 0x81, 0x0b, 0x0b, 0xa0, 0x0c, 0x93, 0x6f,
- 0x1e, 0x76, 0x8a, 0x89, 0x92, 0x68, 0xea, 0xfe, 0x97, 0x44, 0x49, 0xe4, 0x55, 0x8e, 0x28, 0xf9,
- 0x6d, 0x29, 0xee, 0xfa, 0x29, 0x45, 0xc9, 0x73, 0x78, 0xe1, 0xf8, 0xca, 0xe9, 0x9a, 0xe6, 0x9f,
- 0xcb, 0xe0, 0x62, 0x72, 0x1f, 0x4a, 0x05, 0x52, 0x19, 0x59, 0x20, 0xf7, 0xc0, 0xfc, 0x23, 0x4f,
- 0xd7, 0x07, 0x3c, 0x86, 0x58, 0x95, 0xf4, 0x4b, 0xeb, 0xff, 0x0b, 0xcb, 0xf9, 0xef, 0x67, 0x60,
- 0x50, 0xa6, 0x65, 0xba, 0x5e, 0x56, 0x3e, 0x6f, 0xbd, 0xac, 0x9e, 0xa1, 0x5e, 0x66, 0x4b, 0x8e,
- 0xf2, 0x99, 0x24, 0xc7, 0xe9, 0x8a, 0x65, 0xc6, 0xc1, 0x35, 0xf2, 0xea, 0x3f, 0x54, 0xc0, 0x42,
- 0xf6, 0x85, 0x1b, 0xea, 0x60, 0xc6, 0xc0, 0x8f, 0xe3, 0x0f, 0x1f, 0xa3, 0x8a, 0x88, 0x47, 0x35,
- 0x5d, 0xf5, 0x3f, 0x19, 0xa9, 0xf7, 0x4c, 0xba, 0xeb, 0xec, 0x53, 0x47, 0x33, 0x7b, 0x7e, 0xe5,
- 0xdd, 0x96, 0xb8, 0x50, 0x82, 0x1b, 0xbe, 0x0f, 0x6a, 0x06, 0x7e, 0xbc, 0xef, 0x39, 0xbd, 0xac,
- 0x0a, 0x59, 0x6c, 0x1c, 0xbe, 0x01, 0xb6, 0x05, 0x0b, 0x0a, 0xf9, 0x9a, 0x9f, 0x2a, 0x60, 0x31,
- 0xa7, 0xaa, 0x7e, 0x8d, 0xa2, 0xdc, 0x05, 0x57, 0xa4, 0x20, 0xd9, 0xae, 0x24, 0x8f, 0x3c, 0x9d,
- 0x6f, 0x50, 0x21, 0x64, 0xae, 0x81, 0x49, 0x1b, 0x3b, 0x54, 0x0b, 0x65, 0x70, 0xb5, 0x3d, 0x3d,
- 0x3c, 0x59, 0x9e, 0xdc, 0x0b, 0x1a, 0x51, 0xd4, 0xdf, 0xfc, 0x8f, 0x02, 0xaa, 0xfb, 0x1d, 0xac,
- 0x93, 0x73, 0x50, 0x12, 0x9b, 0x92, 0x92, 0xc8, 0x7f, 0xa5, 0xe7, 0xfe, 0xe4, 0x8a, 0x88, 0xad,
- 0x84, 0x88, 0x78, 0x69, 0x04, 0xcf, 0xb3, 0xf5, 0xc3, 0xdb, 0x60, 0x32, 0x1c, 0xee, 0x74, 0x87,
- 0x5b, 0xf3, 0x37, 0x25, 0x30, 0x15, 0x1b, 0xe2, 0x94, 0x47, 0xe3, 0x43, 0xa9, 0x1e, 0xb0, 0x4d,
- 0xbf, 0x56, 0x24, 0x10, 0x35, 0x38, 0xfb, 0xdf, 0x31, 0xa9, 0x13, 0xbf, 0x3c, 0xa6, 0x4b, 0xc2,
- 0x77, 0xc0, 0x0c, 0xc5, 0x4e, 0x8f, 0xd0, 0xa0, 0x8f, 0x4f, 0xd8, 0x64, 0xf4, 0x98, 0x72, 0x5f,
- 0xea, 0x45, 0x09, 0xf4, 0xd2, 0x6d, 0x30, 0x2d, 0x0d, 0x06, 0x2f, 0x82, 0xf2, 0x31, 0x19, 0xf8,
- 0x92, 0x0a, 0xb1, 0x3f, 0xe1, 0x3c, 0xa8, 0xf6, 0xb1, 0xee, 0xf9, 0x79, 0x3e, 0x89, 0xfc, 0x1f,
- 0xb7, 0x4a, 0x6f, 0x29, 0xcd, 0x5f, 0xb2, 0xc9, 0x89, 0x92, 0xf3, 0x1c, 0xb2, 0xeb, 0x5d, 0x29,
- 0xbb, 0xf2, 0x3f, 0x18, 0xc6, 0xb7, 0x4c, 0x5e, 0x8e, 0xa1, 0x44, 0x8e, 0xbd, 0x5a, 0x88, 0xed,
- 0xd9, 0x99, 0xf6, 0xcf, 0x12, 0x98, 0x8f, 0xa1, 0x23, 0xa9, 0xfa, 0x6d, 0x49, 0xaa, 0xae, 0x24,
- 0xa4, 0x6a, 0x3d, 0xcb, 0xe6, 0x1b, 0xad, 0x3a, 0x5a, 0xab, 0xfe, 0x41, 0x01, 0xb3, 0xb1, 0xb9,
- 0x3b, 0x07, 0xb1, 0x7a, 0x4f, 0x16, 0xab, 0x2f, 0x15, 0x49, 0x9a, 0x1c, 0xb5, 0xfa, 0x97, 0xaa,
- 0xe4, 0xfc, 0xd7, 0xfe, 0x0d, 0xed, 0xa7, 0x60, 0xbe, 0x6f, 0xe9, 0x9e, 0x41, 0x36, 0x74, 0xac,
- 0x19, 0x01, 0x80, 0xa9, 0xbb, 0x72, 0xf2, 0x9e, 0x18, 0xd2, 0x13, 0xc7, 0xd5, 0x5c, 0x4a, 0x4c,
- 0xfa, 0x20, 0xb2, 0x8c, 0x34, 0xe5, 0x83, 0x0c, 0x3a, 0x94, 0x39, 0x08, 0x7c, 0x03, 0x4c, 0x31,
- 0x55, 0xa6, 0x75, 0xc8, 0x0e, 0x36, 0x82, 0xc4, 0x0a, 0x3f, 0x8f, 0xed, 0x47, 0x5d, 0x28, 0x8e,
- 0x83, 0x47, 0x60, 0xce, 0xb6, 0xba, 0xdb, 0xd8, 0xc4, 0x3d, 0xc2, 0x64, 0xc6, 0x9e, 0xa5, 0x6b,
- 0x9d, 0x01, 0x7f, 0x58, 0x9b, 0x6c, 0xbf, 0x19, 0x3c, 0x9a, 0xec, 0xa5, 0x21, 0xec, 0x02, 0x9a,
- 0xd1, 0xcc, 0x37, 0x75, 0x16, 0x25, 0x74, 0x52, 0x9f, 0x74, 0xfd, 0x27, 0xed, 0xb5, 0x22, 0x19,
- 0x76, 0xc6, 0x8f, 0xba, 0x79, 0xef, 0x86, 0xb5, 0x33, 0x7d, 0x91, 0xfd, 0x57, 0x05, 0x5c, 0x4a,
- 0x1d, 0x95, 0x5f, 0xe2, 0xcb, 0x5d, 0xea, 0x1a, 0x51, 0x3e, 0xc5, 0x35, 0x62, 0x1d, 0xcc, 0x8a,
- 0x8f, 0xc1, 0x89, 0x5b, 0x48, 0x78, 0x1b, 0xdc, 0x90, 0xbb, 0x51, 0x12, 0x9f, 0xf5, 0x72, 0x58,
- 0x3d, 0xe5, 0xcb, 0x61, 0xdc, 0x0b, 0xf1, 0xcf, 0x4d, 0x7e, 0xea, 0xa5, 0xbd, 0x10, 0xff, 0xe3,
- 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, 0xb2, 0x42, 0x38, 0x90, 0x7a, 0x51, 0x02,
- 0xfd, 0xb9, 0x3e, 0x78, 0xe2, 0x8c, 0x0f, 0x9e, 0xab, 0x45, 0xf2, 0xb9, 0xf8, 0xbd, 0xe7, 0xaf,
- 0x0a, 0x78, 0x21, 0x77, 0x23, 0xc0, 0x75, 0xa9, 0xec, 0xae, 0x26, 0xca, 0xee, 0xb7, 0x72, 0x0d,
- 0x63, 0xb5, 0xd7, 0xc9, 0x7e, 0xf6, 0x7b, 0xbb, 0xd8, 0xb3, 0x5f, 0x86, 0x76, 0x1f, 0xfd, 0xfe,
- 0xd7, 0x5e, 0x7d, 0xf2, 0xb4, 0x31, 0xf6, 0xd1, 0xd3, 0xc6, 0xd8, 0x27, 0x4f, 0x1b, 0x63, 0x3f,
- 0x1b, 0x36, 0x94, 0x27, 0xc3, 0x86, 0xf2, 0xd1, 0xb0, 0xa1, 0x7c, 0x32, 0x6c, 0x28, 0x7f, 0x1b,
- 0x36, 0x94, 0x5f, 0x7d, 0xda, 0x18, 0x7b, 0x7f, 0x42, 0x8c, 0xf8, 0xdf, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0xd0, 0xf7, 0x24, 0x13, 0x48, 0x29, 0x00, 0x00,
+ 0xf9, 0xf6, 0xec, 0x87, 0xb4, 0x4b, 0x59, 0x92, 0x4d, 0xe9, 0x27, 0x6d, 0xec, 0x5f, 0x57, 0xc6,
+ 0x26, 0x70, 0x94, 0xd8, 0x9a, 0xb5, 0x95, 0x0f, 0x24, 0x76, 0xd1, 0x56, 0x2b, 0xa5, 0xb6, 0x03,
+ 0x7d, 0x85, 0xb2, 0x0c, 0x34, 0x68, 0x51, 0x53, 0xbb, 0xf4, 0x6a, 0xa2, 0xf9, 0xc2, 0x0c, 0x67,
+ 0xeb, 0x45, 0x2f, 0xbd, 0x16, 0x28, 0xd0, 0xf4, 0xda, 0x7f, 0xa2, 0xb7, 0xa2, 0x68, 0x6e, 0x45,
+ 0x50, 0xf8, 0x18, 0xf4, 0x92, 0x9c, 0x84, 0x7a, 0x73, 0x2a, 0x8a, 0x1e, 0x7b, 0x09, 0x50, 0xa0,
+ 0x20, 0x87, 0xf3, 0xc1, 0xf9, 0xf0, 0x8e, 0x14, 0x47, 0x69, 0x82, 0xdc, 0xb4, 0xe4, 0xf3, 0x3e,
+ 0x7c, 0x5f, 0xf2, 0x25, 0xdf, 0x87, 0x1c, 0x81, 0x1f, 0x1d, 0xbd, 0xe5, 0xaa, 0x9a, 0xd5, 0x3e,
+ 0xf2, 0x0e, 0x88, 0x63, 0x12, 0x4a, 0xdc, 0xf6, 0x80, 0x98, 0x3d, 0xcb, 0x69, 0x8b, 0x0e, 0x6c,
+ 0x6b, 0x6d, 0x6c, 0xdb, 0x6e, 0x7b, 0x70, 0xf3, 0x80, 0x50, 0xbc, 0xda, 0xee, 0x13, 0x93, 0x38,
+ 0x98, 0x92, 0x9e, 0x6a, 0x3b, 0x16, 0xb5, 0xe0, 0xa2, 0x0f, 0x54, 0xb1, 0xad, 0xa9, 0x0c, 0xa8,
+ 0x0a, 0xe0, 0xa5, 0x95, 0xbe, 0x46, 0x0f, 0xbd, 0x03, 0xb5, 0x6b, 0x19, 0xed, 0xbe, 0xd5, 0xb7,
+ 0xda, 0x1c, 0x7f, 0xe0, 0x3d, 0xe2, 0xbf, 0xf8, 0x0f, 0xfe, 0x97, 0xcf, 0x73, 0xa9, 0x15, 0x1b,
+ 0xb0, 0x6b, 0x39, 0xa4, 0x3d, 0xb8, 0x99, 0x1c, 0xeb, 0xd2, 0xeb, 0x11, 0xc6, 0xc0, 0xdd, 0x43,
+ 0xcd, 0x24, 0xce, 0xb0, 0x6d, 0x1f, 0xf5, 0x59, 0x83, 0xdb, 0x36, 0x08, 0xc5, 0x59, 0x56, 0xed,
+ 0x3c, 0x2b, 0xc7, 0x33, 0xa9, 0x66, 0x90, 0x94, 0xc1, 0x9b, 0xe3, 0x0c, 0xdc, 0xee, 0x21, 0x31,
+ 0x70, 0xca, 0xee, 0xb5, 0x3c, 0x3b, 0x8f, 0x6a, 0x7a, 0x5b, 0x33, 0xa9, 0x4b, 0x9d, 0xa4, 0x51,
+ 0xeb, 0xdf, 0x0a, 0x80, 0xeb, 0x96, 0x49, 0x1d, 0x4b, 0xd7, 0x89, 0x83, 0xc8, 0x40, 0x73, 0x35,
+ 0xcb, 0x84, 0x0f, 0x41, 0x8d, 0xc5, 0xd3, 0xc3, 0x14, 0x37, 0x94, 0x2b, 0xca, 0xf2, 0xd4, 0xea,
+ 0x0d, 0x35, 0x9a, 0xe9, 0x90, 0x5e, 0xb5, 0x8f, 0xfa, 0xac, 0xc1, 0x55, 0x19, 0x5a, 0x1d, 0xdc,
+ 0x54, 0x77, 0x0e, 0x3e, 0x20, 0x5d, 0xba, 0x45, 0x28, 0xee, 0xc0, 0x27, 0xc7, 0x4b, 0xe7, 0x46,
+ 0xc7, 0x4b, 0x20, 0x6a, 0x43, 0x21, 0x2b, 0xdc, 0x01, 0x15, 0xce, 0x5e, 0xe2, 0xec, 0x2b, 0xb9,
+ 0xec, 0x22, 0x68, 0x15, 0xe1, 0x5f, 0xbc, 0xf3, 0x98, 0x12, 0x93, 0xb9, 0xd7, 0x39, 0x2f, 0xa8,
+ 0x2b, 0x1b, 0x98, 0x62, 0xc4, 0x89, 0xe0, 0x75, 0x50, 0x73, 0x84, 0xfb, 0x8d, 0xf2, 0x15, 0x65,
+ 0xb9, 0xdc, 0xb9, 0x20, 0x50, 0xb5, 0x20, 0x2c, 0x14, 0x22, 0x5a, 0x4f, 0x14, 0xb0, 0x90, 0x8e,
+ 0x7b, 0x53, 0x73, 0x29, 0xfc, 0x69, 0x2a, 0x76, 0xb5, 0x58, 0xec, 0xcc, 0x9a, 0x47, 0x1e, 0x0e,
+ 0x1c, 0xb4, 0xc4, 0xe2, 0xde, 0x05, 0x55, 0x8d, 0x12, 0xc3, 0x6d, 0x94, 0xae, 0x94, 0x97, 0xa7,
+ 0x56, 0xaf, 0xa9, 0x39, 0x09, 0xac, 0xa6, 0xbd, 0xeb, 0x4c, 0x0b, 0xde, 0xea, 0x3d, 0xc6, 0x80,
+ 0x7c, 0xa2, 0xd6, 0xaf, 0x4b, 0xa0, 0xbe, 0x81, 0x89, 0x61, 0x99, 0x7b, 0x84, 0x9e, 0xc1, 0xca,
+ 0xdd, 0x05, 0x15, 0xd7, 0x26, 0x5d, 0xb1, 0x72, 0x57, 0x73, 0x03, 0x08, 0x7d, 0xda, 0xb3, 0x49,
+ 0x37, 0x5a, 0x32, 0xf6, 0x0b, 0x71, 0x06, 0xb8, 0x0b, 0x26, 0x5c, 0x8a, 0xa9, 0xe7, 0xf2, 0x05,
+ 0x9b, 0x5a, 0x5d, 0x2e, 0xc0, 0xc5, 0xf1, 0x9d, 0x19, 0xc1, 0x36, 0xe1, 0xff, 0x46, 0x82, 0xa7,
+ 0xf5, 0x8f, 0x12, 0x80, 0x21, 0x76, 0xdd, 0x32, 0x7b, 0x1a, 0x65, 0xe9, 0x7c, 0x0b, 0x54, 0xe8,
+ 0xd0, 0x26, 0x7c, 0x42, 0xea, 0x9d, 0xab, 0x81, 0x2b, 0xf7, 0x87, 0x36, 0xf9, 0xe2, 0x78, 0x69,
+ 0x21, 0x6d, 0xc1, 0x7a, 0x10, 0xb7, 0x81, 0x9b, 0xa1, 0x93, 0x25, 0x6e, 0xfd, 0xba, 0x3c, 0xf4,
+ 0x17, 0xc7, 0x4b, 0x19, 0x67, 0x87, 0x1a, 0x32, 0xc9, 0x0e, 0xc2, 0x01, 0x80, 0x3a, 0x76, 0xe9,
+ 0x7d, 0x07, 0x9b, 0xae, 0x3f, 0x92, 0x66, 0x10, 0x11, 0xfe, 0xab, 0xc5, 0x16, 0x8a, 0x59, 0x74,
+ 0x2e, 0x09, 0x2f, 0xe0, 0x66, 0x8a, 0x0d, 0x65, 0x8c, 0x00, 0xaf, 0x82, 0x09, 0x87, 0x60, 0xd7,
+ 0x32, 0x1b, 0x15, 0x1e, 0x45, 0x38, 0x81, 0x88, 0xb7, 0x22, 0xd1, 0x0b, 0x5f, 0x01, 0x93, 0x06,
+ 0x71, 0x5d, 0xdc, 0x27, 0x8d, 0x2a, 0x07, 0xce, 0x0a, 0xe0, 0xe4, 0x96, 0xdf, 0x8c, 0x82, 0xfe,
+ 0xd6, 0x1f, 0x15, 0x30, 0x1d, 0xce, 0xdc, 0x19, 0xec, 0x9c, 0x3b, 0xf2, 0xce, 0x69, 0x8d, 0x4f,
+ 0x96, 0x9c, 0x0d, 0xf3, 0x71, 0x39, 0xe6, 0x38, 0x4b, 0x47, 0xf8, 0x33, 0x50, 0x73, 0x89, 0x4e,
+ 0xba, 0xd4, 0x72, 0x84, 0xe3, 0xaf, 0x15, 0x74, 0x1c, 0x1f, 0x10, 0x7d, 0x4f, 0x98, 0x76, 0xce,
+ 0x33, 0xcf, 0x83, 0x5f, 0x28, 0xa4, 0x84, 0xef, 0x81, 0x1a, 0x25, 0x86, 0xad, 0x63, 0x4a, 0xc4,
+ 0xae, 0x79, 0x31, 0xee, 0x3c, 0xcb, 0x19, 0x46, 0xb6, 0x6b, 0xf5, 0xee, 0x0b, 0x18, 0xdf, 0x32,
+ 0xe1, 0x64, 0x04, 0xad, 0x28, 0xa4, 0x81, 0x36, 0x98, 0xf1, 0xec, 0x1e, 0x43, 0x52, 0x76, 0x9c,
+ 0xf7, 0x87, 0x22, 0x87, 0x6e, 0x8c, 0x9f, 0x95, 0x7d, 0xc9, 0xae, 0xb3, 0x20, 0x46, 0x99, 0x91,
+ 0xdb, 0x51, 0x82, 0x1f, 0xae, 0x81, 0x59, 0x43, 0x33, 0x11, 0xc1, 0xbd, 0xe1, 0x1e, 0xe9, 0x5a,
+ 0x66, 0xcf, 0xe5, 0xa9, 0x54, 0xed, 0x2c, 0x0a, 0x82, 0xd9, 0x2d, 0xb9, 0x1b, 0x25, 0xf1, 0x70,
+ 0x13, 0xcc, 0x07, 0x07, 0xf0, 0x5d, 0xcd, 0xa5, 0x96, 0x33, 0xdc, 0xd4, 0x0c, 0x8d, 0x36, 0x26,
+ 0x38, 0x4f, 0x63, 0x74, 0xbc, 0x34, 0x8f, 0x32, 0xfa, 0x51, 0xa6, 0x55, 0xeb, 0x77, 0x13, 0x60,
+ 0x36, 0x71, 0x2e, 0xc0, 0x07, 0x60, 0xa1, 0xeb, 0x39, 0x0e, 0x31, 0xe9, 0xb6, 0x67, 0x1c, 0x10,
+ 0x67, 0xaf, 0x7b, 0x48, 0x7a, 0x9e, 0x4e, 0x7a, 0x7c, 0x59, 0xab, 0x9d, 0xa6, 0xf0, 0x75, 0x61,
+ 0x3d, 0x13, 0x85, 0x72, 0xac, 0xe1, 0xbb, 0x00, 0x9a, 0xbc, 0x69, 0x4b, 0x73, 0xdd, 0x90, 0xb3,
+ 0xc4, 0x39, 0xc3, 0xad, 0xb8, 0x9d, 0x42, 0xa0, 0x0c, 0x2b, 0xe6, 0x63, 0x8f, 0xb8, 0x9a, 0x43,
+ 0x7a, 0x49, 0x1f, 0xcb, 0xb2, 0x8f, 0x1b, 0x99, 0x28, 0x94, 0x63, 0x0d, 0xdf, 0x00, 0x53, 0xfe,
+ 0x68, 0x7c, 0xce, 0xc5, 0xe2, 0xcc, 0x09, 0xb2, 0xa9, 0xed, 0xa8, 0x0b, 0xc5, 0x71, 0x2c, 0x34,
+ 0xeb, 0xc0, 0x25, 0xce, 0x80, 0xf4, 0xee, 0xf8, 0xe2, 0x80, 0x55, 0xd0, 0x2a, 0xaf, 0xa0, 0x61,
+ 0x68, 0x3b, 0x29, 0x04, 0xca, 0xb0, 0x62, 0xa1, 0xf9, 0x59, 0x93, 0x0a, 0x6d, 0x42, 0x0e, 0x6d,
+ 0x3f, 0x13, 0x85, 0x72, 0xac, 0x59, 0xee, 0xf9, 0x2e, 0xaf, 0x0d, 0xb0, 0xa6, 0xe3, 0x03, 0x9d,
+ 0x34, 0x26, 0xe5, 0xdc, 0xdb, 0x96, 0xbb, 0x51, 0x12, 0x0f, 0xef, 0x80, 0x8b, 0x7e, 0xd3, 0xbe,
+ 0x89, 0x43, 0x92, 0x1a, 0x27, 0x79, 0x41, 0x90, 0x5c, 0xdc, 0x4e, 0x02, 0x50, 0xda, 0x06, 0xde,
+ 0x02, 0x33, 0x5d, 0x4b, 0xd7, 0x79, 0x3e, 0xae, 0x5b, 0x9e, 0x49, 0x1b, 0x75, 0xce, 0x02, 0xd9,
+ 0x1e, 0x5a, 0x97, 0x7a, 0x50, 0x02, 0x09, 0x7f, 0x0e, 0x40, 0x37, 0x28, 0x0c, 0x6e, 0x03, 0x8c,
+ 0x51, 0x00, 0xe9, 0xb2, 0x14, 0x55, 0xe6, 0xb0, 0xc9, 0x45, 0x31, 0xca, 0xd6, 0xc7, 0x0a, 0x58,
+ 0xcc, 0xd9, 0xe8, 0xf0, 0x87, 0x52, 0x11, 0xbc, 0x96, 0x28, 0x82, 0x97, 0x73, 0xcc, 0x62, 0x95,
+ 0xf0, 0x10, 0x4c, 0x33, 0x41, 0xa2, 0x99, 0x7d, 0x1f, 0x22, 0xce, 0xb2, 0x76, 0x6e, 0x00, 0x28,
+ 0x8e, 0x8e, 0x4e, 0xe5, 0x8b, 0xa3, 0xe3, 0xa5, 0x69, 0xa9, 0x0f, 0xc9, 0xc4, 0xad, 0xdf, 0x94,
+ 0x00, 0xd8, 0x20, 0xb6, 0x6e, 0x0d, 0x0d, 0x62, 0x9e, 0x85, 0xa6, 0xb9, 0x27, 0x69, 0x9a, 0x97,
+ 0xf3, 0x97, 0x24, 0x74, 0x2a, 0x57, 0xd4, 0xbc, 0x97, 0x10, 0x35, 0xaf, 0x14, 0x21, 0x7b, 0xb6,
+ 0xaa, 0xf9, 0xb4, 0x0c, 0xe6, 0x22, 0x70, 0x24, 0x6b, 0x6e, 0x4b, 0x2b, 0xfa, 0x72, 0x62, 0x45,
+ 0x17, 0x33, 0x4c, 0xbe, 0x32, 0x5d, 0xf3, 0x01, 0x98, 0x61, 0xaa, 0xc3, 0x5f, 0x3f, 0xae, 0x69,
+ 0x26, 0x4e, 0xac, 0x69, 0xc2, 0x4a, 0xb4, 0x29, 0x31, 0xa1, 0x04, 0x73, 0x8e, 0x86, 0x9a, 0xfc,
+ 0x26, 0x6a, 0xa8, 0x3f, 0x29, 0x60, 0x26, 0x5a, 0xa6, 0x33, 0x10, 0x51, 0x77, 0x65, 0x11, 0xf5,
+ 0x62, 0x81, 0xe4, 0xcc, 0x51, 0x51, 0x9f, 0x56, 0xe2, 0xae, 0x73, 0x19, 0xb5, 0xcc, 0xae, 0x60,
+ 0xb6, 0xae, 0x75, 0xb1, 0x2b, 0xea, 0xed, 0x79, 0xff, 0xfa, 0xe5, 0xb7, 0xa1, 0xb0, 0x57, 0x12,
+ 0x5c, 0xa5, 0xaf, 0x56, 0x70, 0x95, 0x9f, 0x8f, 0xe0, 0xfa, 0x09, 0xa8, 0xb9, 0x81, 0xd4, 0xaa,
+ 0x70, 0xca, 0x6b, 0x85, 0x36, 0xb6, 0x50, 0x59, 0x21, 0x75, 0xa8, 0xaf, 0x42, 0xba, 0x2c, 0x65,
+ 0x55, 0xfd, 0x3a, 0x95, 0x15, 0x4b, 0x74, 0x1b, 0x7b, 0x2e, 0xe9, 0xf1, 0x4d, 0x55, 0x8b, 0x12,
+ 0x7d, 0x97, 0xb7, 0x22, 0xd1, 0x0b, 0xf7, 0xc1, 0xa2, 0xed, 0x58, 0x7d, 0x87, 0xb8, 0xee, 0x06,
+ 0xc1, 0x3d, 0x5d, 0x33, 0x49, 0x10, 0x80, 0x5f, 0x13, 0x2f, 0x8f, 0x8e, 0x97, 0x16, 0x77, 0xb3,
+ 0x21, 0x28, 0xcf, 0xb6, 0xf5, 0x97, 0x0a, 0xb8, 0x90, 0x3c, 0x1b, 0x73, 0x64, 0x8a, 0x72, 0x2a,
+ 0x99, 0x72, 0x3d, 0x96, 0xa7, 0xbe, 0x86, 0x8b, 0x3d, 0x15, 0xa4, 0x72, 0x75, 0x0d, 0xcc, 0x0a,
+ 0x59, 0x12, 0x74, 0x0a, 0xa1, 0x16, 0x2e, 0xcf, 0xbe, 0xdc, 0x8d, 0x92, 0x78, 0x78, 0x1b, 0x4c,
+ 0x3b, 0x5c, 0x79, 0x05, 0x04, 0xbe, 0x7a, 0xf9, 0x3f, 0x41, 0x30, 0x8d, 0xe2, 0x9d, 0x48, 0xc6,
+ 0x32, 0xe5, 0x12, 0x09, 0x92, 0x80, 0xa0, 0x22, 0x2b, 0x97, 0xb5, 0x24, 0x00, 0xa5, 0x6d, 0xe0,
+ 0x16, 0x98, 0xf3, 0xcc, 0x34, 0x95, 0x9f, 0x6b, 0x97, 0x05, 0xd5, 0xdc, 0x7e, 0x1a, 0x82, 0xb2,
+ 0xec, 0xe0, 0x43, 0x49, 0xcc, 0x4c, 0xf0, 0xf3, 0xe4, 0x7a, 0x81, 0x3d, 0x51, 0x58, 0xcd, 0x64,
+ 0x48, 0xad, 0x5a, 0x51, 0xa9, 0xd5, 0xfa, 0x48, 0x01, 0x30, 0xbd, 0x0f, 0xc7, 0xbe, 0x04, 0xa4,
+ 0x2c, 0x62, 0x15, 0x53, 0xcb, 0xd6, 0x3f, 0x37, 0x0a, 0xea, 0x9f, 0xe8, 0x40, 0x2d, 0x26, 0x80,
+ 0xc4, 0x44, 0x9f, 0xcd, 0xa3, 0x4e, 0x51, 0x01, 0x14, 0x39, 0xf5, 0x1c, 0x04, 0x50, 0x8c, 0xec,
+ 0xd9, 0x02, 0xe8, 0x9f, 0x25, 0x30, 0x17, 0x81, 0x0b, 0x0b, 0xa0, 0x0c, 0x93, 0xef, 0x1e, 0x76,
+ 0x8a, 0x89, 0x92, 0x68, 0xea, 0xfe, 0x97, 0x44, 0x49, 0xe4, 0x55, 0x8e, 0x28, 0xf9, 0x43, 0x29,
+ 0xee, 0xfa, 0x09, 0x45, 0xc9, 0x73, 0x78, 0xe1, 0xf8, 0xc6, 0xe9, 0x9a, 0xd6, 0x5f, 0xcb, 0xe0,
+ 0x42, 0x72, 0x1f, 0x4a, 0x05, 0x52, 0x19, 0x5b, 0x20, 0x77, 0xc1, 0xfc, 0x23, 0x4f, 0xd7, 0x87,
+ 0x3c, 0x86, 0x58, 0x95, 0xf4, 0x4b, 0xeb, 0xff, 0x0b, 0xcb, 0xf9, 0x1f, 0x67, 0x60, 0x50, 0xa6,
+ 0x65, 0xba, 0x5e, 0x56, 0xbe, 0x6c, 0xbd, 0xac, 0x9e, 0xa2, 0x5e, 0x66, 0x4b, 0x8e, 0xf2, 0xa9,
+ 0x24, 0xc7, 0xc9, 0x8a, 0x65, 0xc6, 0xc1, 0x35, 0xf6, 0xea, 0x3f, 0x52, 0xc0, 0x42, 0xf6, 0x85,
+ 0x1b, 0xea, 0x60, 0xc6, 0xc0, 0x8f, 0xe3, 0x0f, 0x1f, 0xe3, 0x8a, 0x88, 0x47, 0x35, 0x5d, 0xf5,
+ 0x3f, 0x19, 0xa9, 0xf7, 0x4c, 0xba, 0xe3, 0xec, 0x51, 0x47, 0x33, 0xfb, 0x7e, 0xe5, 0xdd, 0x92,
+ 0xb8, 0x50, 0x82, 0x1b, 0xbe, 0x0f, 0x6a, 0x06, 0x7e, 0xbc, 0xe7, 0x39, 0xfd, 0xac, 0x0a, 0x59,
+ 0x6c, 0x1c, 0xbe, 0x01, 0xb6, 0x04, 0x0b, 0x0a, 0xf9, 0x5a, 0x9f, 0x2b, 0x60, 0x31, 0xa7, 0xaa,
+ 0x7e, 0x8b, 0xa2, 0xdc, 0x01, 0x57, 0xa4, 0x20, 0xd9, 0xae, 0x24, 0x8f, 0x3c, 0x9d, 0x6f, 0x50,
+ 0x21, 0x64, 0xae, 0x81, 0xba, 0x8d, 0x1d, 0xaa, 0x85, 0x32, 0xb8, 0xda, 0x99, 0x1e, 0x1d, 0x2f,
+ 0xd5, 0x77, 0x83, 0x46, 0x14, 0xf5, 0xb7, 0xfe, 0xa3, 0x80, 0xea, 0x5e, 0x17, 0xeb, 0xe4, 0x0c,
+ 0x94, 0xc4, 0x86, 0xa4, 0x24, 0xf2, 0x5f, 0xe9, 0xb9, 0x3f, 0xb9, 0x22, 0x62, 0x33, 0x21, 0x22,
+ 0x5e, 0x1a, 0xc3, 0xf3, 0x6c, 0xfd, 0xf0, 0x36, 0xa8, 0x87, 0xc3, 0x9d, 0xec, 0x70, 0x6b, 0xfd,
+ 0xbe, 0x04, 0xa6, 0x62, 0x43, 0x9c, 0xf0, 0x68, 0x7c, 0x28, 0xd5, 0x03, 0xb6, 0xe9, 0x57, 0x8b,
+ 0x04, 0xa2, 0x06, 0x67, 0xff, 0x3b, 0x26, 0x75, 0xe2, 0x97, 0xc7, 0x74, 0x49, 0xf8, 0x01, 0x98,
+ 0xa1, 0xd8, 0xe9, 0x13, 0x1a, 0xf4, 0xf1, 0x09, 0xab, 0x47, 0x8f, 0x29, 0xf7, 0xa5, 0x5e, 0x94,
+ 0x40, 0x5f, 0xba, 0x0d, 0xa6, 0xa5, 0xc1, 0xe0, 0x05, 0x50, 0x3e, 0x22, 0x43, 0x5f, 0x52, 0x21,
+ 0xf6, 0x27, 0x9c, 0x07, 0xd5, 0x01, 0xd6, 0x3d, 0x3f, 0xcf, 0xeb, 0xc8, 0xff, 0x71, 0xab, 0xf4,
+ 0x96, 0xd2, 0xfa, 0x2d, 0x9b, 0x9c, 0x28, 0x39, 0xcf, 0x20, 0xbb, 0xde, 0x95, 0xb2, 0x2b, 0xff,
+ 0x83, 0x61, 0x7c, 0xcb, 0xe4, 0xe5, 0x18, 0x4a, 0xe4, 0xd8, 0xab, 0x85, 0xd8, 0x9e, 0x9d, 0x69,
+ 0xff, 0x2a, 0x81, 0xf9, 0x18, 0x3a, 0x92, 0xaa, 0xdf, 0x97, 0xa4, 0xea, 0x72, 0x42, 0xaa, 0x36,
+ 0xb2, 0x6c, 0xbe, 0xd3, 0xaa, 0xe3, 0xb5, 0xea, 0x9f, 0x15, 0x30, 0x1b, 0x9b, 0xbb, 0x33, 0x10,
+ 0xab, 0xf7, 0x64, 0xb1, 0xfa, 0x52, 0x91, 0xa4, 0xc9, 0x51, 0xab, 0x1f, 0x4e, 0x48, 0xce, 0x7f,
+ 0xeb, 0xdf, 0xd0, 0x7e, 0x09, 0xe6, 0x07, 0x96, 0xee, 0x19, 0x64, 0x5d, 0xc7, 0x9a, 0x11, 0x00,
+ 0x98, 0xba, 0x2b, 0x27, 0xef, 0x89, 0x21, 0x3d, 0x71, 0x5c, 0xcd, 0xa5, 0xc4, 0xa4, 0x0f, 0x22,
+ 0xcb, 0x48, 0x53, 0x3e, 0xc8, 0xa0, 0x43, 0x99, 0x83, 0xc0, 0x37, 0xc0, 0x14, 0x53, 0x65, 0x5a,
+ 0x97, 0x6c, 0x63, 0x23, 0x48, 0xac, 0xf0, 0xf3, 0xd8, 0x5e, 0xd4, 0x85, 0xe2, 0x38, 0x78, 0x08,
+ 0xe6, 0x6c, 0xab, 0xb7, 0x85, 0x4d, 0xdc, 0x27, 0x4c, 0x66, 0xec, 0x5a, 0xba, 0xd6, 0x1d, 0xf2,
+ 0x87, 0xb5, 0x7a, 0xe7, 0xcd, 0xe0, 0xd1, 0x64, 0x37, 0x0d, 0x61, 0x17, 0xd0, 0x8c, 0x66, 0xbe,
+ 0xa9, 0xb3, 0x28, 0xa1, 0x93, 0xfa, 0xa4, 0xeb, 0x3f, 0x69, 0xaf, 0x16, 0xc9, 0xb0, 0x53, 0x7e,
+ 0xd4, 0xcd, 0x7b, 0x37, 0xac, 0x9d, 0xea, 0xdd, 0x30, 0xe3, 0x02, 0x55, 0x3f, 0xd9, 0x05, 0xaa,
+ 0xf5, 0x51, 0x15, 0x5c, 0x4c, 0x9d, 0xb6, 0x5f, 0xe3, 0xe3, 0x5f, 0xea, 0x26, 0x52, 0x3e, 0xc1,
+ 0x4d, 0x64, 0x0d, 0xcc, 0x8a, 0xef, 0xc9, 0x89, 0x8b, 0x4c, 0x38, 0x1f, 0xeb, 0x72, 0x37, 0x4a,
+ 0xe2, 0xb3, 0x1e, 0x1f, 0xab, 0x27, 0x7c, 0x7c, 0x8c, 0x7b, 0x21, 0xfe, 0x3f, 0xca, 0xcf, 0xde,
+ 0xb4, 0x17, 0xe2, 0xdf, 0xa4, 0x92, 0x78, 0x26, 0x32, 0x7c, 0xd6, 0x90, 0x61, 0x52, 0x16, 0x19,
+ 0xfb, 0x52, 0x2f, 0x4a, 0xa0, 0xbf, 0xd4, 0x37, 0x53, 0x9c, 0xf1, 0xcd, 0x74, 0xa5, 0xc8, 0x96,
+ 0x28, 0xfe, 0xce, 0x98, 0x79, 0x63, 0x9c, 0x3a, 0xf9, 0x8d, 0xb1, 0xf5, 0x37, 0x05, 0xbc, 0x90,
+ 0xbb, 0x29, 0xe1, 0x9a, 0x24, 0x01, 0x56, 0x12, 0x12, 0xe0, 0x7b, 0xb9, 0x86, 0x31, 0x1d, 0xe0,
+ 0x64, 0x3f, 0x41, 0xbe, 0x5d, 0xec, 0x09, 0x32, 0xe3, 0x1e, 0x31, 0xfe, 0x2d, 0xb2, 0xb3, 0xf2,
+ 0xe4, 0x69, 0xf3, 0xdc, 0x27, 0x4f, 0x9b, 0xe7, 0x3e, 0x7b, 0xda, 0x3c, 0xf7, 0xab, 0x51, 0x53,
+ 0x79, 0x32, 0x6a, 0x2a, 0x9f, 0x8c, 0x9a, 0xca, 0x67, 0xa3, 0xa6, 0xf2, 0xf7, 0x51, 0x53, 0xf9,
+ 0xf0, 0xf3, 0xe6, 0xb9, 0xf7, 0x27, 0xc5, 0x88, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xda, 0x5d,
+ 0xee, 0xc9, 0xd4, 0x29, 0x00, 0x00,
}
func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
@@ -2542,6 +2543,9 @@ func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds))
+ i--
+ dAtA[i] = 0x48
if m.RevisionHistoryLimit != nil {
i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit))
i--
@@ -2631,6 +2635,9 @@ func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas))
+ i--
+ dAtA[i] = 0x58
if len(m.Conditions) > 0 {
for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
{
@@ -3255,6 +3262,7 @@ func (m *StatefulSetSpec) Size() (n int) {
if m.RevisionHistoryLimit != nil {
n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit))
}
+ n += 1 + sovGenerated(uint64(m.MinReadySeconds))
return n
}
@@ -3282,6 +3290,7 @@ func (m *StatefulSetStatus) Size() (n int) {
n += 1 + l + sovGenerated(uint64(l))
}
}
+ n += 1 + sovGenerated(uint64(m.AvailableReplicas))
return n
}
@@ -3729,6 +3738,7 @@ func (this *StatefulSetSpec) String() string {
`PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`,
`UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`,
`RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`,
+ `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`,
`}`,
}, "")
return s
@@ -3752,6 +3762,7 @@ func (this *StatefulSetStatus) String() string {
`UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`,
`CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`,
`Conditions:` + repeatedStringForConditions + `,`,
+ `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`,
`}`,
}, "")
return s
@@ -8486,6 +8497,25 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error {
}
}
m.RevisionHistoryLimit = &v
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType)
+ }
+ m.MinReadySeconds = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinReadySeconds |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -8749,6 +8779,25 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType)
+ }
+ m.AvailableReplicas = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AvailableReplicas |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
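The StatefulSetSpec and StatefulSetStatus marshal hunks above write the new fields with the hard-coded key bytes 0x48 and 0x58, and the matching unmarshal hunks switch on field numbers 9 and 11. Both follow from the protobuf wire format: a field key is (fieldNumber << 3) | wireType, and varint-encoded scalars use wire type 0. A minimal sketch of that arithmetic, for illustration only and not part of the vendored patch:

```go
package main

import "fmt"

// protoKey computes a protobuf field key byte: (fieldNumber << 3) | wireType.
func protoKey(fieldNumber, wireType int) int {
	return fieldNumber<<3 | wireType
}

func main() {
	const wireVarint = 0 // wire type 0 covers varint-encoded scalars such as int32
	// StatefulSetSpec.minReadySeconds is field 9, written as 0x48 by MarshalToSizedBuffer.
	fmt.Printf("minReadySeconds key: %#x\n", protoKey(9, wireVarint))
	// StatefulSetStatus.availableReplicas is field 11, written as 0x58.
	fmt.Printf("availableReplicas key: %#x\n", protoKey(11, wireVarint))
}
```

Running it prints 0x48 and 0x58, matching the bytes emitted by the generated marshal code above.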
diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto
index ff306ba6a..fb3f05ea2 100644
--- a/vendor/k8s.io/api/apps/v1beta2/generated.proto
+++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto
@@ -487,7 +487,7 @@ message RollingUpdateDaemonSet {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
- // number is calculated from percentage by rounding down to a minimum of one.
+ // number is calculated from percentage by rounding up.
// This cannot be 0 if MaxSurge is 0
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
@@ -519,7 +519,7 @@ message RollingUpdateDaemonSet {
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
// cause evictions during disruption.
- // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.
+ // This is a beta field and is enabled/disabled by the DaemonSetUpdateSurge feature gate.
// +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
}
@@ -592,6 +592,7 @@ message ScaleStatus {
// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
+ // +mapType=atomic
map<string, string> selector = 2;
// label selector for pods that should match the replicas count. This is a serializated
@@ -714,6 +715,13 @@ message StatefulSetSpec {
// consists of all revisions not represented by a currently applied
// StatefulSetSpec version. The default value is 10.
optional int32 revisionHistoryLimit = 8;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // +optional
+ optional int32 minReadySeconds = 9;
}
// StatefulSetStatus represents the current state of a StatefulSet.
@@ -756,6 +764,12 @@ message StatefulSetStatus {
// +patchMergeKey=type
// +patchStrategy=merge
repeated StatefulSetCondition conditions = 10;
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // Remove omitempty when graduating to beta
+ // +optional
+ optional int32 availableReplicas = 11;
}
// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go
index 316a0ad24..67e10b971 100644
--- a/vendor/k8s.io/api/apps/v1beta2/types.go
+++ b/vendor/k8s.io/api/apps/v1beta2/types.go
@@ -45,6 +45,7 @@ type ScaleStatus struct {
// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
+ // +mapType=atomic
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// label selector for pods that should match the replicas count. This is a serializated
@@ -82,6 +83,7 @@ type Scale struct {
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
+// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=Scale,result=Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.8
// +k8s:prerelease-lifecycle-gen:deprecated=1.9
@@ -226,6 +228,13 @@ type StatefulSetSpec struct {
// consists of all revisions not represented by a currently applied
// StatefulSetSpec version. The default value is 10.
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"`
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"`
}
// StatefulSetStatus represents the current state of a StatefulSet.
@@ -268,6 +277,12 @@ type StatefulSetStatus struct {
// +patchMergeKey=type
// +patchStrategy=merge
Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet.
+ // This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.
+ // Remove omitempty when graduating to beta
+ // +optional
+ AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,11,opt,name=availableReplicas"`
}
type StatefulSetConditionType string
@@ -554,7 +569,7 @@ type RollingUpdateDaemonSet struct {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
- // number is calculated from percentage by rounding down to a minimum of one.
+ // number is calculated from percentage by rounding up.
// This cannot be 0 if MaxSurge is 0
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
@@ -586,7 +601,7 @@ type RollingUpdateDaemonSet struct {
// daemonset on any given node can double if the readiness check fails, and
// so resource intensive daemonsets should take into account that they may
// cause evictions during disruption.
- // This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.
+ // This is a beta field and is enabled/disabled by the DaemonSetUpdateSurge feature gate.
// +optional
MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
}
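For code that consumes these vendored types, the additions are ordinary struct fields. A minimal sketch, assuming a cluster with the StatefulSetMinReadySeconds feature gate enabled; the numeric values are placeholders:

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	// With the feature gate enabled, a pod must stay Ready for MinReadySeconds
	// before the controller counts it as available.
	spec := appsv1beta2.StatefulSetSpec{
		MinReadySeconds: 10, // placeholder; new field 9 in this vendored version
	}

	// The controller reports availability back through the new status field.
	status := appsv1beta2.StatefulSetStatus{
		AvailableReplicas: 3, // placeholder; new field 11 in this vendored version
	}

	fmt.Println(spec.MinReadySeconds, status.AvailableReplicas)
}
```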
diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
index 51d552234..1f9912203 100644
--- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
@@ -262,8 +262,8 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string {
var map_RollingUpdateDaemonSet = map[string]string{
"": "Spec to control the desired behavior of daemon set rolling update.",
- "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding down to a minimum of one. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
- "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.",
+ "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
+ "maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is beta field and enabled/disabled by DaemonSetUpdateSurge feature gate.",
}
func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
@@ -361,6 +361,7 @@ var map_StatefulSetSpec = map[string]string{
"podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.",
"updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.",
"revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.",
+ "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready) This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate.",
}
func (StatefulSetSpec) SwaggerDoc() map[string]string {
@@ -378,6 +379,7 @@ var map_StatefulSetStatus = map[string]string{
"updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)",
"collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.",
"conditions": "Represents the latest available observations of a statefulset's current state.",
+ "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this StatefulSet. This is an alpha field and requires enabling StatefulSetMinReadySeconds feature gate. Remove omitempty when graduating to beta",
}
func (StatefulSetStatus) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto
index c91fd92a5..8f928be40 100644
--- a/vendor/k8s.io/api/authentication/v1/generated.proto
+++ b/vendor/k8s.io/api/authentication/v1/generated.proto
@@ -58,11 +58,15 @@ message ExtraValue {
// TokenRequest requests a token for a given service account.
message TokenRequest {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+ // Spec holds information about the request being evaluated
optional TokenRequestSpec spec = 2;
+ // Status is filled in by the server and indicates whether the token can be authenticated.
// +optional
optional TokenRequestStatus status = 3;
}
@@ -105,6 +109,8 @@ message TokenRequestStatus {
// Note: TokenReview requests may be cached by the webhook token authenticator
// plugin in the kube-apiserver.
message TokenReview {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
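The comments added to TokenRequest spell out the split the object relies on: the caller fills in Spec and the API server populates Status. A usage sketch against the service account token subresource, assuming client-go at the matching version and in-cluster credentials; the namespace, service account name, and audience are placeholders:

```go
package main

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // any client-go configuration works here
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Spec holds the request; Status is filled in by the server.
	req := &authenticationv1.TokenRequest{
		Spec: authenticationv1.TokenRequestSpec{
			Audiences: []string{"https://kubernetes.default.svc"}, // placeholder audience
		},
	}
	out, err := cs.CoreV1().ServiceAccounts("default").CreateToken(
		context.TODO(), "default", req, metav1.CreateOptions{}) // placeholder namespace and SA name
	if err != nil {
		panic(err)
	}
	fmt.Println("token expires at:", out.Status.ExpirationTimestamp)
}
```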
diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go
index 6f5f0ad1a..7ba247d63 100644
--- a/vendor/k8s.io/api/authentication/v1/types.go
+++ b/vendor/k8s.io/api/authentication/v1/types.go
@@ -31,6 +31,9 @@ const (
// It can be repeated multiplied times for multiple groups.
ImpersonateGroupHeader = "Impersonate-Group"
+ // ImpersonateUIDHeader is used to impersonate a particular UID during an API server request
+ ImpersonateUIDHeader = "Impersonate-Uid"
+
// ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the
// extra map[string][]string for user.Info. The key will be every after the prefix.
// It can be repeated multiplied times for multiple map keys and the same key can be repeated multiple
@@ -48,6 +51,8 @@ const (
// plugin in the kube-apiserver.
type TokenReview struct {
metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -130,10 +135,15 @@ func (t ExtraValue) String() string {
// TokenRequest requests a token for a given service account.
type TokenRequest struct {
metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+ // Spec holds information about the request being evaluated
Spec TokenRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+ // Status is filled in by the server and indicates whether the token can be authenticated.
// +optional
Status TokenRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
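The new ImpersonateUIDHeader constant names the Impersonate-Uid header that joins the existing user, group, and extra impersonation headers. A sketch of attaching it to a raw API request; the URL, user name, and UID are placeholders, and client-go's rest.ImpersonationConfig remains the usual higher-level entry point:

```go
package main

import (
	"fmt"
	"net/http"

	authenticationv1 "k8s.io/api/authentication/v1"
)

func main() {
	// Placeholder request; in practice this would go through an authenticated transport.
	req, err := http.NewRequest(http.MethodGet, "https://kubernetes.default.svc/api", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set(authenticationv1.ImpersonateUserHeader, "jane.doe")                            // placeholder user
	req.Header.Set(authenticationv1.ImpersonateUIDHeader, "06f6ce97-e2c5-4ab8-1234-0123456789ab") // placeholder UID

	fmt.Println(req.Header)
}
```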
diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
index 09f6b920f..f9a88a3df 100644
--- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go
@@ -40,7 +40,10 @@ func (BoundObjectReference) SwaggerDoc() map[string]string {
}
var map_TokenRequest = map[string]string{
- "": "TokenRequest requests a token for a given service account.",
+ "": "TokenRequest requests a token for a given service account.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the request being evaluated",
+ "status": "Status is filled in by the server and indicates whether the token can be authenticated.",
}
func (TokenRequest) SwaggerDoc() map[string]string {
@@ -69,9 +72,10 @@ func (TokenRequestStatus) SwaggerDoc() map[string]string {
}
var map_TokenReview = map[string]string{
- "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.",
- "spec": "Spec holds information about the request being evaluated",
- "status": "Status is filled in by the server and indicates whether the request can be authenticated.",
+ "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the request being evaluated",
+ "status": "Status is filled in by the server and indicates whether the request can be authenticated.",
}
func (TokenReview) SwaggerDoc() map[string]string {
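TokenReview keeps the same request/response shape: Spec carries the token to check and Status reports whether it authenticated (the doc strings above were updated to say "token" rather than "request"). A sketch using the v1 API, assuming client-go at the matching version and in-cluster credentials; the token value is a placeholder:

```go
package main

import (
	"context"
	"fmt"

	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Spec carries the token being checked; the server answers in Status.
	review := &authenticationv1.TokenReview{
		Spec: authenticationv1.TokenReviewSpec{Token: "<bearer-token>"}, // placeholder token
	}
	out, err := cs.AuthenticationV1().TokenReviews().Create(context.TODO(), review, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("authenticated:", out.Status.Authenticated, "user:", out.Status.User.Username)
}
```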
diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto
index d3bff49eb..67a32b320 100644
--- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto
@@ -41,13 +41,15 @@ message ExtraValue {
// Note: TokenReview requests may be cached by the webhook token authenticator
// plugin in the kube-apiserver.
message TokenReview {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Spec holds information about the request being evaluated
optional TokenReviewSpec spec = 2;
- // Status is filled in by the server and indicates whether the request can be authenticated.
+ // Status is filled in by the server and indicates whether the token can be authenticated.
// +optional
optional TokenReviewStatus status = 3;
}
diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go
index 121b34618..08e1e09b6 100644
--- a/vendor/k8s.io/api/authentication/v1beta1/types.go
+++ b/vendor/k8s.io/api/authentication/v1beta1/types.go
@@ -35,13 +35,15 @@ import (
// plugin in the kube-apiserver.
type TokenReview struct {
metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Spec holds information about the request being evaluated
Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
- // Status is filled in by the server and indicates whether the request can be authenticated.
+ // Status is filled in by the server and indicates whether the token can be authenticated.
// +optional
Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go
index 8c9acfb5b..1086955c3 100644
--- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go
@@ -28,9 +28,10 @@ package v1beta1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_TokenReview = map[string]string{
- "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.",
- "spec": "Spec holds information about the request being evaluated",
- "status": "Status is filled in by the server and indicates whether the request can be authenticated.",
+ "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the request being evaluated",
+ "status": "Status is filled in by the server and indicates whether the token can be authenticated.",
}
func (TokenReview) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto
index 931b0f499..0170ee11f 100644
--- a/vendor/k8s.io/api/authorization/v1/generated.proto
+++ b/vendor/k8s.io/api/authorization/v1/generated.proto
@@ -41,6 +41,8 @@ message ExtraValue {
// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
// checking.
message LocalSubjectAccessReview {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -134,6 +136,8 @@ message ResourceRule {
// spec.namespace means "in all namespaces". Self is a special case, because users should always be able
// to check whether they can perform an action
message SelfSubjectAccessReview {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -164,6 +168,8 @@ message SelfSubjectAccessReviewSpec {
// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns.
// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.
message SelfSubjectRulesReview {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -175,6 +181,7 @@ message SelfSubjectRulesReview {
optional SubjectRulesReviewStatus status = 3;
}
+// SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.
message SelfSubjectRulesReviewSpec {
// Namespace to evaluate rules for. Required.
optional string namespace = 1;
@@ -182,6 +189,8 @@ message SelfSubjectRulesReviewSpec {
// SubjectAccessReview checks whether or not a user or group can perform an action.
message SubjectAccessReview {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go
index be8913eb4..d1fe483f9 100644
--- a/vendor/k8s.io/api/authorization/v1/types.go
+++ b/vendor/k8s.io/api/authorization/v1/types.go
@@ -30,6 +30,8 @@ import (
// SubjectAccessReview checks whether or not a user or group can perform an action.
type SubjectAccessReview struct {
metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -51,6 +53,8 @@ type SubjectAccessReview struct {
// to check whether they can perform an action
type SelfSubjectAccessReview struct {
metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -71,6 +75,8 @@ type SelfSubjectAccessReview struct {
// checking.
type LocalSubjectAccessReview struct {
metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -200,6 +206,8 @@ type SubjectAccessReviewStatus struct {
// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.
type SelfSubjectRulesReview struct {
metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -211,6 +219,7 @@ type SelfSubjectRulesReview struct {
Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
+// SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.
type SelfSubjectRulesReviewSpec struct {
// Namespace to evaluate rules for. Required.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
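The access review objects documented above are written by the caller and answered by the server in Status. A sketch of the common case, a SelfSubjectAccessReview asking whether the current credentials may list pods; it assumes client-go at the matching version and in-cluster credentials, and the resource attributes are placeholders:

```go
package main

import (
	"context"
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// "Can the current caller list pods in default?" -- the server answers in Status.
	ssar := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Namespace: "default", // placeholder attributes
				Verb:      "list",
				Resource:  "pods",
			},
		},
	}
	out, err := cs.AuthorizationV1().SelfSubjectAccessReviews().Create(context.TODO(), ssar, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("allowed:", out.Status.Allowed)
}
```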
diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
index 8445f7116..2e5fbea7a 100644
--- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go
@@ -28,9 +28,10 @@ package v1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_LocalSubjectAccessReview = map[string]string{
- "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.",
- "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.",
- "status": "Status is filled in by the server and indicates whether the request is allowed or not",
+ "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.",
+ "status": "Status is filled in by the server and indicates whether the request is allowed or not",
}
func (LocalSubjectAccessReview) SwaggerDoc() map[string]string {
@@ -85,9 +86,10 @@ func (ResourceRule) SwaggerDoc() map[string]string {
}
var map_SelfSubjectAccessReview = map[string]string{
- "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action",
- "spec": "Spec holds information about the request being evaluated. user and groups must be empty",
- "status": "Status is filled in by the server and indicates whether the request is allowed or not",
+ "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the request being evaluated. user and groups must be empty",
+ "status": "Status is filled in by the server and indicates whether the request is allowed or not",
}
func (SelfSubjectAccessReview) SwaggerDoc() map[string]string {
@@ -105,9 +107,10 @@ func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string {
}
var map_SelfSubjectRulesReview = map[string]string{
- "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.",
- "spec": "Spec holds information about the request being evaluated.",
- "status": "Status is filled in by the server and indicates the set of actions a user can perform.",
+ "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the request being evaluated.",
+ "status": "Status is filled in by the server and indicates the set of actions a user can perform.",
}
func (SelfSubjectRulesReview) SwaggerDoc() map[string]string {
@@ -115,6 +118,7 @@ func (SelfSubjectRulesReview) SwaggerDoc() map[string]string {
}
var map_SelfSubjectRulesReviewSpec = map[string]string{
+ "": "SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.",
"namespace": "Namespace to evaluate rules for. Required.",
}
@@ -123,9 +127,10 @@ func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string {
}
var map_SubjectAccessReview = map[string]string{
- "": "SubjectAccessReview checks whether or not a user or group can perform an action.",
- "spec": "Spec holds information about the request being evaluated",
- "status": "Status is filled in by the server and indicates whether the request is allowed or not",
+ "": "SubjectAccessReview checks whether or not a user or group can perform an action.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the request being evaluated",
+ "status": "Status is filled in by the server and indicates whether the request is allowed or not",
}
func (SubjectAccessReview) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto
index a9e221608..4b1a55e0c 100644
--- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto
@@ -41,6 +41,8 @@ message ExtraValue {
// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
// checking.
message LocalSubjectAccessReview {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -134,6 +136,8 @@ message ResourceRule {
// spec.namespace means "in all namespaces". Self is a special case, because users should always be able
// to check whether they can perform an action
message SelfSubjectAccessReview {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -164,6 +168,8 @@ message SelfSubjectAccessReviewSpec {
// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns.
// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.
message SelfSubjectRulesReview {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -175,6 +181,7 @@ message SelfSubjectRulesReview {
optional SubjectRulesReviewStatus status = 3;
}
+// SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.
message SelfSubjectRulesReviewSpec {
// Namespace to evaluate rules for. Required.
optional string namespace = 1;
@@ -182,6 +189,8 @@ message SelfSubjectRulesReviewSpec {
// SubjectAccessReview checks whether or not a user or group can perform an action.
message SubjectAccessReview {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go
index c62b5ea21..265309865 100644
--- a/vendor/k8s.io/api/authorization/v1beta1/types.go
+++ b/vendor/k8s.io/api/authorization/v1beta1/types.go
@@ -33,6 +33,8 @@ import (
// SubjectAccessReview checks whether or not a user or group can perform an action.
type SubjectAccessReview struct {
metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -57,6 +59,8 @@ type SubjectAccessReview struct {
// to check whether they can perform an action
type SelfSubjectAccessReview struct {
metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -80,6 +84,8 @@ type SelfSubjectAccessReview struct {
// checking.
type LocalSubjectAccessReview struct {
metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -212,6 +218,8 @@ type SubjectAccessReviewStatus struct {
// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.
type SelfSubjectRulesReview struct {
metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -223,6 +231,7 @@ type SelfSubjectRulesReview struct {
Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
+// SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.
type SelfSubjectRulesReviewSpec struct {
// Namespace to evaluate rules for. Required.
Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go
index 3ae6e7206..2d291189e 100644
--- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go
@@ -28,9 +28,10 @@ package v1beta1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_LocalSubjectAccessReview = map[string]string{
- "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.",
- "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.",
- "status": "Status is filled in by the server and indicates whether the request is allowed or not",
+ "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.",
+ "status": "Status is filled in by the server and indicates whether the request is allowed or not",
}
func (LocalSubjectAccessReview) SwaggerDoc() map[string]string {
@@ -85,9 +86,10 @@ func (ResourceRule) SwaggerDoc() map[string]string {
}
var map_SelfSubjectAccessReview = map[string]string{
- "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action",
- "spec": "Spec holds information about the request being evaluated. user and groups must be empty",
- "status": "Status is filled in by the server and indicates whether the request is allowed or not",
+ "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the request being evaluated. user and groups must be empty",
+ "status": "Status is filled in by the server and indicates whether the request is allowed or not",
}
func (SelfSubjectAccessReview) SwaggerDoc() map[string]string {
@@ -105,9 +107,10 @@ func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string {
}
var map_SelfSubjectRulesReview = map[string]string{
- "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.",
- "spec": "Spec holds information about the request being evaluated.",
- "status": "Status is filled in by the server and indicates the set of actions a user can perform.",
+ "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the request being evaluated.",
+ "status": "Status is filled in by the server and indicates the set of actions a user can perform.",
}
func (SelfSubjectRulesReview) SwaggerDoc() map[string]string {
@@ -115,6 +118,7 @@ func (SelfSubjectRulesReview) SwaggerDoc() map[string]string {
}
var map_SelfSubjectRulesReviewSpec = map[string]string{
+ "": "SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.",
"namespace": "Namespace to evaluate rules for. Required.",
}
@@ -123,9 +127,10 @@ func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string {
}
var map_SubjectAccessReview = map[string]string{
- "": "SubjectAccessReview checks whether or not a user or group can perform an action.",
- "spec": "Spec holds information about the request being evaluated",
- "status": "Status is filled in by the server and indicates whether the request is allowed or not",
+ "": "SubjectAccessReview checks whether or not a user or group can perform an action.",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Spec holds information about the request being evaluated",
+ "status": "Status is filled in by the server and indicates whether the request is allowed or not",
}
func (SubjectAccessReview) SwaggerDoc() map[string]string {
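
The swagger strings above document the access-review resources. As a rough, hedged illustration of how they are exercised from client-go (the kubeconfig path, namespace, verb, and resource below are assumptions, and the v1 group is used rather than the v1beta1 types touched in this hunk), a SelfSubjectAccessReview can be submitted and its Allowed result read back:

package main

import (
	"context"
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from the local kubeconfig (path is illustrative).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Ask the API server whether the current user may create pods in "default".
	review := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Namespace: "default",
				Verb:      "create",
				Resource:  "pods",
			},
		},
	}
	resp, err := cs.AuthorizationV1().SelfSubjectAccessReviews().Create(context.TODO(), review, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("allowed:", resp.Status.Allowed)
}
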
diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto
index 08b8667c8..606a50da5 100644
--- a/vendor/k8s.io/api/autoscaling/v1/generated.proto
+++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto
@@ -85,6 +85,7 @@ message ContainerResourceMetricStatus {
}
// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+// +structType=atomic
message CrossVersionObjectReference {
// Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
optional string kind = 1;
diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go
index 3343177b2..93b1efb51 100644
--- a/vendor/k8s.io/api/autoscaling/v1/types.go
+++ b/vendor/k8s.io/api/autoscaling/v1/types.go
@@ -23,6 +23,7 @@ import (
)
// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+// +structType=atomic
type CrossVersionObjectReference struct {
// Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
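
The new +structType=atomic marker means server-side apply treats the whole reference as one unit rather than merging it field by field. A minimal sketch, assuming a hypothetical Deployment name, of the reference this applies to:

package main

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
)

func main() {
	// Because CrossVersionObjectReference is marked atomic, an applier that
	// sets any of these fields owns and replaces the reference as a whole.
	ref := autoscalingv1.CrossVersionObjectReference{
		Kind:       "Deployment",
		Name:       "example-web", // hypothetical workload name
		APIVersion: "apps/v1",
	}
	fmt.Printf("scale target: %s/%s (%s)\n", ref.Kind, ref.Name, ref.APIVersion)
}
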
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go
index ac6cb6769..1be548d0a 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go
@@ -27,7 +27,7 @@ import (
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.12
-// +k8s:prerelease-lifecycle-gen:deprecated=1.22
+// +k8s:prerelease-lifecycle-gen:deprecated=1.23
// HorizontalPodAutoscaler is the configuration for a horizontal pod
// autoscaler, which automatically manages the replica count of any resource
diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go
index 83926e3a0..c77892c99 100644
--- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go
+++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.prerelease-lifecycle.go
@@ -29,13 +29,13 @@ func (in *HorizontalPodAutoscaler) APILifecycleIntroduced() (major, minor int) {
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *HorizontalPodAutoscaler) APILifecycleDeprecated() (major, minor int) {
- return 1, 22
+ return 1, 23
}
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *HorizontalPodAutoscaler) APILifecycleRemoved() (major, minor int) {
- return 1, 25
+ return 1, 26
}
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
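
These generated lifecycle methods are what deprecation tooling compares against a cluster version. A small sketch (the 1.24 target version is only an example) of how the returned major/minor pairs can be used:

package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)

func main() {
	var hpa autoscalingv2beta2.HorizontalPodAutoscaler

	// After this bump: deprecated in 1.23, no longer served in 1.26.
	depMajor, depMinor := hpa.APILifecycleDeprecated()
	remMajor, remMinor := hpa.APILifecycleRemoved()

	// Compare against a hypothetical target cluster version.
	clusterMajor, clusterMinor := 1, 24
	deprecated := clusterMajor > depMajor || (clusterMajor == depMajor && clusterMinor >= depMinor)
	removed := clusterMajor > remMajor || (clusterMajor == remMajor && clusterMinor >= remMinor)
	fmt.Printf("autoscaling/v2beta2 HPA deprecated=%v removed=%v on %d.%d\n",
		deprecated, removed, clusterMajor, clusterMinor)
}
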
diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go
index 1407caebc..b8a102831 100644
--- a/vendor/k8s.io/api/batch/v1/generated.pb.go
+++ b/vendor/k8s.io/api/batch/v1/generated.pb.go
@@ -33,6 +33,8 @@ import (
math_bits "math/bits"
reflect "reflect"
strings "strings"
+
+ k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
)
// Reference imports to suppress errors if they are not otherwise used.
@@ -326,6 +328,34 @@ func (m *JobTemplateSpec) XXX_DiscardUnknown() {
var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo
+func (m *UncountedTerminatedPods) Reset() { *m = UncountedTerminatedPods{} }
+func (*UncountedTerminatedPods) ProtoMessage() {}
+func (*UncountedTerminatedPods) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3b52da57c93de713, []int{10}
+}
+func (m *UncountedTerminatedPods) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UncountedTerminatedPods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *UncountedTerminatedPods) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UncountedTerminatedPods.Merge(m, src)
+}
+func (m *UncountedTerminatedPods) XXX_Size() int {
+ return m.Size()
+}
+func (m *UncountedTerminatedPods) XXX_DiscardUnknown() {
+ xxx_messageInfo_UncountedTerminatedPods.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UncountedTerminatedPods proto.InternalMessageInfo
+
func init() {
proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1.CronJob")
proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1.CronJobList")
@@ -337,6 +367,7 @@ func init() {
proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec")
proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus")
proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1.JobTemplateSpec")
+ proto.RegisterType((*UncountedTerminatedPods)(nil), "k8s.io.api.batch.v1.UncountedTerminatedPods")
}
func init() {
@@ -344,89 +375,95 @@ func init() {
}
var fileDescriptor_3b52da57c93de713 = []byte{
- // 1304 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0x41, 0x6f, 0x1b, 0x45,
- 0x14, 0xce, 0xc6, 0x71, 0x6c, 0x8f, 0x93, 0xd4, 0x9d, 0xd2, 0xd6, 0x98, 0xca, 0x1b, 0x4c, 0x41,
- 0x01, 0xc1, 0x9a, 0x94, 0x08, 0x21, 0x04, 0x48, 0xd9, 0x54, 0x15, 0x0d, 0x8e, 0x1a, 0xc6, 0xa9,
- 0x90, 0xa0, 0x20, 0xc6, 0xbb, 0x63, 0x67, 0x9b, 0xdd, 0x9d, 0xd5, 0xce, 0xd8, 0x22, 0x37, 0x7e,
- 0x02, 0xbf, 0x02, 0x71, 0x42, 0x48, 0xdc, 0x39, 0xa2, 0x1e, 0x7b, 0xec, 0x69, 0x45, 0x97, 0x1b,
- 0x17, 0xee, 0xe1, 0x82, 0x76, 0x76, 0xbc, 0xbb, 0xb6, 0x77, 0x43, 0xd3, 0x43, 0xc5, 0xcd, 0xfb,
- 0xe6, 0xfb, 0xbe, 0x79, 0x7e, 0xef, 0xcd, 0x7b, 0x0f, 0x7c, 0x74, 0xf2, 0x01, 0xd3, 0x2c, 0xda,
- 0x3d, 0x19, 0x0f, 0x88, 0xef, 0x12, 0x4e, 0x58, 0x77, 0x42, 0x5c, 0x93, 0xfa, 0x5d, 0x79, 0x80,
- 0x3d, 0xab, 0x3b, 0xc0, 0xdc, 0x38, 0xee, 0x4e, 0xb6, 0xbb, 0x23, 0xe2, 0x12, 0x1f, 0x73, 0x62,
- 0x6a, 0x9e, 0x4f, 0x39, 0x85, 0x57, 0x62, 0x90, 0x86, 0x3d, 0x4b, 0x13, 0x20, 0x6d, 0xb2, 0xdd,
- 0x7a, 0x67, 0x64, 0xf1, 0xe3, 0xf1, 0x40, 0x33, 0xa8, 0xd3, 0x1d, 0xd1, 0x11, 0xed, 0x0a, 0xec,
- 0x60, 0x3c, 0x14, 0x5f, 0xe2, 0x43, 0xfc, 0x8a, 0x35, 0x5a, 0x9d, 0xcc, 0x45, 0x06, 0xf5, 0x49,
- 0xce, 0x3d, 0xad, 0x9d, 0x14, 0xe3, 0x60, 0xe3, 0xd8, 0x72, 0x89, 0x7f, 0xda, 0xf5, 0x4e, 0x46,
- 0x91, 0x81, 0x75, 0x1d, 0xc2, 0x71, 0x1e, 0xab, 0x5b, 0xc4, 0xf2, 0xc7, 0x2e, 0xb7, 0x1c, 0xb2,
- 0x40, 0x78, 0xff, 0xbf, 0x08, 0xcc, 0x38, 0x26, 0x0e, 0x9e, 0xe7, 0x75, 0xfe, 0x51, 0x40, 0x65,
- 0xcf, 0xa7, 0xee, 0x3e, 0x1d, 0xc0, 0x6f, 0x41, 0x35, 0xf2, 0xc7, 0xc4, 0x1c, 0x37, 0x95, 0x4d,
- 0x65, 0xab, 0x7e, 0xeb, 0x5d, 0x2d, 0x8d, 0x52, 0x22, 0xab, 0x79, 0x27, 0xa3, 0xc8, 0xc0, 0xb4,
- 0x08, 0xad, 0x4d, 0xb6, 0xb5, 0x7b, 0x83, 0x87, 0xc4, 0xe0, 0x07, 0x84, 0x63, 0x1d, 0x3e, 0x0a,
- 0xd4, 0xa5, 0x30, 0x50, 0x41, 0x6a, 0x43, 0x89, 0x2a, 0xd4, 0xc1, 0x0a, 0xf3, 0x88, 0xd1, 0x5c,
- 0x16, 0xea, 0x9b, 0x5a, 0x4e, 0x0e, 0x34, 0xe9, 0x4d, 0xdf, 0x23, 0x86, 0xbe, 0x26, 0xd5, 0x56,
- 0xa2, 0x2f, 0x24, 0xb8, 0x70, 0x1f, 0xac, 0x32, 0x8e, 0xf9, 0x98, 0x35, 0x4b, 0x42, 0xa5, 0x73,
- 0xae, 0x8a, 0x40, 0xea, 0x1b, 0x52, 0x67, 0x35, 0xfe, 0x46, 0x52, 0xa1, 0xf3, 0xb3, 0x02, 0xea,
- 0x12, 0xd9, 0xb3, 0x18, 0x87, 0x0f, 0x16, 0x22, 0xa0, 0x3d, 0x5b, 0x04, 0x22, 0xb6, 0xf8, 0xff,
- 0x0d, 0x79, 0x53, 0x75, 0x6a, 0xc9, 0xfc, 0xfb, 0x5d, 0x50, 0xb6, 0x38, 0x71, 0x58, 0x73, 0x79,
- 0xb3, 0xb4, 0x55, 0xbf, 0x75, 0xe3, 0x3c, 0xc7, 0xf5, 0x75, 0x29, 0x54, 0xbe, 0x1b, 0x51, 0x50,
- 0xcc, 0xec, 0xfc, 0xb4, 0x92, 0x38, 0x1c, 0x85, 0x04, 0xbe, 0x0d, 0xaa, 0x51, 0x62, 0xcd, 0xb1,
- 0x4d, 0x84, 0xc3, 0xb5, 0xd4, 0x81, 0xbe, 0xb4, 0xa3, 0x04, 0x01, 0xef, 0x83, 0xeb, 0x8c, 0x63,
- 0x9f, 0x5b, 0xee, 0xe8, 0x36, 0xc1, 0xa6, 0x6d, 0xb9, 0xa4, 0x4f, 0x0c, 0xea, 0x9a, 0x4c, 0x64,
- 0xa4, 0xa4, 0xbf, 0x12, 0x06, 0xea, 0xf5, 0x7e, 0x3e, 0x04, 0x15, 0x71, 0xe1, 0x03, 0x70, 0xd9,
- 0xa0, 0xae, 0x31, 0xf6, 0x7d, 0xe2, 0x1a, 0xa7, 0x87, 0xd4, 0xb6, 0x8c, 0x53, 0x91, 0x9c, 0x9a,
- 0xae, 0x49, 0x6f, 0x2e, 0xef, 0xcd, 0x03, 0xce, 0xf2, 0x8c, 0x68, 0x51, 0x08, 0xbe, 0x0e, 0x2a,
- 0x6c, 0xcc, 0x3c, 0xe2, 0x9a, 0xcd, 0x95, 0x4d, 0x65, 0xab, 0xaa, 0xd7, 0xc3, 0x40, 0xad, 0xf4,
- 0x63, 0x13, 0x9a, 0x9e, 0xc1, 0xaf, 0x40, 0xfd, 0x21, 0x1d, 0x1c, 0x11, 0xc7, 0xb3, 0x31, 0x27,
- 0xcd, 0xb2, 0xc8, 0xde, 0xcd, 0xdc, 0x10, 0xef, 0xa7, 0x38, 0x51, 0x65, 0x57, 0xa4, 0x93, 0xf5,
- 0xcc, 0x01, 0xca, 0xaa, 0xc1, 0x6f, 0x40, 0x8b, 0x8d, 0x0d, 0x83, 0x30, 0x36, 0x1c, 0xdb, 0xfb,
- 0x74, 0xc0, 0x3e, 0xb5, 0x18, 0xa7, 0xfe, 0x69, 0xcf, 0x72, 0x2c, 0xde, 0x5c, 0xdd, 0x54, 0xb6,
- 0xca, 0x7a, 0x3b, 0x0c, 0xd4, 0x56, 0xbf, 0x10, 0x85, 0xce, 0x51, 0x80, 0x08, 0x5c, 0x1b, 0x62,
- 0xcb, 0x26, 0xe6, 0x82, 0x76, 0x45, 0x68, 0xb7, 0xc2, 0x40, 0xbd, 0x76, 0x27, 0x17, 0x81, 0x0a,
- 0x98, 0x9d, 0xdf, 0x96, 0xc1, 0xfa, 0xcc, 0x2b, 0x80, 0x9f, 0x81, 0x55, 0x6c, 0x70, 0x6b, 0x12,
- 0x95, 0x4a, 0x54, 0x80, 0xaf, 0x65, 0xa3, 0x13, 0xf5, 0xaf, 0xf4, 0x2d, 0x23, 0x32, 0x24, 0x51,
- 0x12, 0x48, 0xfa, 0x74, 0x76, 0x05, 0x15, 0x49, 0x09, 0x68, 0x83, 0x86, 0x8d, 0x19, 0x9f, 0x56,
- 0xd9, 0x91, 0xe5, 0x10, 0x91, 0x9f, 0xfa, 0xad, 0xb7, 0x9e, 0xed, 0xc9, 0x44, 0x0c, 0xfd, 0xa5,
- 0x30, 0x50, 0x1b, 0xbd, 0x39, 0x1d, 0xb4, 0xa0, 0x0c, 0x7d, 0x00, 0x85, 0x2d, 0x09, 0xa1, 0xb8,
- 0xaf, 0x7c, 0xe1, 0xfb, 0xae, 0x85, 0x81, 0x0a, 0x7b, 0x0b, 0x4a, 0x28, 0x47, 0xbd, 0xf3, 0xb7,
- 0x02, 0x4a, 0x2f, 0xa6, 0x2d, 0x7e, 0x32, 0xd3, 0x16, 0x6f, 0x14, 0x15, 0x6d, 0x61, 0x4b, 0xbc,
- 0x33, 0xd7, 0x12, 0xdb, 0x85, 0x0a, 0xe7, 0xb7, 0xc3, 0xdf, 0x4b, 0x60, 0x6d, 0x9f, 0x0e, 0xf6,
- 0xa8, 0x6b, 0x5a, 0xdc, 0xa2, 0x2e, 0xdc, 0x01, 0x2b, 0xfc, 0xd4, 0x9b, 0xb6, 0x96, 0xcd, 0xe9,
- 0xd5, 0x47, 0xa7, 0x1e, 0x39, 0x0b, 0xd4, 0x46, 0x16, 0x1b, 0xd9, 0x90, 0x40, 0xc3, 0x5e, 0xe2,
- 0xce, 0xb2, 0xe0, 0xed, 0xcc, 0x5e, 0x77, 0x16, 0xa8, 0x39, 0x83, 0x53, 0x4b, 0x94, 0x66, 0x9d,
- 0x82, 0x23, 0xb0, 0x1e, 0x25, 0xe7, 0xd0, 0xa7, 0x83, 0xb8, 0xca, 0x4a, 0x17, 0xce, 0xfa, 0x55,
- 0xe9, 0xc0, 0x7a, 0x2f, 0x2b, 0x84, 0x66, 0x75, 0xe1, 0x24, 0xae, 0xb1, 0x23, 0x1f, 0xbb, 0x2c,
- 0xfe, 0x4b, 0xcf, 0x57, 0xd3, 0x2d, 0x79, 0x9b, 0xa8, 0xb3, 0x59, 0x35, 0x94, 0x73, 0x03, 0x7c,
- 0x03, 0xac, 0xfa, 0x04, 0x33, 0xea, 0x8a, 0x7a, 0xae, 0xa5, 0xd9, 0x41, 0xc2, 0x8a, 0xe4, 0x29,
- 0x7c, 0x13, 0x54, 0x1c, 0xc2, 0x18, 0x1e, 0x11, 0xd1, 0x71, 0x6a, 0xfa, 0x25, 0x09, 0xac, 0x1c,
- 0xc4, 0x66, 0x34, 0x3d, 0xef, 0xfc, 0xa8, 0x80, 0xca, 0x8b, 0x99, 0x69, 0x1f, 0xcf, 0xce, 0xb4,
- 0x66, 0x51, 0xe5, 0x15, 0xcc, 0xb3, 0x5f, 0xca, 0xc2, 0x51, 0x31, 0xcb, 0xb6, 0x41, 0xdd, 0xc3,
- 0x3e, 0xb6, 0x6d, 0x62, 0x5b, 0xcc, 0x11, 0xbe, 0x96, 0xf5, 0x4b, 0x51, 0x5f, 0x3e, 0x4c, 0xcd,
- 0x28, 0x8b, 0x89, 0x28, 0x06, 0x75, 0x3c, 0x9b, 0x44, 0xc1, 0x8c, 0xcb, 0x4d, 0x52, 0xf6, 0x52,
- 0x33, 0xca, 0x62, 0xe0, 0x3d, 0x70, 0x35, 0xee, 0x60, 0xf3, 0x13, 0xb0, 0x24, 0x26, 0xe0, 0xcb,
- 0x61, 0xa0, 0x5e, 0xdd, 0xcd, 0x03, 0xa0, 0x7c, 0x1e, 0xdc, 0x01, 0x6b, 0x03, 0x6c, 0x9c, 0xd0,
- 0xe1, 0x30, 0xdb, 0xb1, 0x1b, 0x61, 0xa0, 0xae, 0xe9, 0x19, 0x3b, 0x9a, 0x41, 0xc1, 0xaf, 0x41,
- 0x95, 0x11, 0x9b, 0x18, 0x9c, 0xfa, 0xb2, 0xc4, 0xde, 0x7b, 0xc6, 0xac, 0xe0, 0x01, 0xb1, 0xfb,
- 0x92, 0xaa, 0xaf, 0x89, 0x49, 0x2f, 0xbf, 0x50, 0x22, 0x09, 0x3f, 0x04, 0x1b, 0x0e, 0x76, 0xc7,
- 0x38, 0x41, 0x8a, 0xda, 0xaa, 0xea, 0x30, 0x0c, 0xd4, 0x8d, 0x83, 0x99, 0x13, 0x34, 0x87, 0x84,
- 0x9f, 0x83, 0x2a, 0x9f, 0x8e, 0xd1, 0x55, 0xe1, 0x5a, 0xee, 0xa0, 0x38, 0xa4, 0xe6, 0xcc, 0x14,
- 0x4d, 0xaa, 0x24, 0x19, 0xa1, 0x89, 0x4c, 0xb4, 0x78, 0x70, 0x6e, 0xcb, 0x88, 0xed, 0x0e, 0x39,
- 0xf1, 0xef, 0x58, 0xae, 0xc5, 0x8e, 0x89, 0xd9, 0xac, 0x8a, 0x70, 0x89, 0xc5, 0xe3, 0xe8, 0xa8,
- 0x97, 0x07, 0x41, 0x45, 0x5c, 0xd8, 0x03, 0x1b, 0x69, 0x6a, 0x0f, 0xa8, 0x49, 0x9a, 0x35, 0xf1,
- 0x30, 0x6e, 0x46, 0xff, 0x72, 0x6f, 0xe6, 0xe4, 0x6c, 0xc1, 0x82, 0xe6, 0xb8, 0xd9, 0x45, 0x03,
- 0x14, 0x2f, 0x1a, 0x9d, 0xbf, 0x4a, 0xa0, 0x96, 0xce, 0xd4, 0xfb, 0x00, 0x18, 0xd3, 0xc6, 0xc5,
- 0xe4, 0x5c, 0x7d, 0xb5, 0xe8, 0x11, 0x24, 0x2d, 0x2e, 0x9d, 0x07, 0x89, 0x89, 0xa1, 0x8c, 0x10,
- 0xfc, 0x02, 0xd4, 0xc4, 0xb6, 0x25, 0x5a, 0xd0, 0xf2, 0x85, 0x5b, 0xd0, 0x7a, 0x18, 0xa8, 0xb5,
- 0xfe, 0x54, 0x00, 0xa5, 0x5a, 0x70, 0x98, 0x0d, 0xd9, 0x73, 0xb6, 0x53, 0x38, 0x1b, 0x5e, 0x71,
- 0xc5, 0x9c, 0x6a, 0xd4, 0xd4, 0xe4, 0xae, 0xb1, 0x22, 0x12, 0x5c, 0xb4, 0x46, 0x74, 0x41, 0x4d,
- 0xec, 0x45, 0xc4, 0x24, 0xa6, 0xa8, 0xd1, 0xb2, 0x7e, 0x59, 0x42, 0x6b, 0xfd, 0xe9, 0x01, 0x4a,
- 0x31, 0x91, 0x70, 0xbc, 0xf0, 0xc8, 0xb5, 0x2b, 0x11, 0x8e, 0xd7, 0x23, 0x24, 0x4f, 0xe1, 0x6d,
- 0xd0, 0x90, 0x2e, 0x11, 0xf3, 0xae, 0x6b, 0x92, 0xef, 0x08, 0x13, 0x4f, 0xb3, 0xa6, 0x37, 0x25,
- 0xa3, 0xb1, 0x37, 0x77, 0x8e, 0x16, 0x18, 0x9d, 0x5f, 0x15, 0x70, 0x69, 0x6e, 0x5d, 0xfc, 0xff,
- 0xef, 0x03, 0xfa, 0xd6, 0xa3, 0xa7, 0xed, 0xa5, 0xc7, 0x4f, 0xdb, 0x4b, 0x4f, 0x9e, 0xb6, 0x97,
- 0xbe, 0x0f, 0xdb, 0xca, 0xa3, 0xb0, 0xad, 0x3c, 0x0e, 0xdb, 0xca, 0x93, 0xb0, 0xad, 0xfc, 0x11,
- 0xb6, 0x95, 0x1f, 0xfe, 0x6c, 0x2f, 0x7d, 0xb9, 0x3c, 0xd9, 0xfe, 0x37, 0x00, 0x00, 0xff, 0xff,
- 0x5a, 0x54, 0xec, 0x5f, 0x44, 0x0f, 0x00, 0x00,
+ // 1395 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xcf, 0x6f, 0x1b, 0xc5,
+ 0x17, 0xcf, 0xc6, 0x76, 0x6c, 0x8f, 0x93, 0xd4, 0x9d, 0x7e, 0xdb, 0xfa, 0x6b, 0x2a, 0x6f, 0x6a,
+ 0x0a, 0x0a, 0xa8, 0xac, 0x49, 0x88, 0x10, 0x42, 0x80, 0x94, 0x4d, 0x55, 0x68, 0x70, 0xd4, 0x30,
+ 0x76, 0x84, 0x04, 0x05, 0xb1, 0xde, 0x1d, 0x3b, 0xdb, 0xec, 0xee, 0x58, 0x3b, 0x63, 0x8b, 0xdc,
+ 0x90, 0xf8, 0x07, 0xf8, 0x2b, 0x10, 0x27, 0x84, 0x04, 0x67, 0x8e, 0xa8, 0xc7, 0x1e, 0x7b, 0x5a,
+ 0xd1, 0xe5, 0x0f, 0x80, 0x73, 0xb8, 0xa0, 0x9d, 0x1d, 0xef, 0x0f, 0x7b, 0x37, 0xa4, 0x3d, 0x54,
+ 0xdc, 0xbc, 0x6f, 0x3e, 0x9f, 0xcf, 0xbc, 0x99, 0xf7, 0xe6, 0xbd, 0x67, 0xf0, 0xde, 0xc9, 0x3b,
+ 0x54, 0x31, 0x49, 0xe7, 0x64, 0x32, 0xc0, 0xae, 0x83, 0x19, 0xa6, 0x9d, 0x29, 0x76, 0x0c, 0xe2,
+ 0x76, 0xc4, 0x82, 0x36, 0x36, 0x3b, 0x03, 0x8d, 0xe9, 0xc7, 0x9d, 0xe9, 0x56, 0x67, 0x84, 0x1d,
+ 0xec, 0x6a, 0x0c, 0x1b, 0xca, 0xd8, 0x25, 0x8c, 0xc0, 0x2b, 0x21, 0x48, 0xd1, 0xc6, 0xa6, 0xc2,
+ 0x41, 0xca, 0x74, 0xab, 0xf9, 0xc6, 0xc8, 0x64, 0xc7, 0x93, 0x81, 0xa2, 0x13, 0xbb, 0x33, 0x22,
+ 0x23, 0xd2, 0xe1, 0xd8, 0xc1, 0x64, 0xc8, 0xbf, 0xf8, 0x07, 0xff, 0x15, 0x6a, 0x34, 0xdb, 0x89,
+ 0x8d, 0x74, 0xe2, 0xe2, 0x8c, 0x7d, 0x9a, 0x3b, 0x31, 0xc6, 0xd6, 0xf4, 0x63, 0xd3, 0xc1, 0xee,
+ 0x69, 0x67, 0x7c, 0x32, 0x0a, 0x0c, 0xb4, 0x63, 0x63, 0xa6, 0x65, 0xb1, 0x3a, 0x79, 0x2c, 0x77,
+ 0xe2, 0x30, 0xd3, 0xc6, 0x0b, 0x84, 0xb7, 0xff, 0x8d, 0x40, 0xf5, 0x63, 0x6c, 0x6b, 0xf3, 0xbc,
+ 0xf6, 0xdf, 0x12, 0x28, 0xef, 0xb9, 0xc4, 0xd9, 0x27, 0x03, 0xf8, 0x15, 0xa8, 0x04, 0xfe, 0x18,
+ 0x1a, 0xd3, 0x1a, 0xd2, 0x86, 0xb4, 0x59, 0xdb, 0x7e, 0x53, 0x89, 0x6f, 0x29, 0x92, 0x55, 0xc6,
+ 0x27, 0xa3, 0xc0, 0x40, 0x95, 0x00, 0xad, 0x4c, 0xb7, 0x94, 0xfb, 0x83, 0x87, 0x58, 0x67, 0x07,
+ 0x98, 0x69, 0x2a, 0x7c, 0xe4, 0xc9, 0x4b, 0xbe, 0x27, 0x83, 0xd8, 0x86, 0x22, 0x55, 0xa8, 0x82,
+ 0x22, 0x1d, 0x63, 0xbd, 0xb1, 0xcc, 0xd5, 0x37, 0x94, 0x8c, 0x18, 0x28, 0xc2, 0x9b, 0xde, 0x18,
+ 0xeb, 0xea, 0xaa, 0x50, 0x2b, 0x06, 0x5f, 0x88, 0x73, 0xe1, 0x3e, 0x58, 0xa1, 0x4c, 0x63, 0x13,
+ 0xda, 0x28, 0x70, 0x95, 0xf6, 0xb9, 0x2a, 0x1c, 0xa9, 0xae, 0x0b, 0x9d, 0x95, 0xf0, 0x1b, 0x09,
+ 0x85, 0xf6, 0x8f, 0x12, 0xa8, 0x09, 0x64, 0xd7, 0xa4, 0x0c, 0x3e, 0x58, 0xb8, 0x01, 0xe5, 0x62,
+ 0x37, 0x10, 0xb0, 0xf9, 0xf9, 0xeb, 0x62, 0xa7, 0xca, 0xcc, 0x92, 0x38, 0xfd, 0x2e, 0x28, 0x99,
+ 0x0c, 0xdb, 0xb4, 0xb1, 0xbc, 0x51, 0xd8, 0xac, 0x6d, 0xdf, 0x38, 0xcf, 0x71, 0x75, 0x4d, 0x08,
+ 0x95, 0xee, 0x05, 0x14, 0x14, 0x32, 0xdb, 0x3f, 0x14, 0x23, 0x87, 0x83, 0x2b, 0x81, 0xb7, 0x41,
+ 0x25, 0x08, 0xac, 0x31, 0xb1, 0x30, 0x77, 0xb8, 0x1a, 0x3b, 0xd0, 0x13, 0x76, 0x14, 0x21, 0xe0,
+ 0x11, 0xb8, 0x4e, 0x99, 0xe6, 0x32, 0xd3, 0x19, 0xdd, 0xc1, 0x9a, 0x61, 0x99, 0x0e, 0xee, 0x61,
+ 0x9d, 0x38, 0x06, 0xe5, 0x11, 0x29, 0xa8, 0x2f, 0xf9, 0x9e, 0x7c, 0xbd, 0x97, 0x0d, 0x41, 0x79,
+ 0x5c, 0xf8, 0x00, 0x5c, 0xd6, 0x89, 0xa3, 0x4f, 0x5c, 0x17, 0x3b, 0xfa, 0xe9, 0x21, 0xb1, 0x4c,
+ 0xfd, 0x94, 0x07, 0xa7, 0xaa, 0x2a, 0xc2, 0x9b, 0xcb, 0x7b, 0xf3, 0x80, 0xb3, 0x2c, 0x23, 0x5a,
+ 0x14, 0x82, 0xaf, 0x80, 0x32, 0x9d, 0xd0, 0x31, 0x76, 0x8c, 0x46, 0x71, 0x43, 0xda, 0xac, 0xa8,
+ 0x35, 0xdf, 0x93, 0xcb, 0xbd, 0xd0, 0x84, 0x66, 0x6b, 0xf0, 0x73, 0x50, 0x7b, 0x48, 0x06, 0x7d,
+ 0x6c, 0x8f, 0x2d, 0x8d, 0xe1, 0x46, 0x89, 0x47, 0xef, 0x56, 0xe6, 0x15, 0xef, 0xc7, 0x38, 0x9e,
+ 0x65, 0x57, 0x84, 0x93, 0xb5, 0xc4, 0x02, 0x4a, 0xaa, 0xc1, 0x2f, 0x41, 0x93, 0x4e, 0x74, 0x1d,
+ 0x53, 0x3a, 0x9c, 0x58, 0xfb, 0x64, 0x40, 0x3f, 0x32, 0x29, 0x23, 0xee, 0x69, 0xd7, 0xb4, 0x4d,
+ 0xd6, 0x58, 0xd9, 0x90, 0x36, 0x4b, 0x6a, 0xcb, 0xf7, 0xe4, 0x66, 0x2f, 0x17, 0x85, 0xce, 0x51,
+ 0x80, 0x08, 0x5c, 0x1b, 0x6a, 0xa6, 0x85, 0x8d, 0x05, 0xed, 0x32, 0xd7, 0x6e, 0xfa, 0x9e, 0x7c,
+ 0xed, 0x6e, 0x26, 0x02, 0xe5, 0x30, 0xdb, 0xbf, 0x2e, 0x83, 0xb5, 0xd4, 0x2b, 0x80, 0x1f, 0x83,
+ 0x15, 0x4d, 0x67, 0xe6, 0x34, 0x48, 0x95, 0x20, 0x01, 0x5f, 0x4e, 0xde, 0x4e, 0x50, 0xbf, 0xe2,
+ 0xb7, 0x8c, 0xf0, 0x10, 0x07, 0x41, 0xc0, 0xf1, 0xd3, 0xd9, 0xe5, 0x54, 0x24, 0x24, 0xa0, 0x05,
+ 0xea, 0x96, 0x46, 0xd9, 0x2c, 0xcb, 0xfa, 0xa6, 0x8d, 0x79, 0x7c, 0x6a, 0xdb, 0xaf, 0x5f, 0xec,
+ 0xc9, 0x04, 0x0c, 0xf5, 0x7f, 0xbe, 0x27, 0xd7, 0xbb, 0x73, 0x3a, 0x68, 0x41, 0x19, 0xba, 0x00,
+ 0x72, 0x5b, 0x74, 0x85, 0x7c, 0xbf, 0xd2, 0x33, 0xef, 0x77, 0xcd, 0xf7, 0x64, 0xd8, 0x5d, 0x50,
+ 0x42, 0x19, 0xea, 0xed, 0x3f, 0x25, 0x50, 0x78, 0x31, 0x65, 0xf1, 0x83, 0x54, 0x59, 0xbc, 0x91,
+ 0x97, 0xb4, 0xb9, 0x25, 0xf1, 0xee, 0x5c, 0x49, 0x6c, 0xe5, 0x2a, 0x9c, 0x5f, 0x0e, 0x7f, 0x2b,
+ 0x80, 0xd5, 0x7d, 0x32, 0xd8, 0x23, 0x8e, 0x61, 0x32, 0x93, 0x38, 0x70, 0x07, 0x14, 0xd9, 0xe9,
+ 0x78, 0x56, 0x5a, 0x36, 0x66, 0x5b, 0xf7, 0x4f, 0xc7, 0xf8, 0xcc, 0x93, 0xeb, 0x49, 0x6c, 0x60,
+ 0x43, 0x1c, 0x0d, 0xbb, 0x91, 0x3b, 0xcb, 0x9c, 0xb7, 0x93, 0xde, 0xee, 0xcc, 0x93, 0x33, 0x1a,
+ 0xa7, 0x12, 0x29, 0xa5, 0x9d, 0x82, 0x23, 0xb0, 0x16, 0x04, 0xe7, 0xd0, 0x25, 0x83, 0x30, 0xcb,
+ 0x0a, 0xcf, 0x1c, 0xf5, 0xab, 0xc2, 0x81, 0xb5, 0x6e, 0x52, 0x08, 0xa5, 0x75, 0xe1, 0x34, 0xcc,
+ 0xb1, 0xbe, 0xab, 0x39, 0x34, 0x3c, 0xd2, 0xf3, 0xe5, 0x74, 0x53, 0xec, 0xc6, 0xf3, 0x2c, 0xad,
+ 0x86, 0x32, 0x76, 0x80, 0xaf, 0x82, 0x15, 0x17, 0x6b, 0x94, 0x38, 0x3c, 0x9f, 0xab, 0x71, 0x74,
+ 0x10, 0xb7, 0x22, 0xb1, 0x0a, 0x5f, 0x03, 0x65, 0x1b, 0x53, 0xaa, 0x8d, 0x30, 0xaf, 0x38, 0x55,
+ 0xf5, 0x92, 0x00, 0x96, 0x0f, 0x42, 0x33, 0x9a, 0xad, 0xb7, 0xbf, 0x97, 0x40, 0xf9, 0xc5, 0xf4,
+ 0xb4, 0xf7, 0xd3, 0x3d, 0xad, 0x91, 0x97, 0x79, 0x39, 0xfd, 0xec, 0xa7, 0x12, 0x77, 0x94, 0xf7,
+ 0xb2, 0x2d, 0x50, 0x1b, 0x6b, 0xae, 0x66, 0x59, 0xd8, 0x32, 0xa9, 0xcd, 0x7d, 0x2d, 0xa9, 0x97,
+ 0x82, 0xba, 0x7c, 0x18, 0x9b, 0x51, 0x12, 0x13, 0x50, 0x74, 0x62, 0x8f, 0x2d, 0x1c, 0x5c, 0x66,
+ 0x98, 0x6e, 0x82, 0xb2, 0x17, 0x9b, 0x51, 0x12, 0x03, 0xef, 0x83, 0xab, 0x61, 0x05, 0x9b, 0xef,
+ 0x80, 0x05, 0xde, 0x01, 0xff, 0xef, 0x7b, 0xf2, 0xd5, 0xdd, 0x2c, 0x00, 0xca, 0xe6, 0xc1, 0x1d,
+ 0xb0, 0x3a, 0xd0, 0xf4, 0x13, 0x32, 0x1c, 0x26, 0x2b, 0x76, 0xdd, 0xf7, 0xe4, 0x55, 0x35, 0x61,
+ 0x47, 0x29, 0x14, 0xfc, 0x02, 0x54, 0x28, 0xb6, 0xb0, 0xce, 0x88, 0x2b, 0x52, 0xec, 0xad, 0x0b,
+ 0x46, 0x45, 0x1b, 0x60, 0xab, 0x27, 0xa8, 0xea, 0x2a, 0xef, 0xf4, 0xe2, 0x0b, 0x45, 0x92, 0xf0,
+ 0x5d, 0xb0, 0x6e, 0x6b, 0xce, 0x44, 0x8b, 0x90, 0x3c, 0xb7, 0x2a, 0x2a, 0xf4, 0x3d, 0x79, 0xfd,
+ 0x20, 0xb5, 0x82, 0xe6, 0x90, 0xf0, 0x13, 0x50, 0x61, 0xb3, 0x36, 0xba, 0xc2, 0x5d, 0xcb, 0x6c,
+ 0x14, 0x87, 0xc4, 0x48, 0x75, 0xd1, 0x28, 0x4b, 0xa2, 0x16, 0x1a, 0xc9, 0x04, 0x83, 0x07, 0x63,
+ 0x96, 0xb8, 0xb1, 0xdd, 0x21, 0xc3, 0xee, 0x5d, 0xd3, 0x31, 0xe9, 0x31, 0x36, 0x1a, 0x15, 0x7e,
+ 0x5d, 0x7c, 0xf0, 0xe8, 0xf7, 0xbb, 0x59, 0x10, 0x94, 0xc7, 0x85, 0x5d, 0xb0, 0x1e, 0x87, 0xf6,
+ 0x80, 0x18, 0xb8, 0x51, 0xe5, 0x0f, 0xe3, 0x56, 0x70, 0xca, 0xbd, 0xd4, 0xca, 0xd9, 0x82, 0x05,
+ 0xcd, 0x71, 0x93, 0x83, 0x06, 0xc8, 0x1f, 0x34, 0xda, 0x7f, 0x15, 0x41, 0x35, 0xee, 0xa9, 0x47,
+ 0x00, 0xe8, 0xb3, 0xc2, 0x45, 0x45, 0x5f, 0xbd, 0x99, 0xf7, 0x08, 0xa2, 0x12, 0x17, 0xf7, 0x83,
+ 0xc8, 0x44, 0x51, 0x42, 0x08, 0x7e, 0x0a, 0xaa, 0x7c, 0xda, 0xe2, 0x25, 0x68, 0xf9, 0x99, 0x4b,
+ 0xd0, 0x9a, 0xef, 0xc9, 0xd5, 0xde, 0x4c, 0x00, 0xc5, 0x5a, 0x70, 0x98, 0xbc, 0xb2, 0xe7, 0x2c,
+ 0xa7, 0x30, 0x7d, 0xbd, 0x7c, 0x8b, 0x39, 0xd5, 0xa0, 0xa8, 0x89, 0x59, 0xa3, 0xc8, 0x03, 0x9c,
+ 0x37, 0x46, 0x74, 0x40, 0x95, 0xcf, 0x45, 0xd8, 0xc0, 0x06, 0xcf, 0xd1, 0x92, 0x7a, 0x59, 0x40,
+ 0xab, 0xbd, 0xd9, 0x02, 0x8a, 0x31, 0x81, 0x70, 0x38, 0xf0, 0x88, 0xb1, 0x2b, 0x12, 0x0e, 0xc7,
+ 0x23, 0x24, 0x56, 0xe1, 0x1d, 0x50, 0x17, 0x2e, 0x61, 0xe3, 0x9e, 0x63, 0xe0, 0xaf, 0x31, 0xe5,
+ 0x4f, 0xb3, 0xaa, 0x36, 0x04, 0xa3, 0xbe, 0x37, 0xb7, 0x8e, 0x16, 0x18, 0xf0, 0x5b, 0x09, 0x5c,
+ 0x9f, 0x38, 0x3a, 0x99, 0x38, 0x0c, 0x1b, 0x7d, 0xec, 0xda, 0xa6, 0x13, 0xfc, 0x79, 0x3a, 0x24,
+ 0x06, 0xe5, 0x99, 0x5b, 0xdb, 0xbe, 0x9d, 0x19, 0xec, 0xa3, 0x6c, 0x4e, 0x98, 0xe7, 0x39, 0x8b,
+ 0x28, 0x6f, 0xa7, 0xf6, 0xcf, 0x12, 0xb8, 0x34, 0x37, 0xb4, 0xfe, 0xf7, 0xa7, 0x92, 0xf6, 0x2f,
+ 0x12, 0xc8, 0x3b, 0x2a, 0x3c, 0x4c, 0x86, 0x3d, 0x78, 0x35, 0x55, 0x75, 0x3b, 0x15, 0xf2, 0x33,
+ 0x4f, 0xbe, 0x99, 0xf7, 0x97, 0x36, 0x18, 0x32, 0xa8, 0x72, 0x74, 0xef, 0x4e, 0x32, 0x2f, 0x3e,
+ 0x8c, 0xf2, 0x62, 0x99, 0xcb, 0x75, 0xe2, 0x9c, 0xb8, 0x98, 0x96, 0xa0, 0xab, 0x9b, 0x8f, 0x9e,
+ 0xb6, 0x96, 0x1e, 0x3f, 0x6d, 0x2d, 0x3d, 0x79, 0xda, 0x5a, 0xfa, 0xc6, 0x6f, 0x49, 0x8f, 0xfc,
+ 0x96, 0xf4, 0xd8, 0x6f, 0x49, 0x4f, 0xfc, 0x96, 0xf4, 0xbb, 0xdf, 0x92, 0xbe, 0xfb, 0xa3, 0xb5,
+ 0xf4, 0xd9, 0xf2, 0x74, 0xeb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xfc, 0x0b, 0x48, 0x81,
+ 0x10, 0x00, 0x00,
}
func (m *CronJob) Marshal() (dAtA []byte, err error) {
@@ -938,6 +975,18 @@ func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.UncountedTerminatedPods != nil {
+ {
+ size, err := m.UncountedTerminatedPods.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
i -= len(m.CompletedIndexes)
copy(dAtA[i:], m.CompletedIndexes)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.CompletedIndexes)))
@@ -1036,6 +1085,47 @@ func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
return len(dAtA) - i, nil
}
+func (m *UncountedTerminatedPods) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UncountedTerminatedPods) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UncountedTerminatedPods) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Failed) > 0 {
+ for iNdEx := len(m.Failed) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Failed[iNdEx])
+ copy(dAtA[i:], m.Failed[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Failed[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Succeeded) > 0 {
+ for iNdEx := len(m.Succeeded) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Succeeded[iNdEx])
+ copy(dAtA[i:], m.Succeeded[iNdEx])
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Succeeded[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
@@ -1247,6 +1337,10 @@ func (m *JobStatus) Size() (n int) {
n += 1 + sovGenerated(uint64(m.Failed))
l = len(m.CompletedIndexes)
n += 1 + l + sovGenerated(uint64(l))
+ if m.UncountedTerminatedPods != nil {
+ l = m.UncountedTerminatedPods.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -1263,6 +1357,27 @@ func (m *JobTemplateSpec) Size() (n int) {
return n
}
+func (m *UncountedTerminatedPods) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Succeeded) > 0 {
+ for _, s := range m.Succeeded {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ if len(m.Failed) > 0 {
+ for _, s := range m.Failed {
+ l = len(s)
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ }
+ return n
+}
+
func sovGenerated(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
@@ -1409,6 +1524,7 @@ func (this *JobStatus) String() string {
`Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`,
`Failed:` + fmt.Sprintf("%v", this.Failed) + `,`,
`CompletedIndexes:` + fmt.Sprintf("%v", this.CompletedIndexes) + `,`,
+ `UncountedTerminatedPods:` + strings.Replace(this.UncountedTerminatedPods.String(), "UncountedTerminatedPods", "UncountedTerminatedPods", 1) + `,`,
`}`,
}, "")
return s
@@ -1424,6 +1540,17 @@ func (this *JobTemplateSpec) String() string {
}, "")
return s
}
+func (this *UncountedTerminatedPods) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&UncountedTerminatedPods{`,
+ `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`,
+ `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func valueToStringGenerated(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
@@ -3110,6 +3237,42 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error {
}
m.CompletedIndexes = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UncountedTerminatedPods", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.UncountedTerminatedPods == nil {
+ m.UncountedTerminatedPods = &UncountedTerminatedPods{}
+ }
+ if err := m.UncountedTerminatedPods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -3247,6 +3410,120 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error {
}
return nil
}
+func (m *UncountedTerminatedPods) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UncountedTerminatedPods: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UncountedTerminatedPods: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Succeeded = append(m.Succeeded, k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Failed = append(m.Failed, k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
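
The generated marshalling code above round-trips the new UncountedTerminatedPods message through the protobuf wire format. A hedged sanity-check sketch (the UIDs are made up) using the Marshal and Unmarshal methods added in this hunk:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	in := &batchv1.UncountedTerminatedPods{
		Succeeded: []types.UID{"3f6d5c1e-0000-0000-0000-000000000001"}, // example UID
		Failed:    []types.UID{"3f6d5c1e-0000-0000-0000-000000000002"}, // example UID
	}

	// Marshal uses the generated MarshalToSizedBuffer shown above; field 1
	// (succeeded) is written with tag 0x0a and field 2 (failed) with 0x12.
	data, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	out := &batchv1.UncountedTerminatedPods{}
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped %d bytes: %+v\n", len(data), out)
}
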
diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto
index 04f0e7ea7..1aefe0526 100644
--- a/vendor/k8s.io/api/batch/v1/generated.proto
+++ b/vendor/k8s.io/api/batch/v1/generated.proto
@@ -246,9 +246,11 @@ message JobSpec {
// for each index.
// When value is `Indexed`, .spec.completions must be specified and
// `.spec.parallelism` must be less than or equal to 10^5.
+ // In addition, the Pod name takes the form
+ // `$(job-name)-$(index)-$(random-string)`,
+ // and the Pod hostname takes the form `$(job-name)-$(index)`.
//
- // This field is alpha-level and is only honored by servers that enable the
- // IndexedJob feature gate. More completion modes can be added in the future.
+ // This field is beta-level. More completion modes can be added in the future.
// If the Job controller observes a mode that it doesn't recognize, the
// controller skips updates for the Job.
// +optional
@@ -260,9 +262,11 @@ message JobSpec {
// false to true), the Job controller will delete all active Pods associated
// with this Job. Users must design their workload to gracefully handle this.
// Suspending a Job will reset the StartTime field of the Job, effectively
- // resetting the ActiveDeadlineSeconds timer too. This is an alpha field and
- // requires the SuspendJob feature gate to be enabled; otherwise this field
- // may not be set to true. Defaults to false.
+ // resetting the ActiveDeadlineSeconds timer too. Defaults to false.
+ //
+ // This field is beta-level, gated by SuspendJob feature flag (enabled by
+ // default).
+ //
// +optional
optional bool suspend = 10;
}
@@ -317,6 +321,24 @@ message JobStatus {
// represented as "1,3-5,7".
// +optional
optional string completedIndexes = 7;
+
+ // UncountedTerminatedPods holds the UIDs of Pods that have terminated but
+ // the job controller hasn't yet accounted for in the status counters.
+ //
+ // The job controller creates pods with a finalizer. When a pod terminates
+ // (succeeded or failed), the controller does three steps to account for it
+ // in the job status:
+ // (1) Add the pod UID to the arrays in this field.
+ // (2) Remove the pod finalizer.
+ // (3) Remove the pod UID from the arrays while increasing the corresponding
+ // counter.
+ //
+ // This field is alpha-level. The job controller only makes use of this field
+ // when the feature gate JobTrackingWithFinalizers is enabled.
+ // Old jobs might not be tracked using this field, in which case the field
+ // remains null.
+ // +optional
+ optional UncountedTerminatedPods uncountedTerminatedPods = 8;
}
// JobTemplateSpec describes the data a Job should have when created from a template
@@ -332,3 +354,17 @@ message JobTemplateSpec {
optional JobSpec spec = 2;
}
+// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't
+// been accounted in Job status counters.
+message UncountedTerminatedPods {
+ // Succeeded holds UIDs of succeeded Pods.
+ // +listType=set
+ // +optional
+ repeated string succeeded = 1;
+
+ // Failed holds UIDs of failed Pods.
+ // +listType=set
+ // +optional
+ repeated string failed = 2;
+}
+
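
The JobStatus comment above describes the finalizer-based accounting protocol. As a rough illustration of consuming the new field, the sketch below (the kubeconfig path, namespace, and Job name are assumptions) reads uncountedTerminatedPods from a live Job; it stays null unless the controller tracks the Job with pod finalizers:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// "example-job" and "default" are illustrative.
	job, err := cs.BatchV1().Jobs("default").Get(context.TODO(), "example-job", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	if utp := job.Status.UncountedTerminatedPods; utp != nil {
		fmt.Printf("uncounted: %d succeeded, %d failed\n", len(utp.Succeeded), len(utp.Failed))
	} else {
		fmt.Println("uncountedTerminatedPods is null (Job not tracked with finalizers)")
	}
}
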
diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go
index 12f4b04cb..77742e5f8 100644
--- a/vendor/k8s.io/api/batch/v1/types.go
+++ b/vendor/k8s.io/api/batch/v1/types.go
@@ -19,9 +19,20 @@ package v1
import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
)
-const JobCompletionIndexAnnotationAlpha = "batch.kubernetes.io/job-completion-index"
+const (
+ JobCompletionIndexAnnotation = "batch.kubernetes.io/job-completion-index"
+
+ // JobTrackingFinalizer is a finalizer for Job's pods. It prevents them from
+ // being deleted before being accounted in the Job status.
+ // The apiserver and job controller use this string as a Job annotation, to
+ // mark Jobs that are being tracked using pod finalizers. Two releases after
+ // the JobTrackingWithFinalizers graduates to GA, JobTrackingFinalizer will
+ // no longer be used as a Job annotation.
+ JobTrackingFinalizer = "batch.kubernetes.io/job-tracking"
+)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -162,9 +173,11 @@ type JobSpec struct {
// for each index.
// When value is `Indexed`, .spec.completions must be specified and
// `.spec.parallelism` must be less than or equal to 10^5.
+ // In addition, the Pod name takes the form
+ // `$(job-name)-$(index)-$(random-string)`,
+ // and the Pod hostname takes the form `$(job-name)-$(index)`.
//
- // This field is alpha-level and is only honored by servers that enable the
- // IndexedJob feature gate. More completion modes can be added in the future.
+ // This field is beta-level. More completion modes can be added in the future.
// If the Job controller observes a mode that it doesn't recognize, the
// controller skips updates for the Job.
// +optional
@@ -176,9 +189,11 @@ type JobSpec struct {
// false to true), the Job controller will delete all active Pods associated
// with this Job. Users must design their workload to gracefully handle this.
// Suspending a Job will reset the StartTime field of the Job, effectively
- // resetting the ActiveDeadlineSeconds timer too. This is an alpha field and
- // requires the SuspendJob feature gate to be enabled; otherwise this field
- // may not be set to true. Defaults to false.
+ // resetting the ActiveDeadlineSeconds timer too. Defaults to false.
+ //
+ // This field is beta-level, gated by SuspendJob feature flag (enabled by
+ // default).
+ //
// +optional
Suspend *bool `json:"suspend,omitempty" protobuf:"varint,10,opt,name=suspend"`
}
@@ -233,6 +248,38 @@ type JobStatus struct {
// represented as "1,3-5,7".
// +optional
CompletedIndexes string `json:"completedIndexes,omitempty" protobuf:"bytes,7,opt,name=completedIndexes"`
+
+ // UncountedTerminatedPods holds the UIDs of Pods that have terminated but
+ // the job controller hasn't yet accounted for in the status counters.
+ //
+ // The job controller creates pods with a finalizer. When a pod terminates
+ // (succeeded or failed), the controller does three steps to account for it
+ // in the job status:
+ // (1) Add the pod UID to the arrays in this field.
+ // (2) Remove the pod finalizer.
+ // (3) Remove the pod UID from the arrays while increasing the corresponding
+ // counter.
+ //
+ // This field is alpha-level. The job controller only makes use of this field
+ // when the feature gate JobTrackingWithFinalizers is enabled.
+ // Old jobs might not be tracked using this field, in which case the field
+ // remains null.
+ // +optional
+ UncountedTerminatedPods *UncountedTerminatedPods `json:"uncountedTerminatedPods,omitempty" protobuf:"bytes,8,opt,name=uncountedTerminatedPods"`
+}
+
+// UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't
+// been accounted in Job status counters.
+type UncountedTerminatedPods struct {
+ // Succeeded holds UIDs of succeeded Pods.
+ // +listType=set
+ // +optional
+ Succeeded []types.UID `json:"succeeded,omitempty" protobuf:"bytes,1,rep,name=succeeded,casttype=k8s.io/apimachinery/pkg/types.UID"`
+
+ // Failed holds UIDs of failed Pods.
+ // +listType=set
+ // +optional
+ Failed []types.UID `json:"failed,omitempty" protobuf:"bytes,2,rep,name=failed,casttype=k8s.io/apimachinery/pkg/types.UID"`
}
type JobConditionType string
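
The completionMode and suspend documentation above corresponds to a Job spec like the following hedged sketch; the Job name and image are placeholders, and batchv1.IndexedCompletion is assumed to be the constant for the `Indexed` mode:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	completions := int32(3)
	parallelism := int32(3)
	mode := batchv1.IndexedCompletion // "Indexed"
	suspend := true                   // create the Job without starting pods

	job := batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "indexed-example"}, // hypothetical name
		Spec: batchv1.JobSpec{
			Completions:    &completions,
			Parallelism:    &parallelism,
			CompletionMode: &mode,
			Suspend:        &suspend,
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					RestartPolicy: corev1.RestartPolicyNever,
					Containers: []corev1.Container{{
						Name:  "worker",
						Image: "busybox", // placeholder image
						// Each pod can read its completion index from the
						// batch.kubernetes.io/job-completion-index annotation.
					}},
				},
			},
		},
	}
	fmt.Printf("job %q: mode=%s suspend=%v\n", job.Name, *job.Spec.CompletionMode, *job.Spec.Suspend)
}
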
diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
index d98c5edeb..f6e01000d 100644
--- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
@@ -119,8 +119,8 @@ var map_JobSpec = map[string]string{
"manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector",
"template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
"ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.",
- "completionMode": "CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5.\n\nThis field is alpha-level and is only honored by servers that enable the IndexedJob feature gate. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.",
- "suspend": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. This is an alpha field and requires the SuspendJob feature gate to be enabled; otherwise this field may not be set to true. Defaults to false.",
+ "completionMode": "CompletionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, the Pod name takes the form `$(job-name)-$(index)-$(random-string)`, and the Pod hostname takes the form `$(job-name)-$(index)`.\n\nThis field is beta-level. More completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, the controller skips updates for the Job.",
+ "suspend": "Suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.\n\nThis field is beta-level, gated by SuspendJob feature flag (enabled by default).",
}
func (JobSpec) SwaggerDoc() map[string]string {
@@ -128,14 +128,15 @@ func (JobSpec) SwaggerDoc() map[string]string {
}
var map_JobStatus = map[string]string{
- "": "JobStatus represents the current state of a Job.",
- "conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
- "startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.",
- "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.",
- "active": "The number of actively running pods.",
- "succeeded": "The number of pods which reached phase Succeeded.",
- "failed": "The number of pods which reached phase Failed.",
- "completedIndexes": "CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".",
+ "": "JobStatus represents the current state of a Job.",
+ "conditions": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/",
+ "startTime": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC.",
+ "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.",
+ "active": "The number of actively running pods.",
+ "succeeded": "The number of pods which reached phase Succeeded.",
+ "failed": "The number of pods which reached phase Failed.",
+ "completedIndexes": "CompletedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".",
+ "uncountedTerminatedPods": "UncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status: (1) Add the pod UID to the arrays in this field. (2) Remove the pod finalizer. (3) Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nThis field is alpha-level. The job controller only makes use of this field when the feature gate JobTrackingWithFinalizers is enabled. Old jobs might not be tracked using this field, in which case the field remains null.",
}
func (JobStatus) SwaggerDoc() map[string]string {
@@ -152,4 +153,14 @@ func (JobTemplateSpec) SwaggerDoc() map[string]string {
return map_JobTemplateSpec
}
+var map_UncountedTerminatedPods = map[string]string{
+ "": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted in Job status counters.",
+ "succeeded": "Succeeded holds UIDs of succeeded Pods.",
+ "failed": "Failed holds UIDs of failed Pods.",
+}
+
+func (UncountedTerminatedPods) SwaggerDoc() map[string]string {
+ return map_UncountedTerminatedPods
+}
+
// AUTO-GENERATED FUNCTIONS END HERE
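
The SwaggerDoc maps feed the published OpenAPI descriptions and can also be read directly; a short sketch for the new type registered above:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

func main() {
	// SwaggerDoc has a value receiver, so it can be called on a zero value.
	docs := batchv1.UncountedTerminatedPods{}.SwaggerDoc()
	fmt.Println(docs[""])          // type-level description
	fmt.Println(docs["succeeded"]) // per-field description
}
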
diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
index 6018ad1d3..fe60aaf37 100644
--- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
@@ -24,6 +24,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
+ types "k8s.io/apimachinery/pkg/types"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -312,6 +313,11 @@ func (in *JobStatus) DeepCopyInto(out *JobStatus) {
in, out := &in.CompletionTime, &out.CompletionTime
*out = (*in).DeepCopy()
}
+ if in.UncountedTerminatedPods != nil {
+ in, out := &in.UncountedTerminatedPods, &out.UncountedTerminatedPods
+ *out = new(UncountedTerminatedPods)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -342,3 +348,29 @@ func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec {
in.DeepCopyInto(out)
return out
}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UncountedTerminatedPods) DeepCopyInto(out *UncountedTerminatedPods) {
+ *out = *in
+ if in.Succeeded != nil {
+ in, out := &in.Succeeded, &out.Succeeded
+ *out = make([]types.UID, len(*in))
+ copy(*out, *in)
+ }
+ if in.Failed != nil {
+ in, out := &in.Failed, &out.Failed
+ *out = make([]types.UID, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UncountedTerminatedPods.
+func (in *UncountedTerminatedPods) DeepCopy() *UncountedTerminatedPods {
+ if in == nil {
+ return nil
+ }
+ out := new(UncountedTerminatedPods)
+ in.DeepCopyInto(out)
+ return out
+}
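
The generated DeepCopy allocates fresh UID slices, so copies are safe to mutate independently of the original (for example, an object taken from an informer cache). A brief sketch with made-up UIDs:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	orig := &batchv1.UncountedTerminatedPods{
		Succeeded: []types.UID{"uid-a"}, // example UID
	}

	// DeepCopy copies the slices, so mutating the copy leaves orig untouched.
	cp := orig.DeepCopy()
	cp.Succeeded[0] = "uid-b"
	cp.Failed = append(cp.Failed, "uid-c")

	fmt.Println(orig.Succeeded, orig.Failed) // [uid-a] []
	fmt.Println(cp.Succeeded, cp.Failed)     // [uid-b] [uid-c]
}
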
diff --git a/vendor/k8s.io/api/certificates/v1/generated.pb.go b/vendor/k8s.io/api/certificates/v1/generated.pb.go
index fca7d2115..3f06ac97a 100644
--- a/vendor/k8s.io/api/certificates/v1/generated.pb.go
+++ b/vendor/k8s.io/api/certificates/v1/generated.pb.go
@@ -229,62 +229,64 @@ func init() {
}
var fileDescriptor_17e045d0de66f3c7 = []byte{
- // 873 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0xcf, 0x6f, 0x1b, 0x45,
- 0x14, 0xf6, 0xfa, 0x57, 0xec, 0x71, 0x49, 0xab, 0x11, 0xaa, 0x16, 0x4b, 0xdd, 0x8d, 0x56, 0x50,
- 0x05, 0x04, 0xbb, 0x38, 0x2a, 0x10, 0x0a, 0xe2, 0xb0, 0x69, 0x85, 0x2a, 0x52, 0x90, 0x26, 0x09,
- 0x87, 0xc2, 0xa1, 0x93, 0xf5, 0xeb, 0x66, 0xea, 0xee, 0x0f, 0x66, 0x66, 0x2d, 0x7c, 0xeb, 0x9f,
- 0xc0, 0x91, 0x23, 0xff, 0x09, 0xd7, 0x1c, 0x7b, 0x2c, 0x12, 0xb2, 0x88, 0x7b, 0xe1, 0x6f, 0xc8,
- 0x09, 0xcd, 0xec, 0x78, 0xed, 0xfc, 0x70, 0x5b, 0x72, 0xdb, 0xf9, 0xde, 0xf7, 0xbe, 0xef, 0xbd,
- 0xb7, 0x6f, 0x06, 0xed, 0x8c, 0xb6, 0x85, 0xcf, 0xb2, 0x60, 0x54, 0x1c, 0x02, 0x4f, 0x41, 0x82,
- 0x08, 0xc6, 0x90, 0x0e, 0x33, 0x1e, 0x98, 0x00, 0xcd, 0x59, 0x10, 0x01, 0x97, 0xec, 0x09, 0x8b,
- 0xa8, 0x0e, 0x0f, 0x82, 0x18, 0x52, 0xe0, 0x54, 0xc2, 0xd0, 0xcf, 0x79, 0x26, 0x33, 0xdc, 0x2f,
- 0xb9, 0x3e, 0xcd, 0x99, 0xbf, 0xcc, 0xf5, 0xc7, 0x83, 0xfe, 0x27, 0x31, 0x93, 0x47, 0xc5, 0xa1,
- 0x1f, 0x65, 0x49, 0x10, 0x67, 0x71, 0x16, 0xe8, 0x94, 0xc3, 0xe2, 0x89, 0x3e, 0xe9, 0x83, 0xfe,
- 0x2a, 0xa5, 0xfa, 0xde, 0xb2, 0x6d, 0xc6, 0xe1, 0x12, 0xbb, 0xfe, 0x9d, 0x05, 0x27, 0xa1, 0xd1,
- 0x11, 0x4b, 0x81, 0x4f, 0x82, 0x7c, 0x14, 0x2b, 0x40, 0x04, 0x09, 0x48, 0x7a, 0x59, 0x56, 0xb0,
- 0x2a, 0x8b, 0x17, 0xa9, 0x64, 0x09, 0x5c, 0x48, 0xf8, 0xfc, 0x4d, 0x09, 0x22, 0x3a, 0x82, 0x84,
- 0x9e, 0xcf, 0xf3, 0xfe, 0xac, 0xa3, 0xf7, 0x76, 0x16, 0x53, 0xd8, 0x63, 0x71, 0xca, 0xd2, 0x98,
- 0xc0, 0x2f, 0x05, 0x08, 0x89, 0x1f, 0xa3, 0x8e, 0xaa, 0x70, 0x48, 0x25, 0xb5, 0xad, 0x0d, 0x6b,
- 0xb3, 0xb7, 0xf5, 0xa9, 0xbf, 0x18, 0x5f, 0x65, 0xe4, 0xe7, 0xa3, 0x58, 0x01, 0xc2, 0x57, 0x6c,
- 0x7f, 0x3c, 0xf0, 0x7f, 0x38, 0x7c, 0x0a, 0x91, 0x7c, 0x08, 0x92, 0x86, 0xf8, 0x78, 0xea, 0xd6,
- 0x66, 0x53, 0x17, 0x2d, 0x30, 0x52, 0xa9, 0xe2, 0x9f, 0x50, 0x53, 0xe4, 0x10, 0xd9, 0x75, 0xad,
- 0xfe, 0xa5, 0xbf, 0xfa, 0xe7, 0xf8, 0x2b, 0xcb, 0xdc, 0xcb, 0x21, 0x0a, 0xaf, 0x19, 0x9b, 0xa6,
- 0x3a, 0x11, 0x2d, 0x8a, 0x23, 0xd4, 0x16, 0x92, 0xca, 0x42, 0xd8, 0x0d, 0x2d, 0xff, 0xd5, 0xd5,
- 0xe4, 0xb5, 0x44, 0xb8, 0x6e, 0x0c, 0xda, 0xe5, 0x99, 0x18, 0x69, 0xef, 0x55, 0x03, 0x79, 0x2b,
- 0x73, 0x77, 0xb2, 0x74, 0xc8, 0x24, 0xcb, 0x52, 0xbc, 0x8d, 0x9a, 0x72, 0x92, 0x83, 0x1e, 0x63,
- 0x37, 0x7c, 0x7f, 0x5e, 0xed, 0xfe, 0x24, 0x87, 0xd3, 0xa9, 0xfb, 0xee, 0x79, 0xbe, 0xc2, 0x89,
- 0xce, 0xc0, 0xbb, 0x55, 0x17, 0x6d, 0x9d, 0x7b, 0xe7, 0x6c, 0x21, 0xa7, 0x53, 0xf7, 0x92, 0x3d,
- 0xf4, 0x2b, 0xa5, 0xb3, 0xe5, 0xe2, 0xdb, 0xa8, 0xcd, 0x81, 0x8a, 0x2c, 0xd5, 0x23, 0xef, 0x2e,
- 0xda, 0x22, 0x1a, 0x25, 0x26, 0x8a, 0x3f, 0x44, 0x6b, 0x09, 0x08, 0x41, 0x63, 0xd0, 0xc3, 0xeb,
- 0x86, 0xd7, 0x0d, 0x71, 0xed, 0x61, 0x09, 0x93, 0x79, 0x1c, 0x3f, 0x45, 0xeb, 0xcf, 0xa8, 0x90,
- 0x07, 0xf9, 0x90, 0x4a, 0xd8, 0x67, 0x09, 0xd8, 0x4d, 0x3d, 0xee, 0x8f, 0xde, 0x6e, 0x57, 0x54,
- 0x46, 0x78, 0xd3, 0xa8, 0xaf, 0xef, 0x9e, 0x51, 0x22, 0xe7, 0x94, 0xf1, 0x18, 0x61, 0x85, 0xec,
- 0x73, 0x9a, 0x8a, 0x72, 0x50, 0xca, 0xaf, 0xf5, 0xbf, 0xfd, 0xfa, 0xc6, 0x0f, 0xef, 0x5e, 0x50,
- 0x23, 0x97, 0x38, 0x78, 0x7f, 0x59, 0xe8, 0xd6, 0xca, 0xbf, 0xbc, 0xcb, 0x84, 0xc4, 0x3f, 0x5f,
- 0xb8, 0x2b, 0xfe, 0xdb, 0xd5, 0xa3, 0xb2, 0xf5, 0x4d, 0xb9, 0x61, 0x6a, 0xea, 0xcc, 0x91, 0xa5,
- 0x7b, 0xf2, 0x08, 0xb5, 0x98, 0x84, 0x44, 0xd8, 0xf5, 0x8d, 0xc6, 0x66, 0x6f, 0xeb, 0xb3, 0x2b,
- 0x6d, 0x72, 0xf8, 0x8e, 0x71, 0x68, 0x3d, 0x50, 0x5a, 0xa4, 0x94, 0xf4, 0xfe, 0x6d, 0xbc, 0xa6,
- 0x37, 0x75, 0x9d, 0xf0, 0x07, 0x68, 0x8d, 0x97, 0x47, 0xdd, 0xda, 0xb5, 0xb0, 0xa7, 0x16, 0xc1,
- 0x30, 0xc8, 0x3c, 0x86, 0xb7, 0x10, 0x12, 0x2c, 0x4e, 0x81, 0x7f, 0x4f, 0x13, 0xb0, 0xd7, 0xf4,
- 0xda, 0x54, 0xd7, 0x7f, 0xaf, 0x8a, 0x90, 0x25, 0x16, 0xf6, 0x51, 0xbb, 0x50, 0x5b, 0x24, 0xec,
- 0xd6, 0x46, 0x63, 0xb3, 0x1b, 0xde, 0x54, 0xbb, 0x78, 0xa0, 0x91, 0xd3, 0xa9, 0xdb, 0xf9, 0x0e,
- 0x26, 0xfa, 0x40, 0x0c, 0x0b, 0x7f, 0x8c, 0x3a, 0x85, 0x00, 0x9e, 0x2a, 0x87, 0x72, 0x83, 0xab,
- 0xb1, 0x1d, 0x18, 0x9c, 0x54, 0x0c, 0x7c, 0x0b, 0x35, 0x0a, 0x36, 0x34, 0x1b, 0xdc, 0x33, 0xc4,
- 0xc6, 0xc1, 0x83, 0x7b, 0x44, 0xe1, 0xd8, 0x43, 0xed, 0x98, 0x67, 0x45, 0x2e, 0xec, 0xa6, 0x36,
- 0x47, 0xca, 0xfc, 0x5b, 0x8d, 0x10, 0x13, 0xc1, 0x0c, 0xb5, 0xe0, 0x57, 0xc9, 0xa9, 0xdd, 0xd6,
- 0x93, 0xbf, 0x77, 0xe5, 0x27, 0xca, 0xbf, 0xaf, 0x64, 0xee, 0xa7, 0x92, 0x4f, 0x16, 0x3f, 0x42,
- 0x63, 0xa4, 0x74, 0xe8, 0x3f, 0x46, 0x68, 0xc1, 0xc1, 0x37, 0x50, 0x63, 0x04, 0x93, 0xf2, 0xc1,
- 0x20, 0xea, 0x13, 0x7f, 0x8d, 0x5a, 0x63, 0xfa, 0xac, 0x00, 0xf3, 0x5a, 0xde, 0x7e, 0x5d, 0x29,
- 0x5a, 0xe8, 0x47, 0xc5, 0x26, 0x65, 0xd2, 0xdd, 0xfa, 0xb6, 0xe5, 0x1d, 0x5b, 0xc8, 0x7d, 0xc3,
- 0x43, 0x87, 0x39, 0x42, 0xd1, 0xfc, 0xf1, 0x10, 0xb6, 0xa5, 0xbb, 0xfe, 0xe6, 0x4a, 0x5d, 0x57,
- 0x6f, 0xd0, 0x62, 0x0b, 0x2a, 0x48, 0x90, 0x25, 0x17, 0x3c, 0x40, 0xbd, 0x25, 0x55, 0xdd, 0xdf,
- 0xb5, 0xf0, 0xfa, 0x6c, 0xea, 0xf6, 0x96, 0xc4, 0xc9, 0x32, 0xc7, 0xfb, 0xc2, 0x0c, 0x4b, 0xf7,
- 0x88, 0xdd, 0xf9, 0xfd, 0xb0, 0xf4, 0x8f, 0xec, 0x9e, 0x5f, 0xf2, 0xbb, 0x9d, 0xdf, 0xff, 0x70,
- 0x6b, 0xcf, 0xff, 0xde, 0xa8, 0x85, 0x9b, 0xc7, 0x27, 0x4e, 0xed, 0xc5, 0x89, 0x53, 0x7b, 0x79,
- 0xe2, 0xd4, 0x9e, 0xcf, 0x1c, 0xeb, 0x78, 0xe6, 0x58, 0x2f, 0x66, 0x8e, 0xf5, 0x72, 0xe6, 0x58,
- 0xff, 0xcc, 0x1c, 0xeb, 0xb7, 0x57, 0x4e, 0xed, 0x51, 0x7d, 0x3c, 0xf8, 0x2f, 0x00, 0x00, 0xff,
- 0xff, 0x9d, 0x8f, 0x4c, 0xfa, 0x70, 0x08, 0x00, 0x00,
+ // 906 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4f, 0x6f, 0x1b, 0x45,
+ 0x14, 0xf7, 0xc6, 0x7f, 0x62, 0x8f, 0x43, 0xda, 0x8e, 0xa0, 0x5a, 0x2c, 0xd5, 0x6b, 0x59, 0x50,
+ 0x19, 0x04, 0xbb, 0x38, 0x2a, 0x10, 0x0a, 0xe2, 0xb0, 0x69, 0x84, 0x2a, 0x52, 0x90, 0x26, 0x09,
+ 0x87, 0xc2, 0xa1, 0x93, 0xf5, 0xeb, 0x66, 0xea, 0xee, 0x1f, 0x66, 0x66, 0xad, 0xfa, 0xd6, 0x8f,
+ 0xc0, 0x91, 0x23, 0x5f, 0x80, 0xcf, 0xc0, 0x35, 0xc7, 0x1e, 0x8b, 0x84, 0x2c, 0xe2, 0x7e, 0x8b,
+ 0x9c, 0xd0, 0xcc, 0x8e, 0xd7, 0x8e, 0x13, 0xb7, 0x25, 0xb7, 0x99, 0xf7, 0x7e, 0xef, 0xf7, 0x7b,
+ 0xef, 0xcd, 0x7b, 0x83, 0x76, 0x86, 0xdb, 0xc2, 0x65, 0x89, 0x37, 0xcc, 0x8e, 0x80, 0xc7, 0x20,
+ 0x41, 0x78, 0x23, 0x88, 0x07, 0x09, 0xf7, 0x8c, 0x83, 0xa6, 0xcc, 0x0b, 0x80, 0x4b, 0xf6, 0x98,
+ 0x05, 0x54, 0xbb, 0xfb, 0x5e, 0x08, 0x31, 0x70, 0x2a, 0x61, 0xe0, 0xa6, 0x3c, 0x91, 0x09, 0x6e,
+ 0xe5, 0x58, 0x97, 0xa6, 0xcc, 0x5d, 0xc4, 0xba, 0xa3, 0x7e, 0xeb, 0xd3, 0x90, 0xc9, 0xe3, 0xec,
+ 0xc8, 0x0d, 0x92, 0xc8, 0x0b, 0x93, 0x30, 0xf1, 0x74, 0xc8, 0x51, 0xf6, 0x58, 0xdf, 0xf4, 0x45,
+ 0x9f, 0x72, 0xaa, 0x56, 0x77, 0x51, 0x36, 0xe1, 0x70, 0x89, 0x5c, 0xeb, 0xce, 0x1c, 0x13, 0xd1,
+ 0xe0, 0x98, 0xc5, 0xc0, 0xc7, 0x5e, 0x3a, 0x0c, 0x95, 0x41, 0x78, 0x11, 0x48, 0x7a, 0x59, 0x94,
+ 0xb7, 0x2a, 0x8a, 0x67, 0xb1, 0x64, 0x11, 0x5c, 0x08, 0xf8, 0xe2, 0x4d, 0x01, 0x22, 0x38, 0x86,
+ 0x88, 0x2e, 0xc7, 0x75, 0xff, 0x5a, 0x43, 0xef, 0xef, 0xcc, 0xbb, 0xb0, 0xcf, 0xc2, 0x98, 0xc5,
+ 0x21, 0x81, 0x5f, 0x33, 0x10, 0x12, 0x3f, 0x42, 0x75, 0x95, 0xe1, 0x80, 0x4a, 0x6a, 0x5b, 0x1d,
+ 0xab, 0xd7, 0xdc, 0xfa, 0xcc, 0x9d, 0xb7, 0xaf, 0x10, 0x72, 0xd3, 0x61, 0xa8, 0x0c, 0xc2, 0x55,
+ 0x68, 0x77, 0xd4, 0x77, 0x7f, 0x3c, 0x7a, 0x02, 0x81, 0x7c, 0x00, 0x92, 0xfa, 0xf8, 0x64, 0xe2,
+ 0x94, 0xa6, 0x13, 0x07, 0xcd, 0x6d, 0xa4, 0x60, 0xc5, 0x3f, 0xa3, 0x8a, 0x48, 0x21, 0xb0, 0xd7,
+ 0x34, 0xfb, 0x57, 0xee, 0xea, 0xc7, 0x71, 0x57, 0xa6, 0xb9, 0x9f, 0x42, 0xe0, 0x6f, 0x18, 0x99,
+ 0x8a, 0xba, 0x11, 0x4d, 0x8a, 0x03, 0x54, 0x13, 0x92, 0xca, 0x4c, 0xd8, 0x65, 0x4d, 0xff, 0xf5,
+ 0xd5, 0xe8, 0x35, 0x85, 0xbf, 0x69, 0x04, 0x6a, 0xf9, 0x9d, 0x18, 0xea, 0xee, 0xab, 0x32, 0xea,
+ 0xae, 0x8c, 0xdd, 0x49, 0xe2, 0x01, 0x93, 0x2c, 0x89, 0xf1, 0x36, 0xaa, 0xc8, 0x71, 0x0a, 0xba,
+ 0x8d, 0x0d, 0xff, 0x83, 0x59, 0xb6, 0x07, 0xe3, 0x14, 0xce, 0x26, 0xce, 0xbb, 0xcb, 0x78, 0x65,
+ 0x27, 0x3a, 0x02, 0xef, 0x15, 0x55, 0xd4, 0x74, 0xec, 0x9d, 0xf3, 0x89, 0x9c, 0x4d, 0x9c, 0x4b,
+ 0xe6, 0xd0, 0x2d, 0x98, 0xce, 0xa7, 0x8b, 0x6f, 0xa3, 0x1a, 0x07, 0x2a, 0x92, 0x58, 0xb7, 0xbc,
+ 0x31, 0x2f, 0x8b, 0x68, 0x2b, 0x31, 0x5e, 0xfc, 0x11, 0x5a, 0x8f, 0x40, 0x08, 0x1a, 0x82, 0x6e,
+ 0x5e, 0xc3, 0xbf, 0x66, 0x80, 0xeb, 0x0f, 0x72, 0x33, 0x99, 0xf9, 0xf1, 0x13, 0xb4, 0xf9, 0x94,
+ 0x0a, 0x79, 0x98, 0x0e, 0xa8, 0x84, 0x03, 0x16, 0x81, 0x5d, 0xd1, 0xed, 0xfe, 0xf8, 0xed, 0x66,
+ 0x45, 0x45, 0xf8, 0x37, 0x0d, 0xfb, 0xe6, 0xde, 0x39, 0x26, 0xb2, 0xc4, 0x8c, 0x47, 0x08, 0x2b,
+ 0xcb, 0x01, 0xa7, 0xb1, 0xc8, 0x1b, 0xa5, 0xf4, 0xaa, 0xff, 0x5b, 0xaf, 0x65, 0xf4, 0xf0, 0xde,
+ 0x05, 0x36, 0x72, 0x89, 0x42, 0xf7, 0x6f, 0x0b, 0xdd, 0x5a, 0xf9, 0xca, 0x7b, 0x4c, 0x48, 0xfc,
+ 0xcb, 0x85, 0x5d, 0x71, 0xdf, 0x2e, 0x1f, 0x15, 0xad, 0x37, 0xe5, 0xba, 0xc9, 0xa9, 0x3e, 0xb3,
+ 0x2c, 0xec, 0xc9, 0x43, 0x54, 0x65, 0x12, 0x22, 0x61, 0xaf, 0x75, 0xca, 0xbd, 0xe6, 0xd6, 0xe7,
+ 0x57, 0x9a, 0x64, 0xff, 0x1d, 0xa3, 0x50, 0xbd, 0xaf, 0xb8, 0x48, 0x4e, 0xd9, 0xfd, 0xb3, 0xf2,
+ 0x9a, 0xda, 0xd4, 0x3a, 0xe1, 0x0f, 0xd1, 0x3a, 0xcf, 0xaf, 0xba, 0xb4, 0x0d, 0xbf, 0xa9, 0x06,
+ 0xc1, 0x20, 0xc8, 0xcc, 0x87, 0xb7, 0x10, 0x12, 0x2c, 0x8c, 0x81, 0xff, 0x40, 0x23, 0xb0, 0xd7,
+ 0xf5, 0xd8, 0x14, 0xeb, 0xbf, 0x5f, 0x78, 0xc8, 0x02, 0x0a, 0xef, 0xa0, 0x1b, 0xf0, 0x2c, 0x65,
+ 0x9c, 0xea, 0x59, 0x85, 0x20, 0x89, 0x07, 0xc2, 0xae, 0x77, 0xac, 0x5e, 0xd5, 0x7f, 0x6f, 0x3a,
+ 0x71, 0x6e, 0xec, 0x2e, 0x3b, 0xc9, 0x45, 0x3c, 0x76, 0x51, 0x2d, 0x53, 0xa3, 0x28, 0xec, 0x6a,
+ 0xa7, 0xdc, 0x6b, 0xf8, 0x37, 0xd5, 0x40, 0x1f, 0x6a, 0xcb, 0xd9, 0xc4, 0xa9, 0x7f, 0x0f, 0x63,
+ 0x7d, 0x21, 0x06, 0x85, 0x3f, 0x41, 0xf5, 0x4c, 0x00, 0x8f, 0x55, 0x9a, 0xf9, 0x1a, 0x14, 0xbd,
+ 0x3f, 0x34, 0x76, 0x52, 0x20, 0xf0, 0x2d, 0x54, 0xce, 0xd8, 0xc0, 0xac, 0x41, 0xd3, 0x00, 0xcb,
+ 0x87, 0xf7, 0xef, 0x11, 0x65, 0xc7, 0x5d, 0x54, 0x0b, 0x79, 0x92, 0xa5, 0xc2, 0xae, 0x68, 0x71,
+ 0xa4, 0xc4, 0xbf, 0xd3, 0x16, 0x62, 0x3c, 0x98, 0xa1, 0x2a, 0x3c, 0x93, 0x9c, 0xda, 0x35, 0xfd,
+ 0x7c, 0xf7, 0xae, 0xfc, 0xcf, 0xb9, 0xbb, 0x8a, 0x66, 0x37, 0x96, 0x7c, 0x3c, 0x7f, 0x4d, 0x6d,
+ 0x23, 0xb9, 0x42, 0xeb, 0x11, 0x42, 0x73, 0x0c, 0xbe, 0x8e, 0xca, 0x43, 0x18, 0xe7, 0xbf, 0x0e,
+ 0x51, 0x47, 0xfc, 0x0d, 0xaa, 0x8e, 0xe8, 0xd3, 0x0c, 0xcc, 0x97, 0x7b, 0xfb, 0x75, 0xa9, 0x68,
+ 0xa2, 0x9f, 0x14, 0x9a, 0xe4, 0x41, 0x77, 0xd7, 0xb6, 0xad, 0xee, 0x89, 0x85, 0x9c, 0x37, 0xfc,
+ 0x96, 0x98, 0x23, 0x14, 0xcc, 0x7e, 0x20, 0x61, 0x5b, 0xba, 0xea, 0x6f, 0xaf, 0x54, 0x75, 0xf1,
+ 0x91, 0xcd, 0x47, 0xa9, 0x30, 0x09, 0xb2, 0xa0, 0x82, 0xfb, 0xa8, 0xb9, 0xc0, 0xaa, 0xeb, 0xdb,
+ 0xf0, 0xaf, 0x4d, 0x27, 0x4e, 0x73, 0x81, 0x9c, 0x2c, 0x62, 0xba, 0x5f, 0x9a, 0x66, 0xe9, 0x1a,
+ 0xb1, 0x33, 0x5b, 0x32, 0x4b, 0x3f, 0x64, 0x63, 0x79, 0x53, 0xee, 0xd6, 0x7f, 0xff, 0xc3, 0x29,
+ 0x3d, 0xff, 0xa7, 0x53, 0xf2, 0x7b, 0x27, 0xa7, 0xed, 0xd2, 0x8b, 0xd3, 0x76, 0xe9, 0xe5, 0x69,
+ 0xbb, 0xf4, 0x7c, 0xda, 0xb6, 0x4e, 0xa6, 0x6d, 0xeb, 0xc5, 0xb4, 0x6d, 0xbd, 0x9c, 0xb6, 0xad,
+ 0x7f, 0xa7, 0x6d, 0xeb, 0xb7, 0x57, 0xed, 0xd2, 0xc3, 0xb5, 0x51, 0xff, 0xbf, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x88, 0x08, 0x6d, 0x53, 0xb5, 0x08, 0x00, 0x00,
}
func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) {
@@ -470,6 +472,11 @@ func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int,
_ = i
var l int
_ = l
+ if m.ExpirationSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds))
+ i--
+ dAtA[i] = 0x40
+ }
i -= len(m.SignerName)
copy(dAtA[i:], m.SignerName)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.SignerName)))
@@ -719,6 +726,9 @@ func (m *CertificateSigningRequestSpec) Size() (n int) {
}
l = len(m.SignerName)
n += 1 + l + sovGenerated(uint64(l))
+ if m.ExpirationSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ExpirationSeconds))
+ }
return n
}
@@ -827,6 +837,7 @@ func (this *CertificateSigningRequestSpec) String() string {
`Usages:` + fmt.Sprintf("%v", this.Usages) + `,`,
`Extra:` + mapStringForExtra + `,`,
`SignerName:` + fmt.Sprintf("%v", this.SignerName) + `,`,
+ `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`,
`}`,
}, "")
return s
@@ -1717,6 +1728,26 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
}
m.SignerName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ExpirationSeconds = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
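
The hunks above hand-roll the protobuf wire format for the new field: MarshalToSizedBuffer fills the buffer from the end, so the varint value is written before the tag byte 0x40, which is field number 8 shifted left by three bits with wire type 0 (varint), and the Unmarshal case reverses this with a shift-and-accumulate loop. A minimal, self-contained sketch of that layout (illustrative only, not the generated code):

```go
// Illustrative sketch, not the generated code: how the tag byte and varint for
// expirationSeconds (field number 8, wire type 0) are laid out on the wire.
package main

import "fmt"

func main() {
	const fieldNumber = 8
	const wireTypeVarint = 0
	tag := byte(fieldNumber<<3 | wireTypeVarint) // 0x40, matching dAtA[i] = 0x40 in the hunk above

	// Encode the value 3600 (one hour) as a varint, least-significant 7 bits first.
	var value []byte
	v := uint64(3600)
	for v >= 0x80 {
		value = append(value, byte(v)|0x80)
		v >>= 7
	}
	value = append(value, byte(v))

	// Decode it back with the same shift-and-accumulate loop the Unmarshal case uses.
	var decoded int32
	for shift, i := uint(0), 0; ; shift += 7 {
		b := value[i]
		i++
		decoded |= int32(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	fmt.Printf("tag=%#x value=%d bytes=%v\n", tag, decoded, value)
}
```

Running it prints tag=0x40 value=3600 bytes=[144 28]; because the generated marshaller writes backwards into a pre-sized buffer, the tag assignment appears after the encodeVarintGenerated call in the hunk even though the tag precedes the value on the wire.
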
diff --git a/vendor/k8s.io/api/certificates/v1/generated.proto b/vendor/k8s.io/api/certificates/v1/generated.proto
index 839c1aa87..1db9e324e 100644
--- a/vendor/k8s.io/api/certificates/v1/generated.proto
+++ b/vendor/k8s.io/api/certificates/v1/generated.proto
@@ -44,7 +44,7 @@ message CertificateSigningRequest {
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// spec contains the certificate request, and is immutable after creation.
- // Only the request, signerName, and usages fields can be set on creation.
+ // Only the request, signerName, expirationSeconds, and usages fields can be set on creation.
// Other fields are derived by Kubernetes and cannot be modified by users.
optional CertificateSigningRequestSpec spec = 2;
@@ -135,6 +135,30 @@ message CertificateSigningRequestSpec {
// 6. Whether or not requests for CA certificates are allowed.
optional string signerName = 7;
+ // expirationSeconds is the requested duration of validity of the issued
+ // certificate. The certificate signer may issue a certificate with a different
+ // validity duration so a client must check the delta between the notBefore and
+ // notAfter fields in the issued certificate to determine the actual duration.
+ //
+ // The v1.22+ in-tree implementations of the well-known Kubernetes signers will
+ // honor this field as long as the requested duration is not greater than the
+ // maximum duration they will honor per the --cluster-signing-duration CLI
+ // flag to the Kubernetes controller manager.
+ //
+ // Certificate signers may not honor this field for various reasons:
+ //
+ // 1. Old signer that is unaware of the field (such as the in-tree
+ // implementations prior to v1.22)
+ // 2. Signer whose configured maximum is shorter than the requested duration
+ // 3. Signer whose configured minimum is longer than the requested duration
+ //
+ // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes.
+ //
+ // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate.
+ //
+ // +optional
+ optional int32 expirationSeconds = 8;
+
// usages specifies a set of key usages requested in the issued certificate.
//
// Requests for TLS client certificates typically request: "digital signature", "key encipherment", "client auth".
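
The comment block documents a 600-second floor and an int32 wire type for expirationSeconds. A hypothetical helper (the name and package are mine, not from the vendored code) that turns a time.Duration into the pointer value the API expects and rejects durations the documentation calls invalid:

```go
// Hypothetical helper, not part of the vendored code: turn a time.Duration into
// the *int32 value the expirationSeconds field expects, enforcing the documented
// 600-second minimum.
package example

import (
	"fmt"
	"time"
)

const minExpirationSeconds = 600 // 10 minutes, per the field documentation

func ExpirationSeconds(d time.Duration) (*int32, error) {
	secs := int64(d / time.Second)
	if secs < minExpirationSeconds {
		return nil, fmt.Errorf("requested duration %s is below the 600s minimum", d)
	}
	if secs > int64(^uint32(0)>>1) {
		return nil, fmt.Errorf("requested duration %s does not fit in int32 seconds", d)
	}
	v := int32(secs)
	return &v, nil
}
```
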
diff --git a/vendor/k8s.io/api/certificates/v1/types.go b/vendor/k8s.io/api/certificates/v1/types.go
index 8d3b2305e..4bfd1fb61 100644
--- a/vendor/k8s.io/api/certificates/v1/types.go
+++ b/vendor/k8s.io/api/certificates/v1/types.go
@@ -44,7 +44,7 @@ type CertificateSigningRequest struct {
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec contains the certificate request, and is immutable after creation.
- // Only the request, signerName, and usages fields can be set on creation.
+ // Only the request, signerName, expirationSeconds, and usages fields can be set on creation.
// Other fields are derived by Kubernetes and cannot be modified by users.
Spec CertificateSigningRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
@@ -84,6 +84,30 @@ type CertificateSigningRequestSpec struct {
// 6. Whether or not requests for CA certificates are allowed.
SignerName string `json:"signerName" protobuf:"bytes,7,opt,name=signerName"`
+ // expirationSeconds is the requested duration of validity of the issued
+ // certificate. The certificate signer may issue a certificate with a different
+ // validity duration so a client must check the delta between the notBefore and
+ // notAfter fields in the issued certificate to determine the actual duration.
+ //
+ // The v1.22+ in-tree implementations of the well-known Kubernetes signers will
+ // honor this field as long as the requested duration is not greater than the
+ // maximum duration they will honor per the --cluster-signing-duration CLI
+ // flag to the Kubernetes controller manager.
+ //
+ // Certificate signers may not honor this field for various reasons:
+ //
+ // 1. Old signer that is unaware of the field (such as the in-tree
+ // implementations prior to v1.22)
+ // 2. Signer whose configured maximum is shorter than the requested duration
+ // 3. Signer whose configured minimum is longer than the requested duration
+ //
+ // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes.
+ //
+ // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate.
+ //
+ // +optional
+ ExpirationSeconds *int32 `json:"expirationSeconds,omitempty" protobuf:"varint,8,opt,name=expirationSeconds"`
+
// usages specifies a set of key usages requested in the issued certificate.
//
// Requests for TLS client certificates typically request: "digital signature", "key encipherment", "client auth".
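
With the field now present on CertificateSigningRequestSpec, a caller can ask for a bounded validity when constructing a certificates/v1 CSR object. A minimal sketch, assuming csrPEM holds a PEM-encoded PKCS#10 request; the object name, signer, and one-hour duration are placeholders:

```go
// Minimal sketch of populating the new field on a certificates/v1 object; the
// object name, signer, and one-hour duration are placeholders, and csrPEM is
// assumed to hold a PEM-encoded PKCS#10 request.
package example

import (
	certv1 "k8s.io/api/certificates/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func newCSR(csrPEM []byte) *certv1.CertificateSigningRequest {
	expiration := int32(3600) // request one hour; the signer may still shorten or ignore this

	return &certv1.CertificateSigningRequest{
		ObjectMeta: metav1.ObjectMeta{Name: "example-client-cert"},
		Spec: certv1.CertificateSigningRequestSpec{
			Request:           csrPEM,
			SignerName:        "kubernetes.io/kube-apiserver-client",
			ExpirationSeconds: &expiration,
			Usages: []certv1.KeyUsage{
				certv1.UsageDigitalSignature,
				certv1.UsageKeyEncipherment,
				certv1.UsageClientAuth,
			},
		},
	}
}
```
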
diff --git a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go
index 9a078fa0c..2714584ed 100644
--- a/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/certificates/v1/types_swagger_doc_generated.go
@@ -29,7 +29,7 @@ package v1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_CertificateSigningRequest = map[string]string{
"": "CertificateSigningRequest objects provide a mechanism to obtain x509 certificates by submitting a certificate signing request, and having it asynchronously approved and issued.\n\nKubelets use this API to obtain:\n 1. client certificates to authenticate to kube-apiserver (with the \"kubernetes.io/kube-apiserver-client-kubelet\" signerName).\n 2. serving certificates for TLS endpoints kube-apiserver can connect to securely (with the \"kubernetes.io/kubelet-serving\" signerName).\n\nThis API can be used to request client certificates to authenticate to kube-apiserver (with the \"kubernetes.io/kube-apiserver-client\" signerName), or to obtain certificates from custom non-Kubernetes signers.",
- "spec": "spec contains the certificate request, and is immutable after creation. Only the request, signerName, and usages fields can be set on creation. Other fields are derived by Kubernetes and cannot be modified by users.",
+ "spec": "spec contains the certificate request, and is immutable after creation. Only the request, signerName, expirationSeconds, and usages fields can be set on creation. Other fields are derived by Kubernetes and cannot be modified by users.",
"status": "status contains information about whether the request is approved or denied, and the certificate issued by the signer, or the failure condition indicating signer failure.",
}
@@ -61,14 +61,15 @@ func (CertificateSigningRequestList) SwaggerDoc() map[string]string {
}
var map_CertificateSigningRequestSpec = map[string]string{
- "": "CertificateSigningRequestSpec contains the certificate request.",
- "request": "request contains an x509 certificate signing request encoded in a \"CERTIFICATE REQUEST\" PEM block. When serialized as JSON or YAML, the data is additionally base64-encoded.",
- "signerName": "signerName indicates the requested signer, and is a qualified name.\n\nList/watch requests for CertificateSigningRequests can filter on this field using a \"spec.signerName=NAME\" fieldSelector.\n\nWell-known Kubernetes signers are:\n 1. \"kubernetes.io/kube-apiserver-client\": issues client certificates that can be used to authenticate to kube-apiserver.\n Requests for this signer are never auto-approved by kube-controller-manager, can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 2. \"kubernetes.io/kube-apiserver-client-kubelet\": issues client certificates that kubelets use to authenticate to kube-apiserver.\n Requests for this signer can be auto-approved by the \"csrapproving\" controller in kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 3. \"kubernetes.io/kubelet-serving\" issues serving certificates that kubelets use to serve TLS endpoints, which kube-apiserver can connect to securely.\n Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n\nMore details are available at https://k8s.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers\n\nCustom signerNames can also be specified. The signer defines:\n 1. Trust distribution: how trust (CA bundles) are distributed.\n 2. Permitted subjects: and behavior when a disallowed subject is requested.\n 3. Required, permitted, or forbidden x509 extensions in the request (including whether subjectAltNames are allowed, which types, restrictions on allowed values) and behavior when a disallowed extension is requested.\n 4. Required, permitted, or forbidden key usages / extended key usages.\n 5. Expiration/certificate lifetime: whether it is fixed by the signer, configurable by the admin.\n 6. Whether or not requests for CA certificates are allowed.",
- "usages": "usages specifies a set of key usages requested in the issued certificate.\n\nRequests for TLS client certificates typically request: \"digital signature\", \"key encipherment\", \"client auth\".\n\nRequests for TLS serving certificates typically request: \"key encipherment\", \"digital signature\", \"server auth\".\n\nValid values are:\n \"signing\", \"digital signature\", \"content commitment\",\n \"key encipherment\", \"key agreement\", \"data encipherment\",\n \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\",\n \"server auth\", \"client auth\",\n \"code signing\", \"email protection\", \"s/mime\",\n \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\",\n \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"",
- "username": "username contains the name of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.",
- "uid": "uid contains the uid of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.",
- "groups": "groups contains group membership of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.",
- "extra": "extra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.",
+ "": "CertificateSigningRequestSpec contains the certificate request.",
+ "request": "request contains an x509 certificate signing request encoded in a \"CERTIFICATE REQUEST\" PEM block. When serialized as JSON or YAML, the data is additionally base64-encoded.",
+ "signerName": "signerName indicates the requested signer, and is a qualified name.\n\nList/watch requests for CertificateSigningRequests can filter on this field using a \"spec.signerName=NAME\" fieldSelector.\n\nWell-known Kubernetes signers are:\n 1. \"kubernetes.io/kube-apiserver-client\": issues client certificates that can be used to authenticate to kube-apiserver.\n Requests for this signer are never auto-approved by kube-controller-manager, can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 2. \"kubernetes.io/kube-apiserver-client-kubelet\": issues client certificates that kubelets use to authenticate to kube-apiserver.\n Requests for this signer can be auto-approved by the \"csrapproving\" controller in kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 3. \"kubernetes.io/kubelet-serving\" issues serving certificates that kubelets use to serve TLS endpoints, which kube-apiserver can connect to securely.\n Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n\nMore details are available at https://k8s.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers\n\nCustom signerNames can also be specified. The signer defines:\n 1. Trust distribution: how trust (CA bundles) are distributed.\n 2. Permitted subjects: and behavior when a disallowed subject is requested.\n 3. Required, permitted, or forbidden x509 extensions in the request (including whether subjectAltNames are allowed, which types, restrictions on allowed values) and behavior when a disallowed extension is requested.\n 4. Required, permitted, or forbidden key usages / extended key usages.\n 5. Expiration/certificate lifetime: whether it is fixed by the signer, configurable by the admin.\n 6. Whether or not requests for CA certificates are allowed.",
+ "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.\n\nAs of v1.22, this field is beta and is controlled via the CSRDuration feature gate.",
+ "usages": "usages specifies a set of key usages requested in the issued certificate.\n\nRequests for TLS client certificates typically request: \"digital signature\", \"key encipherment\", \"client auth\".\n\nRequests for TLS serving certificates typically request: \"key encipherment\", \"digital signature\", \"server auth\".\n\nValid values are:\n \"signing\", \"digital signature\", \"content commitment\",\n \"key encipherment\", \"key agreement\", \"data encipherment\",\n \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\",\n \"server auth\", \"client auth\",\n \"code signing\", \"email protection\", \"s/mime\",\n \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\",\n \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"",
+ "username": "username contains the name of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.",
+ "uid": "uid contains the uid of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.",
+ "groups": "groups contains group membership of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.",
+ "extra": "extra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.",
}
func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string {
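
The swagger text repeats the caveat that signers may not honor the requested duration, so clients should measure the issued certificate rather than trust the request. A sketch of that check, assuming status.certificate holds the usual PEM-encoded chain:

```go
// Sketch of the check the documentation asks clients to perform: parse the
// issued certificate from status.certificate and measure its actual validity
// window instead of trusting the requested duration.
package example

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"time"
)

func issuedValidity(statusCertificate []byte) (time.Duration, error) {
	block, _ := pem.Decode(statusCertificate)
	if block == nil || block.Type != "CERTIFICATE" {
		return 0, fmt.Errorf("status.certificate does not start with a CERTIFICATE PEM block")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return 0, err
	}
	// The signer may have issued a shorter (or longer) lifetime than requested.
	return cert.NotAfter.Sub(cert.NotBefore), nil
}
```
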
diff --git a/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go
index cc6a60be7..af591b46f 100644
--- a/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/certificates/v1/zz_generated.deepcopy.go
@@ -111,6 +111,11 @@ func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningReq
*out = make([]byte, len(*in))
copy(*out, *in)
}
+ if in.ExpirationSeconds != nil {
+ in, out := &in.ExpirationSeconds, &out.ExpirationSeconds
+ *out = new(int32)
+ **out = **in
+ }
if in.Usages != nil {
in, out := &in.Usages, &out.Usages
*out = make([]KeyUsage, len(*in))
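
The deepcopy hunk allocates a fresh int32 rather than copying the pointer, so a copied spec owns its own expirationSeconds. A small sketch of the behaviour this buys (the function name is illustrative):

```go
// Sketch of the behaviour the new DeepCopyInto branch provides: the copy owns
// its own int32, so mutating it does not reach back into the original spec.
// The function name is illustrative.
package example

import certv1 "k8s.io/api/certificates/v1"

func shortenCopy(orig *certv1.CertificateSigningRequestSpec) *certv1.CertificateSigningRequestSpec {
	cp := orig.DeepCopy() // allocates a fresh *int32 for ExpirationSeconds
	if cp.ExpirationSeconds != nil {
		*cp.ExpirationSeconds = 600 // orig.ExpirationSeconds is unchanged
	}
	return cp
}
```
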
diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
index f21256f48..d9a297644 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go
@@ -229,62 +229,64 @@ func init() {
}
var fileDescriptor_09d156762b8218ef = []byte{
- // 878 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4b, 0x6f, 0x1c, 0x45,
- 0x10, 0xde, 0xf1, 0xbe, 0x7b, 0x8d, 0x13, 0xb5, 0x50, 0x34, 0xac, 0x94, 0x19, 0x6b, 0x04, 0xc8,
- 0x3c, 0xd2, 0x83, 0xa3, 0x08, 0x2c, 0x1f, 0x10, 0x8c, 0x89, 0xc0, 0xc2, 0x01, 0xa9, 0x6d, 0x73,
- 0x40, 0x48, 0xa4, 0x77, 0xb6, 0x32, 0xee, 0x6c, 0xe6, 0xc1, 0x74, 0xcf, 0xc2, 0xde, 0xf2, 0x13,
- 0x38, 0x72, 0xe4, 0xe7, 0x98, 0x03, 0x52, 0x8e, 0x39, 0xa0, 0x15, 0xde, 0xdc, 0xf9, 0x01, 0x3e,
- 0xa1, 0xee, 0xe9, 0x9d, 0x5d, 0xbf, 0x70, 0x48, 0x6e, 0xdb, 0x5f, 0xd7, 0xf7, 0x7d, 0x55, 0x35,
- 0xd5, 0xb5, 0xe8, 0xab, 0xd1, 0x96, 0x20, 0x3c, 0xf5, 0x47, 0xc5, 0x00, 0xf2, 0x04, 0x24, 0x08,
- 0x7f, 0x0c, 0xc9, 0x30, 0xcd, 0x7d, 0x73, 0xc1, 0x32, 0xee, 0x87, 0x90, 0x4b, 0xfe, 0x88, 0x87,
- 0x4c, 0x5f, 0x6f, 0x0e, 0x40, 0xb2, 0x4d, 0x3f, 0x82, 0x04, 0x72, 0x26, 0x61, 0x48, 0xb2, 0x3c,
- 0x95, 0x29, 0x76, 0x4b, 0x02, 0x61, 0x19, 0x27, 0xcb, 0x04, 0x62, 0x08, 0xfd, 0x3b, 0x11, 0x97,
- 0x47, 0xc5, 0x80, 0x84, 0x69, 0xec, 0x47, 0x69, 0x94, 0xfa, 0x9a, 0x37, 0x28, 0x1e, 0xe9, 0x93,
- 0x3e, 0xe8, 0x5f, 0xa5, 0x5e, 0xdf, 0x5b, 0x4e, 0x20, 0xcd, 0xc1, 0x1f, 0x5f, 0xf0, 0xec, 0xdf,
- 0x5b, 0xc4, 0xc4, 0x2c, 0x3c, 0xe2, 0x09, 0xe4, 0x13, 0x3f, 0x1b, 0x45, 0x0a, 0x10, 0x7e, 0x0c,
- 0x92, 0x5d, 0xc6, 0xf2, 0xaf, 0x62, 0xe5, 0x45, 0x22, 0x79, 0x0c, 0x17, 0x08, 0x1f, 0x5f, 0x47,
- 0x10, 0xe1, 0x11, 0xc4, 0xec, 0x3c, 0xcf, 0xfb, 0x63, 0x05, 0xbd, 0xb5, 0xb3, 0x68, 0xc5, 0x3e,
- 0x8f, 0x12, 0x9e, 0x44, 0x14, 0x7e, 0x2a, 0x40, 0x48, 0xfc, 0x10, 0x75, 0x54, 0x86, 0x43, 0x26,
- 0x99, 0x6d, 0xad, 0x5b, 0x1b, 0xbd, 0xbb, 0x1f, 0x91, 0x45, 0x0f, 0x2b, 0x23, 0x92, 0x8d, 0x22,
- 0x05, 0x08, 0xa2, 0xa2, 0xc9, 0x78, 0x93, 0x7c, 0x3b, 0x78, 0x0c, 0xa1, 0x7c, 0x00, 0x92, 0x05,
- 0xf8, 0x78, 0xea, 0xd6, 0x66, 0x53, 0x17, 0x2d, 0x30, 0x5a, 0xa9, 0xe2, 0x87, 0xa8, 0x21, 0x32,
- 0x08, 0xed, 0x15, 0xad, 0xfe, 0x29, 0xb9, 0xe6, 0x0b, 0x91, 0x2b, 0x73, 0xdd, 0xcf, 0x20, 0x0c,
- 0x56, 0x8d, 0x57, 0x43, 0x9d, 0xa8, 0x56, 0xc6, 0x47, 0xa8, 0x25, 0x24, 0x93, 0x85, 0xb0, 0xeb,
- 0xda, 0xe3, 0xb3, 0xd7, 0xf0, 0xd0, 0x3a, 0xc1, 0x9a, 0x71, 0x69, 0x95, 0x67, 0x6a, 0xf4, 0xbd,
- 0x17, 0x75, 0xe4, 0x5d, 0xc9, 0xdd, 0x49, 0x93, 0x21, 0x97, 0x3c, 0x4d, 0xf0, 0x16, 0x6a, 0xc8,
- 0x49, 0x06, 0xba, 0xa1, 0xdd, 0xe0, 0xed, 0x79, 0xca, 0x07, 0x93, 0x0c, 0x4e, 0xa7, 0xee, 0x9b,
- 0xe7, 0xe3, 0x15, 0x4e, 0x35, 0x03, 0xef, 0x55, 0xa5, 0xb4, 0x34, 0xf7, 0xde, 0xd9, 0x44, 0x4e,
- 0xa7, 0xee, 0x25, 0x13, 0x49, 0x2a, 0xa5, 0xb3, 0xe9, 0xe2, 0x77, 0x51, 0x2b, 0x07, 0x26, 0xd2,
- 0x44, 0x37, 0xbf, 0xbb, 0x28, 0x8b, 0x6a, 0x94, 0x9a, 0x5b, 0xfc, 0x1e, 0x6a, 0xc7, 0x20, 0x04,
- 0x8b, 0x40, 0x77, 0xb0, 0x1b, 0xdc, 0x30, 0x81, 0xed, 0x07, 0x25, 0x4c, 0xe7, 0xf7, 0xf8, 0x31,
- 0x5a, 0x7b, 0xc2, 0x84, 0x3c, 0xcc, 0x86, 0x4c, 0xc2, 0x01, 0x8f, 0xc1, 0x6e, 0xe8, 0x9e, 0xbf,
- 0xff, 0x72, 0x53, 0xa3, 0x18, 0xc1, 0x2d, 0xa3, 0xbe, 0xb6, 0x77, 0x46, 0x89, 0x9e, 0x53, 0xc6,
- 0x63, 0x84, 0x15, 0x72, 0x90, 0xb3, 0x44, 0x94, 0x8d, 0x52, 0x7e, 0xcd, 0xff, 0xed, 0xd7, 0x37,
- 0x7e, 0x78, 0xef, 0x82, 0x1a, 0xbd, 0xc4, 0xc1, 0x9b, 0x5a, 0xe8, 0xf6, 0x95, 0x5f, 0x79, 0x8f,
- 0x0b, 0x89, 0x7f, 0xb8, 0xf0, 0x6a, 0xc8, 0xcb, 0xe5, 0xa3, 0xd8, 0xfa, 0xcd, 0xdc, 0x34, 0x39,
- 0x75, 0xe6, 0xc8, 0xd2, 0x8b, 0xf9, 0x11, 0x35, 0xb9, 0x84, 0x58, 0xd8, 0x2b, 0xeb, 0xf5, 0x8d,
- 0xde, 0xdd, 0xed, 0x57, 0x1f, 0xe7, 0xe0, 0x0d, 0x63, 0xd3, 0xdc, 0x55, 0x82, 0xb4, 0xd4, 0xf5,
- 0xfe, 0xa9, 0xff, 0x47, 0x81, 0xea, 0x61, 0xe1, 0x77, 0x50, 0x3b, 0x2f, 0x8f, 0xba, 0xbe, 0xd5,
- 0xa0, 0xa7, 0xa6, 0xc1, 0x44, 0xd0, 0xf9, 0x1d, 0x26, 0x08, 0x09, 0x1e, 0x25, 0x90, 0x7f, 0xc3,
- 0x62, 0xb0, 0xdb, 0xe5, 0x90, 0xa9, 0x4d, 0xb0, 0x5f, 0xa1, 0x74, 0x29, 0x02, 0x13, 0xd4, 0x2a,
- 0xd4, 0x18, 0x09, 0xbb, 0xb9, 0x5e, 0xdf, 0xe8, 0x06, 0xb7, 0xd4, 0x30, 0x1e, 0x6a, 0xe4, 0x74,
- 0xea, 0x76, 0xbe, 0x86, 0x89, 0x3e, 0x50, 0x13, 0x85, 0x3f, 0x44, 0x9d, 0x42, 0x40, 0x9e, 0x28,
- 0xf5, 0x72, 0x84, 0xab, 0xbe, 0x1d, 0x1a, 0x9c, 0x56, 0x11, 0xf8, 0x36, 0xaa, 0x17, 0x7c, 0x68,
- 0x46, 0xb8, 0x67, 0x02, 0xeb, 0x87, 0xbb, 0x5f, 0x50, 0x85, 0x63, 0x0f, 0xb5, 0xa2, 0x3c, 0x2d,
- 0x32, 0x61, 0x37, 0xb4, 0x39, 0x52, 0xe6, 0x5f, 0x6a, 0x84, 0x9a, 0x1b, 0x9c, 0xa0, 0x26, 0xfc,
- 0x22, 0x73, 0x66, 0xb7, 0x74, 0xeb, 0x77, 0x5f, 0x6f, 0x5b, 0x91, 0xfb, 0x4a, 0xeb, 0x7e, 0x22,
- 0xf3, 0xc9, 0xe2, 0x4b, 0x68, 0x8c, 0x96, 0x36, 0x7d, 0x40, 0x68, 0x11, 0x83, 0x6f, 0xa2, 0xfa,
- 0x08, 0x26, 0xe5, 0xda, 0xa0, 0xea, 0x27, 0xfe, 0x1c, 0x35, 0xc7, 0xec, 0x49, 0x01, 0x66, 0x7b,
- 0x7e, 0x70, 0x6d, 0x3e, 0x5a, 0xed, 0x3b, 0x45, 0xa1, 0x25, 0x73, 0x7b, 0x65, 0xcb, 0xf2, 0xfe,
- 0xb4, 0x90, 0x7b, 0xcd, 0xce, 0xc3, 0x3f, 0x23, 0x14, 0xce, 0xf7, 0x88, 0xb0, 0x2d, 0x5d, 0xff,
- 0xce, 0xab, 0xd7, 0x5f, 0xed, 0xa4, 0xc5, 0xdf, 0x43, 0x05, 0x09, 0xba, 0x64, 0x85, 0x37, 0x51,
- 0x6f, 0x49, 0x5a, 0x57, 0xba, 0x1a, 0xdc, 0x98, 0x4d, 0xdd, 0xde, 0x92, 0x38, 0x5d, 0x8e, 0xf1,
- 0x3e, 0x31, 0x6d, 0xd3, 0x85, 0x62, 0x77, 0xfe, 0x5e, 0x2c, 0xfd, 0x5d, 0xbb, 0xe7, 0xe7, 0x7d,
- 0xbb, 0xf3, 0xdb, 0xef, 0x6e, 0xed, 0xe9, 0x5f, 0xeb, 0xb5, 0xe0, 0xce, 0xf1, 0x89, 0x53, 0x7b,
- 0x76, 0xe2, 0xd4, 0x9e, 0x9f, 0x38, 0xb5, 0xa7, 0x33, 0xc7, 0x3a, 0x9e, 0x39, 0xd6, 0xb3, 0x99,
- 0x63, 0x3d, 0x9f, 0x39, 0xd6, 0xdf, 0x33, 0xc7, 0xfa, 0xf5, 0x85, 0x53, 0xfb, 0xbe, 0x6d, 0xaa,
- 0xfb, 0x37, 0x00, 0x00, 0xff, 0xff, 0x21, 0x97, 0x54, 0xe9, 0x99, 0x08, 0x00, 0x00,
+ // 912 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xdd, 0x6e, 0x1b, 0x45,
+ 0x14, 0xf6, 0xc6, 0xff, 0xe3, 0x92, 0xb6, 0x23, 0xa8, 0x16, 0x4b, 0xf5, 0x5a, 0x16, 0xa0, 0xf0,
+ 0xd3, 0x59, 0x52, 0x55, 0x10, 0xe5, 0x02, 0xc1, 0x86, 0x08, 0x22, 0x52, 0x90, 0x26, 0x09, 0x17,
+ 0x08, 0x89, 0x8e, 0xd7, 0xa7, 0x9b, 0xa9, 0xbb, 0x3f, 0xec, 0xcc, 0x9a, 0xfa, 0xae, 0x8f, 0xc0,
+ 0x25, 0x97, 0xbc, 0x03, 0x2f, 0x11, 0x2e, 0x90, 0x7a, 0xd9, 0x0b, 0x64, 0x11, 0xf7, 0x2d, 0x72,
+ 0x85, 0x66, 0x76, 0xbc, 0x76, 0xec, 0x84, 0x94, 0xf6, 0xce, 0xf3, 0xcd, 0xf9, 0xbe, 0xef, 0x9c,
+ 0xb3, 0xe7, 0x8c, 0xd1, 0xd7, 0xc3, 0x2d, 0x41, 0x78, 0xec, 0x0e, 0xb3, 0x3e, 0xa4, 0x11, 0x48,
+ 0x10, 0xee, 0x08, 0xa2, 0x41, 0x9c, 0xba, 0xe6, 0x82, 0x25, 0xdc, 0xf5, 0x21, 0x95, 0xfc, 0x21,
+ 0xf7, 0x99, 0xbe, 0xde, 0xec, 0x83, 0x64, 0x9b, 0x6e, 0x00, 0x11, 0xa4, 0x4c, 0xc2, 0x80, 0x24,
+ 0x69, 0x2c, 0x63, 0xec, 0xe4, 0x04, 0xc2, 0x12, 0x4e, 0x16, 0x09, 0xc4, 0x10, 0xda, 0x77, 0x02,
+ 0x2e, 0x8f, 0xb3, 0x3e, 0xf1, 0xe3, 0xd0, 0x0d, 0xe2, 0x20, 0x76, 0x35, 0xaf, 0x9f, 0x3d, 0xd4,
+ 0x27, 0x7d, 0xd0, 0xbf, 0x72, 0xbd, 0x76, 0x6f, 0x31, 0x81, 0x38, 0x05, 0x77, 0xb4, 0xe2, 0xd9,
+ 0xbe, 0x37, 0x8f, 0x09, 0x99, 0x7f, 0xcc, 0x23, 0x48, 0xc7, 0x6e, 0x32, 0x0c, 0x14, 0x20, 0xdc,
+ 0x10, 0x24, 0xbb, 0x88, 0xe5, 0x5e, 0xc6, 0x4a, 0xb3, 0x48, 0xf2, 0x10, 0x56, 0x08, 0x9f, 0x5c,
+ 0x45, 0x10, 0xfe, 0x31, 0x84, 0x6c, 0x99, 0xd7, 0xfb, 0x73, 0x0d, 0xbd, 0xbd, 0x33, 0x6f, 0xc5,
+ 0x01, 0x0f, 0x22, 0x1e, 0x05, 0x14, 0x7e, 0xce, 0x40, 0x48, 0xfc, 0x00, 0x35, 0x54, 0x86, 0x03,
+ 0x26, 0x99, 0x6d, 0x75, 0xad, 0x8d, 0xd6, 0xdd, 0x8f, 0xc9, 0xbc, 0x87, 0x85, 0x11, 0x49, 0x86,
+ 0x81, 0x02, 0x04, 0x51, 0xd1, 0x64, 0xb4, 0x49, 0xbe, 0xeb, 0x3f, 0x02, 0x5f, 0xde, 0x07, 0xc9,
+ 0x3c, 0x7c, 0x32, 0x71, 0x4a, 0xd3, 0x89, 0x83, 0xe6, 0x18, 0x2d, 0x54, 0xf1, 0x03, 0x54, 0x11,
+ 0x09, 0xf8, 0xf6, 0x9a, 0x56, 0xff, 0x8c, 0x5c, 0xf1, 0x85, 0xc8, 0xa5, 0xb9, 0x1e, 0x24, 0xe0,
+ 0x7b, 0xd7, 0x8c, 0x57, 0x45, 0x9d, 0xa8, 0x56, 0xc6, 0xc7, 0xa8, 0x26, 0x24, 0x93, 0x99, 0xb0,
+ 0xcb, 0xda, 0xe3, 0xf3, 0xd7, 0xf0, 0xd0, 0x3a, 0xde, 0xba, 0x71, 0xa9, 0xe5, 0x67, 0x6a, 0xf4,
+ 0x7b, 0x2f, 0xca, 0xa8, 0x77, 0x29, 0x77, 0x27, 0x8e, 0x06, 0x5c, 0xf2, 0x38, 0xc2, 0x5b, 0xa8,
+ 0x22, 0xc7, 0x09, 0xe8, 0x86, 0x36, 0xbd, 0x77, 0x66, 0x29, 0x1f, 0x8e, 0x13, 0x38, 0x9b, 0x38,
+ 0x6f, 0x2e, 0xc7, 0x2b, 0x9c, 0x6a, 0x06, 0xde, 0x2f, 0x4a, 0xa9, 0x69, 0xee, 0xbd, 0xf3, 0x89,
+ 0x9c, 0x4d, 0x9c, 0x0b, 0x26, 0x92, 0x14, 0x4a, 0xe7, 0xd3, 0xc5, 0xef, 0xa1, 0x5a, 0x0a, 0x4c,
+ 0xc4, 0x91, 0x6e, 0x7e, 0x73, 0x5e, 0x16, 0xd5, 0x28, 0x35, 0xb7, 0xf8, 0x7d, 0x54, 0x0f, 0x41,
+ 0x08, 0x16, 0x80, 0xee, 0x60, 0xd3, 0xbb, 0x6e, 0x02, 0xeb, 0xf7, 0x73, 0x98, 0xce, 0xee, 0xf1,
+ 0x23, 0xb4, 0xfe, 0x98, 0x09, 0x79, 0x94, 0x0c, 0x98, 0x84, 0x43, 0x1e, 0x82, 0x5d, 0xd1, 0x3d,
+ 0xff, 0xe0, 0xe5, 0xa6, 0x46, 0x31, 0xbc, 0x5b, 0x46, 0x7d, 0x7d, 0xff, 0x9c, 0x12, 0x5d, 0x52,
+ 0xc6, 0x23, 0x84, 0x15, 0x72, 0x98, 0xb2, 0x48, 0xe4, 0x8d, 0x52, 0x7e, 0xd5, 0xff, 0xed, 0xd7,
+ 0x36, 0x7e, 0x78, 0x7f, 0x45, 0x8d, 0x5e, 0xe0, 0xd0, 0x9b, 0x58, 0xe8, 0xf6, 0xa5, 0x5f, 0x79,
+ 0x9f, 0x0b, 0x89, 0x7f, 0x5c, 0xd9, 0x1a, 0xf2, 0x72, 0xf9, 0x28, 0xb6, 0xde, 0x99, 0x1b, 0x26,
+ 0xa7, 0xc6, 0x0c, 0x59, 0xd8, 0x98, 0x9f, 0x50, 0x95, 0x4b, 0x08, 0x85, 0xbd, 0xd6, 0x2d, 0x6f,
+ 0xb4, 0xee, 0x6e, 0xbf, 0xfa, 0x38, 0x7b, 0x6f, 0x18, 0x9b, 0xea, 0x9e, 0x12, 0xa4, 0xb9, 0x6e,
+ 0xef, 0x8f, 0xca, 0x7f, 0x14, 0xa8, 0x16, 0x0b, 0xbf, 0x8b, 0xea, 0x69, 0x7e, 0xd4, 0xf5, 0x5d,
+ 0xf3, 0x5a, 0x6a, 0x1a, 0x4c, 0x04, 0x9d, 0xdd, 0x61, 0x82, 0x90, 0xe0, 0x41, 0x04, 0xe9, 0xb7,
+ 0x2c, 0x04, 0xbb, 0x9e, 0x0f, 0x99, 0x7a, 0x09, 0x0e, 0x0a, 0x94, 0x2e, 0x44, 0xe0, 0x1d, 0x74,
+ 0x13, 0x9e, 0x24, 0x3c, 0x65, 0x7a, 0x58, 0xc1, 0x8f, 0xa3, 0x81, 0xb0, 0x1b, 0x5d, 0x6b, 0xa3,
+ 0xea, 0xbd, 0x35, 0x9d, 0x38, 0x37, 0x77, 0x97, 0x2f, 0xe9, 0x6a, 0x3c, 0x26, 0xa8, 0x96, 0xa9,
+ 0x59, 0x14, 0x76, 0xb5, 0x5b, 0xde, 0x68, 0x7a, 0xb7, 0xd4, 0x44, 0x1f, 0x69, 0xe4, 0x6c, 0xe2,
+ 0x34, 0xbe, 0x81, 0xb1, 0x3e, 0x50, 0x13, 0x85, 0x3f, 0x42, 0x8d, 0x4c, 0x40, 0x1a, 0xa9, 0x14,
+ 0xf3, 0x3d, 0x28, 0x9a, 0x7f, 0x64, 0x70, 0x5a, 0x44, 0xe0, 0xdb, 0xa8, 0x9c, 0xf1, 0x81, 0xd9,
+ 0x83, 0x96, 0x09, 0x2c, 0x1f, 0xed, 0x7d, 0x49, 0x15, 0x8e, 0x7b, 0xa8, 0x16, 0xa4, 0x71, 0x96,
+ 0x08, 0xbb, 0xa2, 0xcd, 0x91, 0x32, 0xff, 0x4a, 0x23, 0xd4, 0xdc, 0xe0, 0x08, 0x55, 0xe1, 0x89,
+ 0x4c, 0x99, 0x5d, 0xd3, 0xdf, 0x6f, 0xef, 0xf5, 0x9e, 0x3c, 0xb2, 0xab, 0xb4, 0x76, 0x23, 0x99,
+ 0x8e, 0xe7, 0x9f, 0x53, 0x63, 0x34, 0xb7, 0x69, 0x03, 0x42, 0xf3, 0x18, 0x7c, 0x03, 0x95, 0x87,
+ 0x30, 0xce, 0xdf, 0x1e, 0xaa, 0x7e, 0xe2, 0x2f, 0x50, 0x75, 0xc4, 0x1e, 0x67, 0x60, 0x9e, 0xe0,
+ 0x0f, 0xaf, 0xcc, 0x47, 0xab, 0x7d, 0xaf, 0x28, 0x34, 0x67, 0x6e, 0xaf, 0x6d, 0x59, 0xbd, 0xbf,
+ 0x2c, 0xe4, 0x5c, 0xf1, 0x70, 0xe2, 0x5f, 0x10, 0xf2, 0x67, 0x8f, 0x91, 0xb0, 0x2d, 0x5d, 0xff,
+ 0xce, 0xab, 0xd7, 0x5f, 0x3c, 0x6c, 0xf3, 0xff, 0x98, 0x02, 0x12, 0x74, 0xc1, 0x0a, 0x6f, 0xa2,
+ 0xd6, 0x82, 0xb4, 0xae, 0xf4, 0x9a, 0x77, 0x7d, 0x3a, 0x71, 0x5a, 0x0b, 0xe2, 0x74, 0x31, 0xa6,
+ 0xf7, 0xa9, 0x69, 0x9b, 0x2e, 0x14, 0x3b, 0xb3, 0xa5, 0xb3, 0xf4, 0x77, 0x6d, 0x2e, 0x2f, 0xcd,
+ 0x76, 0xe3, 0xb7, 0xdf, 0x9d, 0xd2, 0xd3, 0xbf, 0xbb, 0x25, 0xef, 0xce, 0xc9, 0x69, 0xa7, 0xf4,
+ 0xec, 0xb4, 0x53, 0x7a, 0x7e, 0xda, 0x29, 0x3d, 0x9d, 0x76, 0xac, 0x93, 0x69, 0xc7, 0x7a, 0x36,
+ 0xed, 0x58, 0xcf, 0xa7, 0x1d, 0xeb, 0x9f, 0x69, 0xc7, 0xfa, 0xf5, 0x45, 0xa7, 0xf4, 0x43, 0xdd,
+ 0x54, 0xf7, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x95, 0x4b, 0xd3, 0xe2, 0xde, 0x08, 0x00, 0x00,
}
func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) {
@@ -470,6 +472,11 @@ func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int,
_ = i
var l int
_ = l
+ if m.ExpirationSeconds != nil {
+ i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds))
+ i--
+ dAtA[i] = 0x40
+ }
if m.SignerName != nil {
i -= len(*m.SignerName)
copy(dAtA[i:], *m.SignerName)
@@ -723,6 +730,9 @@ func (m *CertificateSigningRequestSpec) Size() (n int) {
l = len(*m.SignerName)
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.ExpirationSeconds != nil {
+ n += 1 + sovGenerated(uint64(*m.ExpirationSeconds))
+ }
return n
}
@@ -831,6 +841,7 @@ func (this *CertificateSigningRequestSpec) String() string {
`Usages:` + fmt.Sprintf("%v", this.Usages) + `,`,
`Extra:` + mapStringForExtra + `,`,
`SignerName:` + valueToStringGenerated(this.SignerName) + `,`,
+ `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`,
`}`,
}, "")
return s
@@ -1722,6 +1733,26 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
s := string(dAtA[iNdEx:postIndex])
m.SignerName = &s
iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType)
+ }
+ var v int32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.ExpirationSeconds = &v
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
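
The v1beta1 Size hunk charges 1 + sovGenerated(value) bytes for the new field: one byte for the 0x40 tag plus the varint length of the value. A rough stand-in for that accounting (varintLen is an assumption here, not the generated sovGenerated helper):

```go
// Rough stand-in for the size accounting above, not the generated sovGenerated
// helper itself: one byte for the 0x40 tag plus the varint length of the value.
package example

func varintLen(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

func expirationSecondsWireSize(expirationSeconds *int32) int {
	if expirationSeconds == nil {
		return 0 // optional field: absent values take no space
	}
	return 1 + varintLen(uint64(*expirationSeconds))
}
```
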
diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
index 73631fb77..904e95207 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto
@@ -34,8 +34,9 @@ message CertificateSigningRequest {
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
- // The certificate request itself and any additional information.
- // +optional
+ // spec contains the certificate request, and is immutable after creation.
+ // Only the request, signerName, expirationSeconds, and usages fields can be set on creation.
+ // Other fields are derived by Kubernetes and cannot be modified by users.
optional CertificateSigningRequestSpec spec = 2;
// Derived information about the request.
@@ -80,9 +81,7 @@ message CertificateSigningRequestList {
repeated CertificateSigningRequest items = 2;
}
-// This information is immutable after the request is created. Only the Request
-// and Usages fields can be set on creation, other fields are derived by
-// Kubernetes and cannot be modified by users.
+// CertificateSigningRequestSpec contains the certificate request.
message CertificateSigningRequestSpec {
// Base64-encoded PKCS#10 CSR data
// +listType=atomic
@@ -101,6 +100,30 @@ message CertificateSigningRequestSpec {
// +optional
optional string signerName = 7;
+ // expirationSeconds is the requested duration of validity of the issued
+ // certificate. The certificate signer may issue a certificate with a different
+ // validity duration so a client must check the delta between the notBefore and
+ // notAfter fields in the issued certificate to determine the actual duration.
+ //
+ // The v1.22+ in-tree implementations of the well-known Kubernetes signers will
+ // honor this field as long as the requested duration is not greater than the
+ // maximum duration they will honor per the --cluster-signing-duration CLI
+ // flag to the Kubernetes controller manager.
+ //
+ // Certificate signers may not honor this field for various reasons:
+ //
+ // 1. Old signer that is unaware of the field (such as the in-tree
+ // implementations prior to v1.22)
+ // 2. Signer whose configured maximum is shorter than the requested duration
+ // 3. Signer whose configured minimum is longer than the requested duration
+ //
+ // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes.
+ //
+ // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate.
+ //
+ // +optional
+ optional int32 expirationSeconds = 8;
+
// allowedUsages specifies a set of usage contexts the key will be
// valid for.
// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go
index 9e61c67ff..031ef7755 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/types.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/types.go
@@ -36,18 +36,17 @@ type CertificateSigningRequest struct {
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- // The certificate request itself and any additional information.
- // +optional
- Spec CertificateSigningRequestSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+ // spec contains the certificate request, and is immutable after creation.
+ // Only the request, signerName, expirationSeconds, and usages fields can be set on creation.
+ // Other fields are derived by Kubernetes and cannot be modified by users.
+ Spec CertificateSigningRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
// Derived information about the request.
// +optional
Status CertificateSigningRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
-// This information is immutable after the request is created. Only the Request
-// and Usages fields can be set on creation, other fields are derived by
-// Kubernetes and cannot be modified by users.
+// CertificateSigningRequestSpec contains the certificate request.
type CertificateSigningRequestSpec struct {
// Base64-encoded PKCS#10 CSR data
// +listType=atomic
@@ -66,6 +65,30 @@ type CertificateSigningRequestSpec struct {
// +optional
SignerName *string `json:"signerName,omitempty" protobuf:"bytes,7,opt,name=signerName"`
+ // expirationSeconds is the requested duration of validity of the issued
+ // certificate. The certificate signer may issue a certificate with a different
+ // validity duration so a client must check the delta between the notBefore and
+ // notAfter fields in the issued certificate to determine the actual duration.
+ //
+ // The v1.22+ in-tree implementations of the well-known Kubernetes signers will
+ // honor this field as long as the requested duration is not greater than the
+ // maximum duration they will honor per the --cluster-signing-duration CLI
+ // flag to the Kubernetes controller manager.
+ //
+ // Certificate signers may not honor this field for various reasons:
+ //
+ // 1. Old signer that is unaware of the field (such as the in-tree
+ // implementations prior to v1.22)
+ // 2. Signer whose configured maximum is shorter than the requested duration
+ // 3. Signer whose configured minimum is longer than the requested duration
+ //
+ // The minimum valid value for expirationSeconds is 600, i.e. 10 minutes.
+ //
+ // As of v1.22, this field is beta and is controlled via the CSRDuration feature gate.
+ //
+ // +optional
+ ExpirationSeconds *int32 `json:"expirationSeconds,omitempty" protobuf:"varint,8,opt,name=expirationSeconds"`
+
// allowedUsages specifies a set of usage contexts the key will be
// valid for.
// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
index 396fee0bf..816a618cb 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
@@ -29,7 +29,7 @@ package v1beta1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_CertificateSigningRequest = map[string]string{
"": "Describes a certificate signing request",
- "spec": "The certificate request itself and any additional information.",
+ "spec": "spec contains the certificate request, and is immutable after creation. Only the request, signerName, expirationSeconds, and usages fields can be set on creation. Other fields are derived by Kubernetes and cannot be modified by users.",
"status": "Derived information about the request.",
}
@@ -51,14 +51,15 @@ func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string {
}
var map_CertificateSigningRequestSpec = map[string]string{
- "": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.",
- "request": "Base64-encoded PKCS#10 CSR data",
- "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.",
- "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"",
- "username": "Information about the requesting user. See user.Info interface for details.",
- "uid": "UID information about the requesting user. See user.Info interface for details.",
- "groups": "Group information about the requesting user. See user.Info interface for details.",
- "extra": "Extra information about the requesting user. See user.Info interface for details.",
+ "": "CertificateSigningRequestSpec contains the certificate request.",
+ "request": "Base64-encoded PKCS#10 CSR data",
+ "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.",
+ "expirationSeconds": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.\n\nAs of v1.22, this field is beta and is controlled via the CSRDuration feature gate.",
+ "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12\nValid values are:\n \"signing\",\n \"digital signature\",\n \"content commitment\",\n \"key encipherment\",\n \"key agreement\",\n \"data encipherment\",\n \"cert sign\",\n \"crl sign\",\n \"encipher only\",\n \"decipher only\",\n \"any\",\n \"server auth\",\n \"client auth\",\n \"code signing\",\n \"email protection\",\n \"s/mime\",\n \"ipsec end system\",\n \"ipsec tunnel\",\n \"ipsec user\",\n \"timestamping\",\n \"ocsp signing\",\n \"microsoft sgc\",\n \"netscape sgc\"",
+ "username": "Information about the requesting user. See user.Info interface for details.",
+ "uid": "UID information about the requesting user. See user.Info interface for details.",
+ "groups": "Group information about the requesting user. See user.Info interface for details.",
+ "extra": "Extra information about the requesting user. See user.Info interface for details.",
}
func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
index 0463f5bb0..48e460aac 100644
--- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go
@@ -116,6 +116,11 @@ func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningReq
*out = new(string)
**out = **in
}
+ if in.ExpirationSeconds != nil {
+ in, out := &in.ExpirationSeconds, &out.ExpirationSeconds
+ *out = new(int32)
+ **out = **in
+ }
if in.Usages != nil {
in, out := &in.Usages, &out.Usages
*out = make([]KeyUsage, len(*in))
diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go
index 22476b2bd..7fde09126 100644
--- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go
+++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go
@@ -23,9 +23,6 @@ const (
// webhook backend fails.
ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open"
- // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation
- PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude"
-
// MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods
MirrorPodAnnotationKey string = "kubernetes.io/config.mirror"
@@ -144,7 +141,7 @@ const (
// pod deletion order.
// The implicit deletion cost for pods that don't set the annotation is 0, negative values are permitted.
//
- // This annotation is alpha-level and is only honored when PodDeletionCost feature is enabled.
+ // This annotation is beta-level and is only honored when PodDeletionCost feature is enabled.
PodDeletionCost = "controller.kubernetes.io/pod-deletion-cost"
// AnnotationTopologyAwareHints can be used to enable or disable Topology
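
The PodDeletionCost annotation is promoted from alpha to beta in this hunk; it hints the ReplicaSet controller about scale-down order, with lower-cost pods preferred for deletion when the PodDeletionCost feature gate is enabled. A sketch of setting it via the constant from this file (the helper name is illustrative):

```go
// Sketch of setting the (now beta) pod-deletion-cost annotation via the constant
// from this file; the helper name is illustrative. Lower costs make a pod a
// preferred victim when its ReplicaSet scales down.
package example

import (
	"strconv"

	corev1 "k8s.io/api/core/v1"
)

func markCheapToDelete(pod *corev1.Pod, cost int32) {
	if pod.Annotations == nil {
		pod.Annotations = map[string]string{}
	}
	pod.Annotations[corev1.PodDeletionCost] = strconv.FormatInt(int64(cost), 10)
}
```
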
diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go
index 286a82ecd..259444371 100644
--- a/vendor/k8s.io/api/core/v1/generated.pb.go
+++ b/vendor/k8s.io/api/core/v1/generated.pb.go
@@ -1421,38 +1421,10 @@ func (m *EphemeralContainerCommon) XXX_DiscardUnknown() {
var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo
-func (m *EphemeralContainers) Reset() { *m = EphemeralContainers{} }
-func (*EphemeralContainers) ProtoMessage() {}
-func (*EphemeralContainers) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{49}
-}
-func (m *EphemeralContainers) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EphemeralContainers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *EphemeralContainers) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EphemeralContainers.Merge(m, src)
-}
-func (m *EphemeralContainers) XXX_Size() int {
- return m.Size()
-}
-func (m *EphemeralContainers) XXX_DiscardUnknown() {
- xxx_messageInfo_EphemeralContainers.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EphemeralContainers proto.InternalMessageInfo
-
func (m *EphemeralVolumeSource) Reset() { *m = EphemeralVolumeSource{} }
func (*EphemeralVolumeSource) ProtoMessage() {}
func (*EphemeralVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{50}
+ return fileDescriptor_83c10c24ec417dc9, []int{49}
}
func (m *EphemeralVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1480,7 +1452,7 @@ var xxx_messageInfo_EphemeralVolumeSource proto.InternalMessageInfo
func (m *Event) Reset() { *m = Event{} }
func (*Event) ProtoMessage() {}
func (*Event) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{51}
+ return fileDescriptor_83c10c24ec417dc9, []int{50}
}
func (m *Event) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1508,7 +1480,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo
func (m *EventList) Reset() { *m = EventList{} }
func (*EventList) ProtoMessage() {}
func (*EventList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{52}
+ return fileDescriptor_83c10c24ec417dc9, []int{51}
}
func (m *EventList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1536,7 +1508,7 @@ var xxx_messageInfo_EventList proto.InternalMessageInfo
func (m *EventSeries) Reset() { *m = EventSeries{} }
func (*EventSeries) ProtoMessage() {}
func (*EventSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{53}
+ return fileDescriptor_83c10c24ec417dc9, []int{52}
}
func (m *EventSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1564,7 +1536,7 @@ var xxx_messageInfo_EventSeries proto.InternalMessageInfo
func (m *EventSource) Reset() { *m = EventSource{} }
func (*EventSource) ProtoMessage() {}
func (*EventSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{54}
+ return fileDescriptor_83c10c24ec417dc9, []int{53}
}
func (m *EventSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1592,7 +1564,7 @@ var xxx_messageInfo_EventSource proto.InternalMessageInfo
func (m *ExecAction) Reset() { *m = ExecAction{} }
func (*ExecAction) ProtoMessage() {}
func (*ExecAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{55}
+ return fileDescriptor_83c10c24ec417dc9, []int{54}
}
func (m *ExecAction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1620,7 +1592,7 @@ var xxx_messageInfo_ExecAction proto.InternalMessageInfo
func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} }
func (*FCVolumeSource) ProtoMessage() {}
func (*FCVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{56}
+ return fileDescriptor_83c10c24ec417dc9, []int{55}
}
func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1648,7 +1620,7 @@ var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo
func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} }
func (*FlexPersistentVolumeSource) ProtoMessage() {}
func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{57}
+ return fileDescriptor_83c10c24ec417dc9, []int{56}
}
func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1676,7 +1648,7 @@ var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo
func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} }
func (*FlexVolumeSource) ProtoMessage() {}
func (*FlexVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{58}
+ return fileDescriptor_83c10c24ec417dc9, []int{57}
}
func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1704,7 +1676,7 @@ var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo
func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} }
func (*FlockerVolumeSource) ProtoMessage() {}
func (*FlockerVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{59}
+ return fileDescriptor_83c10c24ec417dc9, []int{58}
}
func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1732,7 +1704,7 @@ var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo
func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} }
func (*GCEPersistentDiskVolumeSource) ProtoMessage() {}
func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{60}
+ return fileDescriptor_83c10c24ec417dc9, []int{59}
}
func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1760,7 +1732,7 @@ var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo
func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} }
func (*GitRepoVolumeSource) ProtoMessage() {}
func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{61}
+ return fileDescriptor_83c10c24ec417dc9, []int{60}
}
func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1788,7 +1760,7 @@ var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo
func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} }
func (*GlusterfsPersistentVolumeSource) ProtoMessage() {}
func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{62}
+ return fileDescriptor_83c10c24ec417dc9, []int{61}
}
func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1816,7 +1788,7 @@ var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo
func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} }
func (*GlusterfsVolumeSource) ProtoMessage() {}
func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{63}
+ return fileDescriptor_83c10c24ec417dc9, []int{62}
}
func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1844,7 +1816,7 @@ var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo
func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} }
func (*HTTPGetAction) ProtoMessage() {}
func (*HTTPGetAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{64}
+ return fileDescriptor_83c10c24ec417dc9, []int{63}
}
func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1872,7 +1844,7 @@ var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo
func (m *HTTPHeader) Reset() { *m = HTTPHeader{} }
func (*HTTPHeader) ProtoMessage() {}
func (*HTTPHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{65}
+ return fileDescriptor_83c10c24ec417dc9, []int{64}
}
func (m *HTTPHeader) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1900,7 +1872,7 @@ var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo
func (m *Handler) Reset() { *m = Handler{} }
func (*Handler) ProtoMessage() {}
func (*Handler) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{66}
+ return fileDescriptor_83c10c24ec417dc9, []int{65}
}
func (m *Handler) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1928,7 +1900,7 @@ var xxx_messageInfo_Handler proto.InternalMessageInfo
func (m *HostAlias) Reset() { *m = HostAlias{} }
func (*HostAlias) ProtoMessage() {}
func (*HostAlias) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{67}
+ return fileDescriptor_83c10c24ec417dc9, []int{66}
}
func (m *HostAlias) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1956,7 +1928,7 @@ var xxx_messageInfo_HostAlias proto.InternalMessageInfo
func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} }
func (*HostPathVolumeSource) ProtoMessage() {}
func (*HostPathVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{68}
+ return fileDescriptor_83c10c24ec417dc9, []int{67}
}
func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -1984,7 +1956,7 @@ var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo
func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} }
func (*ISCSIPersistentVolumeSource) ProtoMessage() {}
func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{69}
+ return fileDescriptor_83c10c24ec417dc9, []int{68}
}
func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2012,7 +1984,7 @@ var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo
func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} }
func (*ISCSIVolumeSource) ProtoMessage() {}
func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{70}
+ return fileDescriptor_83c10c24ec417dc9, []int{69}
}
func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2040,7 +2012,7 @@ var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo
func (m *KeyToPath) Reset() { *m = KeyToPath{} }
func (*KeyToPath) ProtoMessage() {}
func (*KeyToPath) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{71}
+ return fileDescriptor_83c10c24ec417dc9, []int{70}
}
func (m *KeyToPath) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2068,7 +2040,7 @@ var xxx_messageInfo_KeyToPath proto.InternalMessageInfo
func (m *Lifecycle) Reset() { *m = Lifecycle{} }
func (*Lifecycle) ProtoMessage() {}
func (*Lifecycle) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{72}
+ return fileDescriptor_83c10c24ec417dc9, []int{71}
}
func (m *Lifecycle) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2096,7 +2068,7 @@ var xxx_messageInfo_Lifecycle proto.InternalMessageInfo
func (m *LimitRange) Reset() { *m = LimitRange{} }
func (*LimitRange) ProtoMessage() {}
func (*LimitRange) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{73}
+ return fileDescriptor_83c10c24ec417dc9, []int{72}
}
func (m *LimitRange) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2124,7 +2096,7 @@ var xxx_messageInfo_LimitRange proto.InternalMessageInfo
func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} }
func (*LimitRangeItem) ProtoMessage() {}
func (*LimitRangeItem) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{74}
+ return fileDescriptor_83c10c24ec417dc9, []int{73}
}
func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2152,7 +2124,7 @@ var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo
func (m *LimitRangeList) Reset() { *m = LimitRangeList{} }
func (*LimitRangeList) ProtoMessage() {}
func (*LimitRangeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{75}
+ return fileDescriptor_83c10c24ec417dc9, []int{74}
}
func (m *LimitRangeList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2180,7 +2152,7 @@ var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo
func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} }
func (*LimitRangeSpec) ProtoMessage() {}
func (*LimitRangeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{76}
+ return fileDescriptor_83c10c24ec417dc9, []int{75}
}
func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2208,7 +2180,7 @@ var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo
func (m *List) Reset() { *m = List{} }
func (*List) ProtoMessage() {}
func (*List) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{77}
+ return fileDescriptor_83c10c24ec417dc9, []int{76}
}
func (m *List) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2236,7 +2208,7 @@ var xxx_messageInfo_List proto.InternalMessageInfo
func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} }
func (*LoadBalancerIngress) ProtoMessage() {}
func (*LoadBalancerIngress) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{78}
+ return fileDescriptor_83c10c24ec417dc9, []int{77}
}
func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2264,7 +2236,7 @@ var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo
func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} }
func (*LoadBalancerStatus) ProtoMessage() {}
func (*LoadBalancerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{79}
+ return fileDescriptor_83c10c24ec417dc9, []int{78}
}
func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2292,7 +2264,7 @@ var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo
func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} }
func (*LocalObjectReference) ProtoMessage() {}
func (*LocalObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{80}
+ return fileDescriptor_83c10c24ec417dc9, []int{79}
}
func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2320,7 +2292,7 @@ var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo
func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} }
func (*LocalVolumeSource) ProtoMessage() {}
func (*LocalVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{81}
+ return fileDescriptor_83c10c24ec417dc9, []int{80}
}
func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2348,7 +2320,7 @@ var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo
func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} }
func (*NFSVolumeSource) ProtoMessage() {}
func (*NFSVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{82}
+ return fileDescriptor_83c10c24ec417dc9, []int{81}
}
func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2376,7 +2348,7 @@ var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo
func (m *Namespace) Reset() { *m = Namespace{} }
func (*Namespace) ProtoMessage() {}
func (*Namespace) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{83}
+ return fileDescriptor_83c10c24ec417dc9, []int{82}
}
func (m *Namespace) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2404,7 +2376,7 @@ var xxx_messageInfo_Namespace proto.InternalMessageInfo
func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} }
func (*NamespaceCondition) ProtoMessage() {}
func (*NamespaceCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{84}
+ return fileDescriptor_83c10c24ec417dc9, []int{83}
}
func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2432,7 +2404,7 @@ var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo
func (m *NamespaceList) Reset() { *m = NamespaceList{} }
func (*NamespaceList) ProtoMessage() {}
func (*NamespaceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{85}
+ return fileDescriptor_83c10c24ec417dc9, []int{84}
}
func (m *NamespaceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2460,7 +2432,7 @@ var xxx_messageInfo_NamespaceList proto.InternalMessageInfo
func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} }
func (*NamespaceSpec) ProtoMessage() {}
func (*NamespaceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{86}
+ return fileDescriptor_83c10c24ec417dc9, []int{85}
}
func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2488,7 +2460,7 @@ var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo
func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} }
func (*NamespaceStatus) ProtoMessage() {}
func (*NamespaceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{87}
+ return fileDescriptor_83c10c24ec417dc9, []int{86}
}
func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2516,7 +2488,7 @@ var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo
func (m *Node) Reset() { *m = Node{} }
func (*Node) ProtoMessage() {}
func (*Node) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{88}
+ return fileDescriptor_83c10c24ec417dc9, []int{87}
}
func (m *Node) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2544,7 +2516,7 @@ var xxx_messageInfo_Node proto.InternalMessageInfo
func (m *NodeAddress) Reset() { *m = NodeAddress{} }
func (*NodeAddress) ProtoMessage() {}
func (*NodeAddress) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{89}
+ return fileDescriptor_83c10c24ec417dc9, []int{88}
}
func (m *NodeAddress) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2572,7 +2544,7 @@ var xxx_messageInfo_NodeAddress proto.InternalMessageInfo
func (m *NodeAffinity) Reset() { *m = NodeAffinity{} }
func (*NodeAffinity) ProtoMessage() {}
func (*NodeAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{90}
+ return fileDescriptor_83c10c24ec417dc9, []int{89}
}
func (m *NodeAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2600,7 +2572,7 @@ var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo
func (m *NodeCondition) Reset() { *m = NodeCondition{} }
func (*NodeCondition) ProtoMessage() {}
func (*NodeCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{91}
+ return fileDescriptor_83c10c24ec417dc9, []int{90}
}
func (m *NodeCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2628,7 +2600,7 @@ var xxx_messageInfo_NodeCondition proto.InternalMessageInfo
func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} }
func (*NodeConfigSource) ProtoMessage() {}
func (*NodeConfigSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{92}
+ return fileDescriptor_83c10c24ec417dc9, []int{91}
}
func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2656,7 +2628,7 @@ var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo
func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} }
func (*NodeConfigStatus) ProtoMessage() {}
func (*NodeConfigStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{93}
+ return fileDescriptor_83c10c24ec417dc9, []int{92}
}
func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2684,7 +2656,7 @@ var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo
func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} }
func (*NodeDaemonEndpoints) ProtoMessage() {}
func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{94}
+ return fileDescriptor_83c10c24ec417dc9, []int{93}
}
func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2712,7 +2684,7 @@ var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo
func (m *NodeList) Reset() { *m = NodeList{} }
func (*NodeList) ProtoMessage() {}
func (*NodeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{95}
+ return fileDescriptor_83c10c24ec417dc9, []int{94}
}
func (m *NodeList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2740,7 +2712,7 @@ var xxx_messageInfo_NodeList proto.InternalMessageInfo
func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} }
func (*NodeProxyOptions) ProtoMessage() {}
func (*NodeProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{96}
+ return fileDescriptor_83c10c24ec417dc9, []int{95}
}
func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2768,7 +2740,7 @@ var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo
func (m *NodeResources) Reset() { *m = NodeResources{} }
func (*NodeResources) ProtoMessage() {}
func (*NodeResources) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{97}
+ return fileDescriptor_83c10c24ec417dc9, []int{96}
}
func (m *NodeResources) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2796,7 +2768,7 @@ var xxx_messageInfo_NodeResources proto.InternalMessageInfo
func (m *NodeSelector) Reset() { *m = NodeSelector{} }
func (*NodeSelector) ProtoMessage() {}
func (*NodeSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{98}
+ return fileDescriptor_83c10c24ec417dc9, []int{97}
}
func (m *NodeSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2824,7 +2796,7 @@ var xxx_messageInfo_NodeSelector proto.InternalMessageInfo
func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} }
func (*NodeSelectorRequirement) ProtoMessage() {}
func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{99}
+ return fileDescriptor_83c10c24ec417dc9, []int{98}
}
func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2852,7 +2824,7 @@ var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo
func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} }
func (*NodeSelectorTerm) ProtoMessage() {}
func (*NodeSelectorTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{100}
+ return fileDescriptor_83c10c24ec417dc9, []int{99}
}
func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2880,7 +2852,7 @@ var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo
func (m *NodeSpec) Reset() { *m = NodeSpec{} }
func (*NodeSpec) ProtoMessage() {}
func (*NodeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{101}
+ return fileDescriptor_83c10c24ec417dc9, []int{100}
}
func (m *NodeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2908,7 +2880,7 @@ var xxx_messageInfo_NodeSpec proto.InternalMessageInfo
func (m *NodeStatus) Reset() { *m = NodeStatus{} }
func (*NodeStatus) ProtoMessage() {}
func (*NodeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{102}
+ return fileDescriptor_83c10c24ec417dc9, []int{101}
}
func (m *NodeStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2936,7 +2908,7 @@ var xxx_messageInfo_NodeStatus proto.InternalMessageInfo
func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} }
func (*NodeSystemInfo) ProtoMessage() {}
func (*NodeSystemInfo) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{103}
+ return fileDescriptor_83c10c24ec417dc9, []int{102}
}
func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2964,7 +2936,7 @@ var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo
func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} }
func (*ObjectFieldSelector) ProtoMessage() {}
func (*ObjectFieldSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{104}
+ return fileDescriptor_83c10c24ec417dc9, []int{103}
}
func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -2992,7 +2964,7 @@ var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo
func (m *ObjectReference) Reset() { *m = ObjectReference{} }
func (*ObjectReference) ProtoMessage() {}
func (*ObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{105}
+ return fileDescriptor_83c10c24ec417dc9, []int{104}
}
func (m *ObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3020,7 +2992,7 @@ var xxx_messageInfo_ObjectReference proto.InternalMessageInfo
func (m *PersistentVolume) Reset() { *m = PersistentVolume{} }
func (*PersistentVolume) ProtoMessage() {}
func (*PersistentVolume) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{106}
+ return fileDescriptor_83c10c24ec417dc9, []int{105}
}
func (m *PersistentVolume) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3048,7 +3020,7 @@ var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo
func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} }
func (*PersistentVolumeClaim) ProtoMessage() {}
func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{107}
+ return fileDescriptor_83c10c24ec417dc9, []int{106}
}
func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3076,7 +3048,7 @@ var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo
func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} }
func (*PersistentVolumeClaimCondition) ProtoMessage() {}
func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{108}
+ return fileDescriptor_83c10c24ec417dc9, []int{107}
}
func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3104,7 +3076,7 @@ var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo
func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} }
func (*PersistentVolumeClaimList) ProtoMessage() {}
func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{109}
+ return fileDescriptor_83c10c24ec417dc9, []int{108}
}
func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3132,7 +3104,7 @@ var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo
func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} }
func (*PersistentVolumeClaimSpec) ProtoMessage() {}
func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{110}
+ return fileDescriptor_83c10c24ec417dc9, []int{109}
}
func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3160,7 +3132,7 @@ var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo
func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} }
func (*PersistentVolumeClaimStatus) ProtoMessage() {}
func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{111}
+ return fileDescriptor_83c10c24ec417dc9, []int{110}
}
func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3188,7 +3160,7 @@ var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo
func (m *PersistentVolumeClaimTemplate) Reset() { *m = PersistentVolumeClaimTemplate{} }
func (*PersistentVolumeClaimTemplate) ProtoMessage() {}
func (*PersistentVolumeClaimTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{112}
+ return fileDescriptor_83c10c24ec417dc9, []int{111}
}
func (m *PersistentVolumeClaimTemplate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3216,7 +3188,7 @@ var xxx_messageInfo_PersistentVolumeClaimTemplate proto.InternalMessageInfo
func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} }
func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {}
func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{113}
+ return fileDescriptor_83c10c24ec417dc9, []int{112}
}
func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3244,7 +3216,7 @@ var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo
func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} }
func (*PersistentVolumeList) ProtoMessage() {}
func (*PersistentVolumeList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{114}
+ return fileDescriptor_83c10c24ec417dc9, []int{113}
}
func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3272,7 +3244,7 @@ var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo
func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} }
func (*PersistentVolumeSource) ProtoMessage() {}
func (*PersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{115}
+ return fileDescriptor_83c10c24ec417dc9, []int{114}
}
func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3300,7 +3272,7 @@ var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo
func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} }
func (*PersistentVolumeSpec) ProtoMessage() {}
func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{116}
+ return fileDescriptor_83c10c24ec417dc9, []int{115}
}
func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3328,7 +3300,7 @@ var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo
func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} }
func (*PersistentVolumeStatus) ProtoMessage() {}
func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{117}
+ return fileDescriptor_83c10c24ec417dc9, []int{116}
}
func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3356,7 +3328,7 @@ var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo
func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} }
func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {}
func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{118}
+ return fileDescriptor_83c10c24ec417dc9, []int{117}
}
func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3384,7 +3356,7 @@ var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo
func (m *Pod) Reset() { *m = Pod{} }
func (*Pod) ProtoMessage() {}
func (*Pod) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{119}
+ return fileDescriptor_83c10c24ec417dc9, []int{118}
}
func (m *Pod) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3412,7 +3384,7 @@ var xxx_messageInfo_Pod proto.InternalMessageInfo
func (m *PodAffinity) Reset() { *m = PodAffinity{} }
func (*PodAffinity) ProtoMessage() {}
func (*PodAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{120}
+ return fileDescriptor_83c10c24ec417dc9, []int{119}
}
func (m *PodAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3440,7 +3412,7 @@ var xxx_messageInfo_PodAffinity proto.InternalMessageInfo
func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} }
func (*PodAffinityTerm) ProtoMessage() {}
func (*PodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{121}
+ return fileDescriptor_83c10c24ec417dc9, []int{120}
}
func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3468,7 +3440,7 @@ var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo
func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} }
func (*PodAntiAffinity) ProtoMessage() {}
func (*PodAntiAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{122}
+ return fileDescriptor_83c10c24ec417dc9, []int{121}
}
func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3496,7 +3468,7 @@ var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo
func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} }
func (*PodAttachOptions) ProtoMessage() {}
func (*PodAttachOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{123}
+ return fileDescriptor_83c10c24ec417dc9, []int{122}
}
func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3524,7 +3496,7 @@ var xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo
func (m *PodCondition) Reset() { *m = PodCondition{} }
func (*PodCondition) ProtoMessage() {}
func (*PodCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{124}
+ return fileDescriptor_83c10c24ec417dc9, []int{123}
}
func (m *PodCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3552,7 +3524,7 @@ var xxx_messageInfo_PodCondition proto.InternalMessageInfo
func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} }
func (*PodDNSConfig) ProtoMessage() {}
func (*PodDNSConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{125}
+ return fileDescriptor_83c10c24ec417dc9, []int{124}
}
func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3580,7 +3552,7 @@ var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo
func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} }
func (*PodDNSConfigOption) ProtoMessage() {}
func (*PodDNSConfigOption) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{126}
+ return fileDescriptor_83c10c24ec417dc9, []int{125}
}
func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3608,7 +3580,7 @@ var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo
func (m *PodExecOptions) Reset() { *m = PodExecOptions{} }
func (*PodExecOptions) ProtoMessage() {}
func (*PodExecOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{127}
+ return fileDescriptor_83c10c24ec417dc9, []int{126}
}
func (m *PodExecOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3636,7 +3608,7 @@ var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo
func (m *PodIP) Reset() { *m = PodIP{} }
func (*PodIP) ProtoMessage() {}
func (*PodIP) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{128}
+ return fileDescriptor_83c10c24ec417dc9, []int{127}
}
func (m *PodIP) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3664,7 +3636,7 @@ var xxx_messageInfo_PodIP proto.InternalMessageInfo
func (m *PodList) Reset() { *m = PodList{} }
func (*PodList) ProtoMessage() {}
func (*PodList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{129}
+ return fileDescriptor_83c10c24ec417dc9, []int{128}
}
func (m *PodList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3692,7 +3664,7 @@ var xxx_messageInfo_PodList proto.InternalMessageInfo
func (m *PodLogOptions) Reset() { *m = PodLogOptions{} }
func (*PodLogOptions) ProtoMessage() {}
func (*PodLogOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{130}
+ return fileDescriptor_83c10c24ec417dc9, []int{129}
}
func (m *PodLogOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3720,7 +3692,7 @@ var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo
func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} }
func (*PodPortForwardOptions) ProtoMessage() {}
func (*PodPortForwardOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{131}
+ return fileDescriptor_83c10c24ec417dc9, []int{130}
}
func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3748,7 +3720,7 @@ var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo
func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} }
func (*PodProxyOptions) ProtoMessage() {}
func (*PodProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{132}
+ return fileDescriptor_83c10c24ec417dc9, []int{131}
}
func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3776,7 +3748,7 @@ var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo
func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} }
func (*PodReadinessGate) ProtoMessage() {}
func (*PodReadinessGate) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{133}
+ return fileDescriptor_83c10c24ec417dc9, []int{132}
}
func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3804,7 +3776,7 @@ var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo
func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} }
func (*PodSecurityContext) ProtoMessage() {}
func (*PodSecurityContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{134}
+ return fileDescriptor_83c10c24ec417dc9, []int{133}
}
func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3832,7 +3804,7 @@ var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo
func (m *PodSignature) Reset() { *m = PodSignature{} }
func (*PodSignature) ProtoMessage() {}
func (*PodSignature) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{135}
+ return fileDescriptor_83c10c24ec417dc9, []int{134}
}
func (m *PodSignature) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3860,7 +3832,7 @@ var xxx_messageInfo_PodSignature proto.InternalMessageInfo
func (m *PodSpec) Reset() { *m = PodSpec{} }
func (*PodSpec) ProtoMessage() {}
func (*PodSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{136}
+ return fileDescriptor_83c10c24ec417dc9, []int{135}
}
func (m *PodSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3888,7 +3860,7 @@ var xxx_messageInfo_PodSpec proto.InternalMessageInfo
func (m *PodStatus) Reset() { *m = PodStatus{} }
func (*PodStatus) ProtoMessage() {}
func (*PodStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{137}
+ return fileDescriptor_83c10c24ec417dc9, []int{136}
}
func (m *PodStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3916,7 +3888,7 @@ var xxx_messageInfo_PodStatus proto.InternalMessageInfo
func (m *PodStatusResult) Reset() { *m = PodStatusResult{} }
func (*PodStatusResult) ProtoMessage() {}
func (*PodStatusResult) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{138}
+ return fileDescriptor_83c10c24ec417dc9, []int{137}
}
func (m *PodStatusResult) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3944,7 +3916,7 @@ var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo
func (m *PodTemplate) Reset() { *m = PodTemplate{} }
func (*PodTemplate) ProtoMessage() {}
func (*PodTemplate) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{139}
+ return fileDescriptor_83c10c24ec417dc9, []int{138}
}
func (m *PodTemplate) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -3972,7 +3944,7 @@ var xxx_messageInfo_PodTemplate proto.InternalMessageInfo
func (m *PodTemplateList) Reset() { *m = PodTemplateList{} }
func (*PodTemplateList) ProtoMessage() {}
func (*PodTemplateList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{140}
+ return fileDescriptor_83c10c24ec417dc9, []int{139}
}
func (m *PodTemplateList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4000,7 +3972,7 @@ var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo
func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} }
func (*PodTemplateSpec) ProtoMessage() {}
func (*PodTemplateSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{141}
+ return fileDescriptor_83c10c24ec417dc9, []int{140}
}
func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4028,7 +4000,7 @@ var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo
func (m *PortStatus) Reset() { *m = PortStatus{} }
func (*PortStatus) ProtoMessage() {}
func (*PortStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{142}
+ return fileDescriptor_83c10c24ec417dc9, []int{141}
}
func (m *PortStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4056,7 +4028,7 @@ var xxx_messageInfo_PortStatus proto.InternalMessageInfo
func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} }
func (*PortworxVolumeSource) ProtoMessage() {}
func (*PortworxVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{143}
+ return fileDescriptor_83c10c24ec417dc9, []int{142}
}
func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4084,7 +4056,7 @@ var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo
func (m *Preconditions) Reset() { *m = Preconditions{} }
func (*Preconditions) ProtoMessage() {}
func (*Preconditions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{144}
+ return fileDescriptor_83c10c24ec417dc9, []int{143}
}
func (m *Preconditions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4112,7 +4084,7 @@ var xxx_messageInfo_Preconditions proto.InternalMessageInfo
func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} }
func (*PreferAvoidPodsEntry) ProtoMessage() {}
func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{145}
+ return fileDescriptor_83c10c24ec417dc9, []int{144}
}
func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4140,7 +4112,7 @@ var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo
func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} }
func (*PreferredSchedulingTerm) ProtoMessage() {}
func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{146}
+ return fileDescriptor_83c10c24ec417dc9, []int{145}
}
func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4168,7 +4140,7 @@ var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo
func (m *Probe) Reset() { *m = Probe{} }
func (*Probe) ProtoMessage() {}
func (*Probe) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{147}
+ return fileDescriptor_83c10c24ec417dc9, []int{146}
}
func (m *Probe) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4196,7 +4168,7 @@ var xxx_messageInfo_Probe proto.InternalMessageInfo
func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} }
func (*ProjectedVolumeSource) ProtoMessage() {}
func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{148}
+ return fileDescriptor_83c10c24ec417dc9, []int{147}
}
func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4224,7 +4196,7 @@ var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo
func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} }
func (*QuobyteVolumeSource) ProtoMessage() {}
func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{149}
+ return fileDescriptor_83c10c24ec417dc9, []int{148}
}
func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4252,7 +4224,7 @@ var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo
func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} }
func (*RBDPersistentVolumeSource) ProtoMessage() {}
func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{150}
+ return fileDescriptor_83c10c24ec417dc9, []int{149}
}
func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4280,7 +4252,7 @@ var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo
func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} }
func (*RBDVolumeSource) ProtoMessage() {}
func (*RBDVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{151}
+ return fileDescriptor_83c10c24ec417dc9, []int{150}
}
func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4308,7 +4280,7 @@ var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo
func (m *RangeAllocation) Reset() { *m = RangeAllocation{} }
func (*RangeAllocation) ProtoMessage() {}
func (*RangeAllocation) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{152}
+ return fileDescriptor_83c10c24ec417dc9, []int{151}
}
func (m *RangeAllocation) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4336,7 +4308,7 @@ var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo
func (m *ReplicationController) Reset() { *m = ReplicationController{} }
func (*ReplicationController) ProtoMessage() {}
func (*ReplicationController) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{153}
+ return fileDescriptor_83c10c24ec417dc9, []int{152}
}
func (m *ReplicationController) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4364,7 +4336,7 @@ var xxx_messageInfo_ReplicationController proto.InternalMessageInfo
func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} }
func (*ReplicationControllerCondition) ProtoMessage() {}
func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{154}
+ return fileDescriptor_83c10c24ec417dc9, []int{153}
}
func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4392,7 +4364,7 @@ var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo
func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} }
func (*ReplicationControllerList) ProtoMessage() {}
func (*ReplicationControllerList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{155}
+ return fileDescriptor_83c10c24ec417dc9, []int{154}
}
func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4420,7 +4392,7 @@ var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo
func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} }
func (*ReplicationControllerSpec) ProtoMessage() {}
func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{156}
+ return fileDescriptor_83c10c24ec417dc9, []int{155}
}
func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4448,7 +4420,7 @@ var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo
func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} }
func (*ReplicationControllerStatus) ProtoMessage() {}
func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{157}
+ return fileDescriptor_83c10c24ec417dc9, []int{156}
}
func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4476,7 +4448,7 @@ var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo
func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} }
func (*ResourceFieldSelector) ProtoMessage() {}
func (*ResourceFieldSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{158}
+ return fileDescriptor_83c10c24ec417dc9, []int{157}
}
func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4504,7 +4476,7 @@ var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo
func (m *ResourceQuota) Reset() { *m = ResourceQuota{} }
func (*ResourceQuota) ProtoMessage() {}
func (*ResourceQuota) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{159}
+ return fileDescriptor_83c10c24ec417dc9, []int{158}
}
func (m *ResourceQuota) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4532,7 +4504,7 @@ var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo
func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} }
func (*ResourceQuotaList) ProtoMessage() {}
func (*ResourceQuotaList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{160}
+ return fileDescriptor_83c10c24ec417dc9, []int{159}
}
func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4560,7 +4532,7 @@ var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo
func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} }
func (*ResourceQuotaSpec) ProtoMessage() {}
func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{161}
+ return fileDescriptor_83c10c24ec417dc9, []int{160}
}
func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4588,7 +4560,7 @@ var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo
func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} }
func (*ResourceQuotaStatus) ProtoMessage() {}
func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{162}
+ return fileDescriptor_83c10c24ec417dc9, []int{161}
}
func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4616,7 +4588,7 @@ var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo
func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} }
func (*ResourceRequirements) ProtoMessage() {}
func (*ResourceRequirements) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{163}
+ return fileDescriptor_83c10c24ec417dc9, []int{162}
}
func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4644,7 +4616,7 @@ var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo
func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} }
func (*SELinuxOptions) ProtoMessage() {}
func (*SELinuxOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{164}
+ return fileDescriptor_83c10c24ec417dc9, []int{163}
}
func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4672,7 +4644,7 @@ var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo
func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} }
func (*ScaleIOPersistentVolumeSource) ProtoMessage() {}
func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{165}
+ return fileDescriptor_83c10c24ec417dc9, []int{164}
}
func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4700,7 +4672,7 @@ var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo
func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} }
func (*ScaleIOVolumeSource) ProtoMessage() {}
func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{166}
+ return fileDescriptor_83c10c24ec417dc9, []int{165}
}
func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4728,7 +4700,7 @@ var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo
func (m *ScopeSelector) Reset() { *m = ScopeSelector{} }
func (*ScopeSelector) ProtoMessage() {}
func (*ScopeSelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{167}
+ return fileDescriptor_83c10c24ec417dc9, []int{166}
}
func (m *ScopeSelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4756,7 +4728,7 @@ var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo
func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} }
func (*ScopedResourceSelectorRequirement) ProtoMessage() {}
func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{168}
+ return fileDescriptor_83c10c24ec417dc9, []int{167}
}
func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4784,7 +4756,7 @@ var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo
func (m *SeccompProfile) Reset() { *m = SeccompProfile{} }
func (*SeccompProfile) ProtoMessage() {}
func (*SeccompProfile) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{169}
+ return fileDescriptor_83c10c24ec417dc9, []int{168}
}
func (m *SeccompProfile) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4812,7 +4784,7 @@ var xxx_messageInfo_SeccompProfile proto.InternalMessageInfo
func (m *Secret) Reset() { *m = Secret{} }
func (*Secret) ProtoMessage() {}
func (*Secret) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{170}
+ return fileDescriptor_83c10c24ec417dc9, []int{169}
}
func (m *Secret) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4840,7 +4812,7 @@ var xxx_messageInfo_Secret proto.InternalMessageInfo
func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} }
func (*SecretEnvSource) ProtoMessage() {}
func (*SecretEnvSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{171}
+ return fileDescriptor_83c10c24ec417dc9, []int{170}
}
func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4868,7 +4840,7 @@ var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo
func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} }
func (*SecretKeySelector) ProtoMessage() {}
func (*SecretKeySelector) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{172}
+ return fileDescriptor_83c10c24ec417dc9, []int{171}
}
func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4896,7 +4868,7 @@ var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo
func (m *SecretList) Reset() { *m = SecretList{} }
func (*SecretList) ProtoMessage() {}
func (*SecretList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{173}
+ return fileDescriptor_83c10c24ec417dc9, []int{172}
}
func (m *SecretList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4924,7 +4896,7 @@ var xxx_messageInfo_SecretList proto.InternalMessageInfo
func (m *SecretProjection) Reset() { *m = SecretProjection{} }
func (*SecretProjection) ProtoMessage() {}
func (*SecretProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{174}
+ return fileDescriptor_83c10c24ec417dc9, []int{173}
}
func (m *SecretProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4952,7 +4924,7 @@ var xxx_messageInfo_SecretProjection proto.InternalMessageInfo
func (m *SecretReference) Reset() { *m = SecretReference{} }
func (*SecretReference) ProtoMessage() {}
func (*SecretReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{175}
+ return fileDescriptor_83c10c24ec417dc9, []int{174}
}
func (m *SecretReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -4980,7 +4952,7 @@ var xxx_messageInfo_SecretReference proto.InternalMessageInfo
func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} }
func (*SecretVolumeSource) ProtoMessage() {}
func (*SecretVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{176}
+ return fileDescriptor_83c10c24ec417dc9, []int{175}
}
func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5008,7 +4980,7 @@ var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo
func (m *SecurityContext) Reset() { *m = SecurityContext{} }
func (*SecurityContext) ProtoMessage() {}
func (*SecurityContext) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{177}
+ return fileDescriptor_83c10c24ec417dc9, []int{176}
}
func (m *SecurityContext) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5036,7 +5008,7 @@ var xxx_messageInfo_SecurityContext proto.InternalMessageInfo
func (m *SerializedReference) Reset() { *m = SerializedReference{} }
func (*SerializedReference) ProtoMessage() {}
func (*SerializedReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{178}
+ return fileDescriptor_83c10c24ec417dc9, []int{177}
}
func (m *SerializedReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5064,7 +5036,7 @@ var xxx_messageInfo_SerializedReference proto.InternalMessageInfo
func (m *Service) Reset() { *m = Service{} }
func (*Service) ProtoMessage() {}
func (*Service) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{179}
+ return fileDescriptor_83c10c24ec417dc9, []int{178}
}
func (m *Service) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5092,7 +5064,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo
func (m *ServiceAccount) Reset() { *m = ServiceAccount{} }
func (*ServiceAccount) ProtoMessage() {}
func (*ServiceAccount) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{180}
+ return fileDescriptor_83c10c24ec417dc9, []int{179}
}
func (m *ServiceAccount) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5120,7 +5092,7 @@ var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo
func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} }
func (*ServiceAccountList) ProtoMessage() {}
func (*ServiceAccountList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{181}
+ return fileDescriptor_83c10c24ec417dc9, []int{180}
}
func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5148,7 +5120,7 @@ var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo
func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} }
func (*ServiceAccountTokenProjection) ProtoMessage() {}
func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{182}
+ return fileDescriptor_83c10c24ec417dc9, []int{181}
}
func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5176,7 +5148,7 @@ var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo
func (m *ServiceList) Reset() { *m = ServiceList{} }
func (*ServiceList) ProtoMessage() {}
func (*ServiceList) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{183}
+ return fileDescriptor_83c10c24ec417dc9, []int{182}
}
func (m *ServiceList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5204,7 +5176,7 @@ var xxx_messageInfo_ServiceList proto.InternalMessageInfo
func (m *ServicePort) Reset() { *m = ServicePort{} }
func (*ServicePort) ProtoMessage() {}
func (*ServicePort) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{184}
+ return fileDescriptor_83c10c24ec417dc9, []int{183}
}
func (m *ServicePort) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5232,7 +5204,7 @@ var xxx_messageInfo_ServicePort proto.InternalMessageInfo
func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} }
func (*ServiceProxyOptions) ProtoMessage() {}
func (*ServiceProxyOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{185}
+ return fileDescriptor_83c10c24ec417dc9, []int{184}
}
func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5260,7 +5232,7 @@ var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo
func (m *ServiceSpec) Reset() { *m = ServiceSpec{} }
func (*ServiceSpec) ProtoMessage() {}
func (*ServiceSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{186}
+ return fileDescriptor_83c10c24ec417dc9, []int{185}
}
func (m *ServiceSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5288,7 +5260,7 @@ var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo
func (m *ServiceStatus) Reset() { *m = ServiceStatus{} }
func (*ServiceStatus) ProtoMessage() {}
func (*ServiceStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{187}
+ return fileDescriptor_83c10c24ec417dc9, []int{186}
}
func (m *ServiceStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5316,7 +5288,7 @@ var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo
func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} }
func (*SessionAffinityConfig) ProtoMessage() {}
func (*SessionAffinityConfig) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{188}
+ return fileDescriptor_83c10c24ec417dc9, []int{187}
}
func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5344,7 +5316,7 @@ var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo
func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} }
func (*StorageOSPersistentVolumeSource) ProtoMessage() {}
func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{189}
+ return fileDescriptor_83c10c24ec417dc9, []int{188}
}
func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5372,7 +5344,7 @@ var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo
func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} }
func (*StorageOSVolumeSource) ProtoMessage() {}
func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{190}
+ return fileDescriptor_83c10c24ec417dc9, []int{189}
}
func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5400,7 +5372,7 @@ var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo
func (m *Sysctl) Reset() { *m = Sysctl{} }
func (*Sysctl) ProtoMessage() {}
func (*Sysctl) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{191}
+ return fileDescriptor_83c10c24ec417dc9, []int{190}
}
func (m *Sysctl) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5428,7 +5400,7 @@ var xxx_messageInfo_Sysctl proto.InternalMessageInfo
func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} }
func (*TCPSocketAction) ProtoMessage() {}
func (*TCPSocketAction) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{192}
+ return fileDescriptor_83c10c24ec417dc9, []int{191}
}
func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5456,7 +5428,7 @@ var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo
func (m *Taint) Reset() { *m = Taint{} }
func (*Taint) ProtoMessage() {}
func (*Taint) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{193}
+ return fileDescriptor_83c10c24ec417dc9, []int{192}
}
func (m *Taint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5484,7 +5456,7 @@ var xxx_messageInfo_Taint proto.InternalMessageInfo
func (m *Toleration) Reset() { *m = Toleration{} }
func (*Toleration) ProtoMessage() {}
func (*Toleration) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{194}
+ return fileDescriptor_83c10c24ec417dc9, []int{193}
}
func (m *Toleration) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5512,7 +5484,7 @@ var xxx_messageInfo_Toleration proto.InternalMessageInfo
func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} }
func (*TopologySelectorLabelRequirement) ProtoMessage() {}
func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{195}
+ return fileDescriptor_83c10c24ec417dc9, []int{194}
}
func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5540,7 +5512,7 @@ var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo
func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} }
func (*TopologySelectorTerm) ProtoMessage() {}
func (*TopologySelectorTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{196}
+ return fileDescriptor_83c10c24ec417dc9, []int{195}
}
func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5568,7 +5540,7 @@ var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo
func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} }
func (*TopologySpreadConstraint) ProtoMessage() {}
func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{197}
+ return fileDescriptor_83c10c24ec417dc9, []int{196}
}
func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5596,7 +5568,7 @@ var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo
func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} }
func (*TypedLocalObjectReference) ProtoMessage() {}
func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{198}
+ return fileDescriptor_83c10c24ec417dc9, []int{197}
}
func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5624,7 +5596,7 @@ var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo
func (m *Volume) Reset() { *m = Volume{} }
func (*Volume) ProtoMessage() {}
func (*Volume) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{199}
+ return fileDescriptor_83c10c24ec417dc9, []int{198}
}
func (m *Volume) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5652,7 +5624,7 @@ var xxx_messageInfo_Volume proto.InternalMessageInfo
func (m *VolumeDevice) Reset() { *m = VolumeDevice{} }
func (*VolumeDevice) ProtoMessage() {}
func (*VolumeDevice) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{200}
+ return fileDescriptor_83c10c24ec417dc9, []int{199}
}
func (m *VolumeDevice) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5680,7 +5652,7 @@ var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo
func (m *VolumeMount) Reset() { *m = VolumeMount{} }
func (*VolumeMount) ProtoMessage() {}
func (*VolumeMount) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{201}
+ return fileDescriptor_83c10c24ec417dc9, []int{200}
}
func (m *VolumeMount) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5708,7 +5680,7 @@ var xxx_messageInfo_VolumeMount proto.InternalMessageInfo
func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} }
func (*VolumeNodeAffinity) ProtoMessage() {}
func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{202}
+ return fileDescriptor_83c10c24ec417dc9, []int{201}
}
func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5736,7 +5708,7 @@ var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo
func (m *VolumeProjection) Reset() { *m = VolumeProjection{} }
func (*VolumeProjection) ProtoMessage() {}
func (*VolumeProjection) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{203}
+ return fileDescriptor_83c10c24ec417dc9, []int{202}
}
func (m *VolumeProjection) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5764,7 +5736,7 @@ var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo
func (m *VolumeSource) Reset() { *m = VolumeSource{} }
func (*VolumeSource) ProtoMessage() {}
func (*VolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{204}
+ return fileDescriptor_83c10c24ec417dc9, []int{203}
}
func (m *VolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5792,7 +5764,7 @@ var xxx_messageInfo_VolumeSource proto.InternalMessageInfo
func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} }
func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {}
func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{205}
+ return fileDescriptor_83c10c24ec417dc9, []int{204}
}
func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5820,7 +5792,7 @@ var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo
func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} }
func (*WeightedPodAffinityTerm) ProtoMessage() {}
func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{206}
+ return fileDescriptor_83c10c24ec417dc9, []int{205}
}
func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5848,7 +5820,7 @@ var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo
func (m *WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} }
func (*WindowsSecurityContextOptions) ProtoMessage() {}
func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) {
- return fileDescriptor_83c10c24ec417dc9, []int{207}
+ return fileDescriptor_83c10c24ec417dc9, []int{206}
}
func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -5927,7 +5899,6 @@ func init() {
proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource")
proto.RegisterType((*EphemeralContainer)(nil), "k8s.io.api.core.v1.EphemeralContainer")
proto.RegisterType((*EphemeralContainerCommon)(nil), "k8s.io.api.core.v1.EphemeralContainerCommon")
- proto.RegisterType((*EphemeralContainers)(nil), "k8s.io.api.core.v1.EphemeralContainers")
proto.RegisterType((*EphemeralVolumeSource)(nil), "k8s.io.api.core.v1.EphemeralVolumeSource")
proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event")
proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList")
@@ -6116,887 +6087,887 @@ func init() {
}
var fileDescriptor_83c10c24ec417dc9 = []byte{
- // 14066 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6b, 0x70, 0x1c, 0xd9,
- 0x75, 0x18, 0xac, 0x9e, 0xc1, 0x6b, 0x0e, 0xde, 0x17, 0x24, 0x17, 0xc4, 0x2e, 0x09, 0x6e, 0x53,
- 0xe2, 0x72, 0xb5, 0xbb, 0xa0, 0xb8, 0x0f, 0x69, 0xbd, 0x2b, 0xad, 0x05, 0x60, 0x00, 0x72, 0x96,
- 0x04, 0x38, 0x7b, 0x07, 0x24, 0x25, 0x79, 0xa5, 0x52, 0x63, 0xe6, 0x02, 0x68, 0x61, 0xa6, 0x7b,
- 0xb6, 0xbb, 0x07, 0x24, 0xf6, 0x93, 0xeb, 0xf3, 0x27, 0x3f, 0xe5, 0xc7, 0x57, 0xaa, 0x94, 0xf3,
- 0xb2, 0x5d, 0xae, 0x94, 0xe3, 0x54, 0xac, 0x38, 0x49, 0xc5, 0xb1, 0x63, 0x3b, 0x96, 0x13, 0x3b,
- 0x71, 0x1e, 0x4e, 0x7e, 0x38, 0x8e, 0x2b, 0xb1, 0x5c, 0xe5, 0x0a, 0x62, 0xd3, 0x49, 0xb9, 0xf4,
- 0x23, 0xb6, 0x13, 0x3b, 0x3f, 0x82, 0xb8, 0xe2, 0xd4, 0x7d, 0xf6, 0xbd, 0xfd, 0x98, 0x19, 0x70,
- 0x41, 0x68, 0xa5, 0xda, 0x7f, 0x33, 0xf7, 0x9c, 0x7b, 0xee, 0xed, 0xfb, 0x3c, 0xf7, 0x3c, 0xe1,
- 0xd5, 0xdd, 0x97, 0xc3, 0x05, 0xd7, 0xbf, 0xb2, 0xdb, 0xd9, 0x24, 0x81, 0x47, 0x22, 0x12, 0x5e,
- 0xd9, 0x23, 0x5e, 0xc3, 0x0f, 0xae, 0x08, 0x80, 0xd3, 0x76, 0xaf, 0xd4, 0xfd, 0x80, 0x5c, 0xd9,
- 0xbb, 0x7a, 0x65, 0x9b, 0x78, 0x24, 0x70, 0x22, 0xd2, 0x58, 0x68, 0x07, 0x7e, 0xe4, 0x23, 0xc4,
- 0x71, 0x16, 0x9c, 0xb6, 0xbb, 0x40, 0x71, 0x16, 0xf6, 0xae, 0xce, 0x3d, 0xb7, 0xed, 0x46, 0x3b,
- 0x9d, 0xcd, 0x85, 0xba, 0xdf, 0xba, 0xb2, 0xed, 0x6f, 0xfb, 0x57, 0x18, 0xea, 0x66, 0x67, 0x8b,
- 0xfd, 0x63, 0x7f, 0xd8, 0x2f, 0x4e, 0x62, 0xee, 0xc5, 0xb8, 0x99, 0x96, 0x53, 0xdf, 0x71, 0x3d,
- 0x12, 0xec, 0x5f, 0x69, 0xef, 0x6e, 0xb3, 0x76, 0x03, 0x12, 0xfa, 0x9d, 0xa0, 0x4e, 0x92, 0x0d,
- 0x77, 0xad, 0x15, 0x5e, 0x69, 0x91, 0xc8, 0xc9, 0xe8, 0xee, 0xdc, 0x95, 0xbc, 0x5a, 0x41, 0xc7,
- 0x8b, 0xdc, 0x56, 0xba, 0x99, 0x0f, 0xf7, 0xaa, 0x10, 0xd6, 0x77, 0x48, 0xcb, 0x49, 0xd5, 0x7b,
- 0x21, 0xaf, 0x5e, 0x27, 0x72, 0x9b, 0x57, 0x5c, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, 0xaa,
- 0x05, 0x17, 0x16, 0xef, 0xd6, 0x56, 0x9a, 0x4e, 0x18, 0xb9, 0xf5, 0xa5, 0xa6, 0x5f, 0xdf, 0xad,
- 0x45, 0x7e, 0x40, 0xee, 0xf8, 0xcd, 0x4e, 0x8b, 0xd4, 0xd8, 0x40, 0xa0, 0x67, 0x61, 0x64, 0x8f,
- 0xfd, 0xaf, 0x94, 0x67, 0xad, 0x0b, 0xd6, 0xe5, 0xd2, 0xd2, 0xd4, 0xaf, 0x1f, 0xcc, 0xbf, 0xef,
- 0xc1, 0xc1, 0xfc, 0xc8, 0x1d, 0x51, 0x8e, 0x15, 0x06, 0xba, 0x04, 0x43, 0x5b, 0xe1, 0xc6, 0x7e,
- 0x9b, 0xcc, 0x16, 0x18, 0xee, 0x84, 0xc0, 0x1d, 0x5a, 0xad, 0xd1, 0x52, 0x2c, 0xa0, 0xe8, 0x0a,
- 0x94, 0xda, 0x4e, 0x10, 0xb9, 0x91, 0xeb, 0x7b, 0xb3, 0xc5, 0x0b, 0xd6, 0xe5, 0xc1, 0xa5, 0x69,
- 0x81, 0x5a, 0xaa, 0x4a, 0x00, 0x8e, 0x71, 0x68, 0x37, 0x02, 0xe2, 0x34, 0x6e, 0x79, 0xcd, 0xfd,
- 0xd9, 0x81, 0x0b, 0xd6, 0xe5, 0x91, 0xb8, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0xff, 0x48, 0x01,
- 0x46, 0x16, 0xb7, 0xb6, 0x5c, 0xcf, 0x8d, 0xf6, 0xd1, 0x1d, 0x18, 0xf3, 0xfc, 0x06, 0x91, 0xff,
- 0xd9, 0x57, 0x8c, 0x3e, 0x7f, 0x61, 0x21, 0xbd, 0x94, 0x16, 0xd6, 0x35, 0xbc, 0xa5, 0xa9, 0x07,
- 0x07, 0xf3, 0x63, 0x7a, 0x09, 0x36, 0xe8, 0x20, 0x0c, 0xa3, 0x6d, 0xbf, 0xa1, 0xc8, 0x16, 0x18,
- 0xd9, 0xf9, 0x2c, 0xb2, 0xd5, 0x18, 0x6d, 0x69, 0xf2, 0xc1, 0xc1, 0xfc, 0xa8, 0x56, 0x80, 0x75,
- 0x22, 0x68, 0x13, 0x26, 0xe9, 0x5f, 0x2f, 0x72, 0x15, 0xdd, 0x22, 0xa3, 0x7b, 0x31, 0x8f, 0xae,
- 0x86, 0xba, 0x34, 0xf3, 0xe0, 0x60, 0x7e, 0x32, 0x51, 0x88, 0x93, 0x04, 0xed, 0xb7, 0x61, 0x62,
- 0x31, 0x8a, 0x9c, 0xfa, 0x0e, 0x69, 0xf0, 0x19, 0x44, 0x2f, 0xc2, 0x80, 0xe7, 0xb4, 0x88, 0x98,
- 0xdf, 0x0b, 0x62, 0x60, 0x07, 0xd6, 0x9d, 0x16, 0x39, 0x3c, 0x98, 0x9f, 0xba, 0xed, 0xb9, 0x6f,
- 0x75, 0xc4, 0xaa, 0xa0, 0x65, 0x98, 0x61, 0xa3, 0xe7, 0x01, 0x1a, 0x64, 0xcf, 0xad, 0x93, 0xaa,
- 0x13, 0xed, 0x88, 0xf9, 0x46, 0xa2, 0x2e, 0x94, 0x15, 0x04, 0x6b, 0x58, 0xf6, 0x7d, 0x28, 0x2d,
- 0xee, 0xf9, 0x6e, 0xa3, 0xea, 0x37, 0x42, 0xb4, 0x0b, 0x93, 0xed, 0x80, 0x6c, 0x91, 0x40, 0x15,
- 0xcd, 0x5a, 0x17, 0x8a, 0x97, 0x47, 0x9f, 0xbf, 0x9c, 0xf9, 0xb1, 0x26, 0xea, 0x8a, 0x17, 0x05,
- 0xfb, 0x4b, 0x8f, 0x89, 0xf6, 0x26, 0x13, 0x50, 0x9c, 0xa4, 0x6c, 0xff, 0x8b, 0x02, 0x9c, 0x5e,
- 0x7c, 0xbb, 0x13, 0x90, 0xb2, 0x1b, 0xee, 0x26, 0x57, 0x78, 0xc3, 0x0d, 0x77, 0xd7, 0xe3, 0x11,
- 0x50, 0x4b, 0xab, 0x2c, 0xca, 0xb1, 0xc2, 0x40, 0xcf, 0xc1, 0x30, 0xfd, 0x7d, 0x1b, 0x57, 0xc4,
- 0x27, 0xcf, 0x08, 0xe4, 0xd1, 0xb2, 0x13, 0x39, 0x65, 0x0e, 0xc2, 0x12, 0x07, 0xad, 0xc1, 0x68,
- 0x9d, 0x6d, 0xc8, 0xed, 0x35, 0xbf, 0x41, 0xd8, 0x64, 0x96, 0x96, 0x9e, 0xa1, 0xe8, 0xcb, 0x71,
- 0xf1, 0xe1, 0xc1, 0xfc, 0x2c, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0x7f,
- 0x0d, 0x30, 0x4a, 0x90, 0xb1, 0xb7, 0x2e, 0x6b, 0x5b, 0x65, 0x90, 0x6d, 0x95, 0xb1, 0xec, 0x6d,
- 0x82, 0xae, 0xc2, 0xc0, 0xae, 0xeb, 0x35, 0x66, 0x87, 0x18, 0xad, 0x73, 0x74, 0xce, 0x6f, 0xb8,
- 0x5e, 0xe3, 0xf0, 0x60, 0x7e, 0xda, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0x53, 0x0b, 0xe6,
- 0x19, 0x6c, 0xd5, 0x6d, 0x92, 0x2a, 0x09, 0x42, 0x37, 0x8c, 0x88, 0x17, 0x19, 0x03, 0xfa, 0x3c,
- 0x40, 0x48, 0xea, 0x01, 0x89, 0xb4, 0x21, 0x55, 0x0b, 0xa3, 0xa6, 0x20, 0x58, 0xc3, 0xa2, 0x07,
- 0x42, 0xb8, 0xe3, 0x04, 0x6c, 0x7d, 0x89, 0x81, 0x55, 0x07, 0x42, 0x4d, 0x02, 0x70, 0x8c, 0x63,
- 0x1c, 0x08, 0xc5, 0x5e, 0x07, 0x02, 0xfa, 0x18, 0x4c, 0xc6, 0x8d, 0x85, 0x6d, 0xa7, 0x2e, 0x07,
- 0x90, 0x6d, 0x99, 0x9a, 0x09, 0xc2, 0x49, 0x5c, 0xfb, 0xef, 0x58, 0x62, 0xf1, 0xd0, 0xaf, 0x7e,
- 0x97, 0x7f, 0xab, 0xfd, 0x8b, 0x16, 0x0c, 0x2f, 0xb9, 0x5e, 0xc3, 0xf5, 0xb6, 0xd1, 0x67, 0x61,
- 0x84, 0xde, 0x4d, 0x0d, 0x27, 0x72, 0xc4, 0xb9, 0xf7, 0x21, 0x6d, 0x6f, 0xa9, 0xab, 0x62, 0xa1,
- 0xbd, 0xbb, 0x4d, 0x0b, 0xc2, 0x05, 0x8a, 0x4d, 0x77, 0xdb, 0xad, 0xcd, 0xcf, 0x91, 0x7a, 0xb4,
- 0x46, 0x22, 0x27, 0xfe, 0x9c, 0xb8, 0x0c, 0x2b, 0xaa, 0xe8, 0x06, 0x0c, 0x45, 0x4e, 0xb0, 0x4d,
- 0x22, 0x71, 0x00, 0x66, 0x1e, 0x54, 0xbc, 0x26, 0xa6, 0x3b, 0x92, 0x78, 0x75, 0x12, 0x5f, 0x0b,
- 0x1b, 0xac, 0x2a, 0x16, 0x24, 0xec, 0x1f, 0x1a, 0x86, 0xb3, 0xcb, 0xb5, 0x4a, 0xce, 0xba, 0xba,
- 0x04, 0x43, 0x8d, 0xc0, 0xdd, 0x23, 0x81, 0x18, 0x67, 0x45, 0xa5, 0xcc, 0x4a, 0xb1, 0x80, 0xa2,
- 0x97, 0x61, 0x8c, 0x5f, 0x48, 0xd7, 0x1d, 0xaf, 0xd1, 0x94, 0x43, 0x7c, 0x4a, 0x60, 0x8f, 0xdd,
- 0xd1, 0x60, 0xd8, 0xc0, 0x3c, 0xe2, 0xa2, 0xba, 0x94, 0xd8, 0x8c, 0x79, 0x97, 0xdd, 0x17, 0x2d,
- 0x98, 0xe2, 0xcd, 0x2c, 0x46, 0x51, 0xe0, 0x6e, 0x76, 0x22, 0x12, 0xce, 0x0e, 0xb2, 0x93, 0x6e,
- 0x39, 0x6b, 0xb4, 0x72, 0x47, 0x60, 0xe1, 0x4e, 0x82, 0x0a, 0x3f, 0x04, 0x67, 0x45, 0xbb, 0x53,
- 0x49, 0x30, 0x4e, 0x35, 0x8b, 0xbe, 0xd3, 0x82, 0xb9, 0xba, 0xef, 0x45, 0x81, 0xdf, 0x6c, 0x92,
- 0xa0, 0xda, 0xd9, 0x6c, 0xba, 0xe1, 0x0e, 0x5f, 0xa7, 0x98, 0x6c, 0xb1, 0x93, 0x20, 0x67, 0x0e,
- 0x15, 0x92, 0x98, 0xc3, 0xf3, 0x0f, 0x0e, 0xe6, 0xe7, 0x96, 0x73, 0x49, 0xe1, 0x2e, 0xcd, 0xa0,
- 0x5d, 0x40, 0xf4, 0x2a, 0xad, 0x45, 0xce, 0x36, 0x89, 0x1b, 0x1f, 0xee, 0xbf, 0xf1, 0x33, 0x0f,
- 0x0e, 0xe6, 0xd1, 0x7a, 0x8a, 0x04, 0xce, 0x20, 0x8b, 0xde, 0x82, 0x53, 0xb4, 0x34, 0xf5, 0xad,
- 0x23, 0xfd, 0x37, 0x37, 0xfb, 0xe0, 0x60, 0xfe, 0xd4, 0x7a, 0x06, 0x11, 0x9c, 0x49, 0x1a, 0x7d,
- 0x87, 0x05, 0x67, 0xe3, 0xcf, 0x5f, 0xb9, 0xdf, 0x76, 0xbc, 0x46, 0xdc, 0x70, 0xa9, 0xff, 0x86,
- 0xe9, 0x99, 0x7c, 0x76, 0x39, 0x8f, 0x12, 0xce, 0x6f, 0x64, 0x6e, 0x19, 0x4e, 0x67, 0xae, 0x16,
- 0x34, 0x05, 0xc5, 0x5d, 0xc2, 0xb9, 0xa0, 0x12, 0xa6, 0x3f, 0xd1, 0x29, 0x18, 0xdc, 0x73, 0x9a,
- 0x1d, 0xb1, 0x51, 0x30, 0xff, 0xf3, 0x4a, 0xe1, 0x65, 0xcb, 0xfe, 0x97, 0x45, 0x98, 0x5c, 0xae,
- 0x55, 0x1e, 0x6a, 0x17, 0xea, 0xd7, 0x50, 0xa1, 0xeb, 0x35, 0x14, 0x5f, 0x6a, 0xc5, 0xdc, 0x4b,
- 0xed, 0xff, 0xcd, 0xd8, 0x42, 0x03, 0x6c, 0x0b, 0x7d, 0x4b, 0xce, 0x16, 0x3a, 0xe6, 0x8d, 0xb3,
- 0x97, 0xb3, 0x8a, 0x06, 0xd9, 0x64, 0x66, 0x72, 0x2c, 0x37, 0xfd, 0xba, 0xd3, 0x4c, 0x1e, 0x7d,
- 0x47, 0x5c, 0x4a, 0xc7, 0x33, 0x8f, 0x75, 0x18, 0x5b, 0x76, 0xda, 0xce, 0xa6, 0xdb, 0x74, 0x23,
- 0x97, 0x84, 0xe8, 0x29, 0x28, 0x3a, 0x8d, 0x06, 0xe3, 0xb6, 0x4a, 0x4b, 0xa7, 0x1f, 0x1c, 0xcc,
- 0x17, 0x17, 0x1b, 0xf4, 0xda, 0x07, 0x85, 0xb5, 0x8f, 0x29, 0x06, 0xfa, 0x20, 0x0c, 0x34, 0x02,
- 0xbf, 0x3d, 0x5b, 0x60, 0x98, 0x74, 0xd7, 0x0d, 0x94, 0x03, 0xbf, 0x9d, 0x40, 0x65, 0x38, 0xf6,
- 0xaf, 0x16, 0xe0, 0x89, 0x65, 0xd2, 0xde, 0x59, 0xad, 0xe5, 0x9c, 0xdf, 0x97, 0x61, 0xa4, 0xe5,
- 0x7b, 0x6e, 0xe4, 0x07, 0xa1, 0x68, 0x9a, 0xad, 0x88, 0x35, 0x51, 0x86, 0x15, 0x14, 0x5d, 0x80,
- 0x81, 0x76, 0xcc, 0x54, 0x8e, 0x49, 0x86, 0x94, 0xb1, 0x93, 0x0c, 0x42, 0x31, 0x3a, 0x21, 0x09,
- 0xc4, 0x8a, 0x51, 0x18, 0xb7, 0x43, 0x12, 0x60, 0x06, 0x89, 0x6f, 0x66, 0x7a, 0x67, 0x8b, 0x13,
- 0x3a, 0x71, 0x33, 0x53, 0x08, 0xd6, 0xb0, 0x50, 0x15, 0x4a, 0x61, 0x62, 0x66, 0xfb, 0xda, 0xa6,
- 0xe3, 0xec, 0xea, 0x56, 0x33, 0x19, 0x13, 0x31, 0x6e, 0x94, 0xa1, 0x9e, 0x57, 0xf7, 0x57, 0x0a,
- 0x80, 0xf8, 0x10, 0x7e, 0x83, 0x0d, 0xdc, 0xed, 0xf4, 0xc0, 0xf5, 0xbf, 0x25, 0x8e, 0x6b, 0xf4,
- 0xfe, 0xcc, 0x82, 0x27, 0x96, 0x5d, 0xaf, 0x41, 0x82, 0x9c, 0x05, 0xf8, 0x68, 0xde, 0xb2, 0x47,
- 0x63, 0x1a, 0x8c, 0x25, 0x36, 0x70, 0x0c, 0x4b, 0xcc, 0xfe, 0x63, 0x0b, 0x10, 0xff, 0xec, 0x77,
- 0xdd, 0xc7, 0xde, 0x4e, 0x7f, 0xec, 0x31, 0x2c, 0x0b, 0xfb, 0x26, 0x4c, 0x2c, 0x37, 0x5d, 0xe2,
- 0x45, 0x95, 0xea, 0xb2, 0xef, 0x6d, 0xb9, 0xdb, 0xe8, 0x15, 0x98, 0x88, 0xdc, 0x16, 0xf1, 0x3b,
- 0x51, 0x8d, 0xd4, 0x7d, 0x8f, 0xbd, 0x24, 0xad, 0xcb, 0x83, 0x4b, 0xe8, 0xc1, 0xc1, 0xfc, 0xc4,
- 0x86, 0x01, 0xc1, 0x09, 0x4c, 0xfb, 0x77, 0xe9, 0xf8, 0xf9, 0xad, 0xb6, 0xef, 0x11, 0x2f, 0x5a,
- 0xf6, 0xbd, 0x06, 0x97, 0x38, 0xbc, 0x02, 0x03, 0x11, 0x1d, 0x0f, 0x3e, 0x76, 0x97, 0xe4, 0x46,
- 0xa1, 0xa3, 0x70, 0x78, 0x30, 0x7f, 0x26, 0x5d, 0x83, 0x8d, 0x13, 0xab, 0x83, 0xbe, 0x05, 0x86,
- 0xc2, 0xc8, 0x89, 0x3a, 0xa1, 0x18, 0xcd, 0x27, 0xe5, 0x68, 0xd6, 0x58, 0xe9, 0xe1, 0xc1, 0xfc,
- 0xa4, 0xaa, 0xc6, 0x8b, 0xb0, 0xa8, 0x80, 0x9e, 0x86, 0xe1, 0x16, 0x09, 0x43, 0x67, 0x5b, 0xde,
- 0x86, 0x93, 0xa2, 0xee, 0xf0, 0x1a, 0x2f, 0xc6, 0x12, 0x8e, 0x2e, 0xc2, 0x20, 0x09, 0x02, 0x3f,
- 0x10, 0x7b, 0x74, 0x5c, 0x20, 0x0e, 0xae, 0xd0, 0x42, 0xcc, 0x61, 0xf6, 0xbf, 0xb3, 0x60, 0x52,
- 0xf5, 0x95, 0xb7, 0x75, 0x02, 0xaf, 0x82, 0x4f, 0x01, 0xd4, 0xe5, 0x07, 0x86, 0xec, 0xf6, 0x18,
- 0x7d, 0xfe, 0x52, 0xe6, 0x45, 0x9d, 0x1a, 0xc6, 0x98, 0xb2, 0x2a, 0x0a, 0xb1, 0x46, 0xcd, 0xfe,
- 0x27, 0x16, 0xcc, 0x24, 0xbe, 0xe8, 0xa6, 0x1b, 0x46, 0xe8, 0xcd, 0xd4, 0x57, 0x2d, 0xf4, 0xf7,
- 0x55, 0xb4, 0x36, 0xfb, 0x26, 0xb5, 0x94, 0x65, 0x89, 0xf6, 0x45, 0xd7, 0x61, 0xd0, 0x8d, 0x48,
- 0x4b, 0x7e, 0xcc, 0xc5, 0xae, 0x1f, 0xc3, 0x7b, 0x15, 0xcf, 0x48, 0x85, 0xd6, 0xc4, 0x9c, 0x80,
- 0xfd, 0xab, 0x45, 0x28, 0xf1, 0x65, 0xbb, 0xe6, 0xb4, 0x4f, 0x60, 0x2e, 0x9e, 0x81, 0x92, 0xdb,
- 0x6a, 0x75, 0x22, 0x67, 0x53, 0x1c, 0xe7, 0x23, 0x7c, 0x6b, 0x55, 0x64, 0x21, 0x8e, 0xe1, 0xa8,
- 0x02, 0x03, 0xac, 0x2b, 0xfc, 0x2b, 0x9f, 0xca, 0xfe, 0x4a, 0xd1, 0xf7, 0x85, 0xb2, 0x13, 0x39,
- 0x9c, 0x93, 0x52, 0xf7, 0x08, 0x2d, 0xc2, 0x8c, 0x04, 0x72, 0x00, 0x36, 0x5d, 0xcf, 0x09, 0xf6,
- 0x69, 0xd9, 0x6c, 0x91, 0x11, 0x7c, 0xae, 0x3b, 0xc1, 0x25, 0x85, 0xcf, 0xc9, 0xaa, 0x0f, 0x8b,
- 0x01, 0x58, 0x23, 0x3a, 0xf7, 0x11, 0x28, 0x29, 0xe4, 0xa3, 0x30, 0x44, 0x73, 0x1f, 0x83, 0xc9,
- 0x44, 0x5b, 0xbd, 0xaa, 0x8f, 0xe9, 0xfc, 0xd4, 0x2f, 0xb1, 0x23, 0x43, 0xf4, 0x7a, 0xc5, 0xdb,
- 0x13, 0x47, 0xee, 0xdb, 0x70, 0xaa, 0x99, 0x71, 0x92, 0x89, 0x79, 0xed, 0xff, 0xe4, 0x7b, 0x42,
- 0x7c, 0xf6, 0xa9, 0x2c, 0x28, 0xce, 0x6c, 0x83, 0xf2, 0x08, 0x7e, 0x9b, 0x6e, 0x10, 0xa7, 0xa9,
- 0xb3, 0xdb, 0xb7, 0x44, 0x19, 0x56, 0x50, 0x7a, 0xde, 0x9d, 0x52, 0x9d, 0xbf, 0x41, 0xf6, 0x6b,
- 0xa4, 0x49, 0xea, 0x91, 0x1f, 0x7c, 0x5d, 0xbb, 0x7f, 0x8e, 0x8f, 0x3e, 0x3f, 0x2e, 0x47, 0x05,
- 0x81, 0xe2, 0x0d, 0xb2, 0xcf, 0xa7, 0x42, 0xff, 0xba, 0x62, 0xd7, 0xaf, 0xfb, 0x19, 0x0b, 0xc6,
- 0xd5, 0xd7, 0x9d, 0xc0, 0xb9, 0xb0, 0x64, 0x9e, 0x0b, 0xe7, 0xba, 0x2e, 0xf0, 0x9c, 0x13, 0xe1,
- 0x2b, 0x05, 0x38, 0xab, 0x70, 0xe8, 0xdb, 0x80, 0xff, 0x11, 0xab, 0xea, 0x0a, 0x94, 0x3c, 0x25,
- 0xb5, 0xb2, 0x4c, 0x71, 0x51, 0x2c, 0xb3, 0x8a, 0x71, 0x28, 0x8b, 0xe7, 0xc5, 0xa2, 0xa5, 0x31,
- 0x5d, 0x9c, 0x2b, 0x44, 0xb7, 0x4b, 0x50, 0xec, 0xb8, 0x0d, 0x71, 0xc1, 0x7c, 0x48, 0x8e, 0xf6,
- 0xed, 0x4a, 0xf9, 0xf0, 0x60, 0xfe, 0xc9, 0x3c, 0x55, 0x02, 0xbd, 0xd9, 0xc2, 0x85, 0xdb, 0x95,
- 0x32, 0xa6, 0x95, 0xd1, 0x22, 0x4c, 0x4a, 0x6d, 0xc9, 0x1d, 0xca, 0x6e, 0xf9, 0x9e, 0xb8, 0x87,
- 0x94, 0x4c, 0x16, 0x9b, 0x60, 0x9c, 0xc4, 0x47, 0x65, 0x98, 0xda, 0xed, 0x6c, 0x92, 0x26, 0x89,
- 0xf8, 0x07, 0xdf, 0x20, 0x5c, 0x62, 0x59, 0x8a, 0x5f, 0x66, 0x37, 0x12, 0x70, 0x9c, 0xaa, 0x61,
- 0xff, 0x05, 0xbb, 0x0f, 0xc4, 0xe8, 0x55, 0x03, 0x9f, 0x2e, 0x2c, 0x4a, 0xfd, 0xeb, 0xb9, 0x9c,
- 0xfb, 0x59, 0x15, 0x37, 0xc8, 0xfe, 0x86, 0x4f, 0x39, 0xf3, 0xec, 0x55, 0x61, 0xac, 0xf9, 0x81,
- 0xae, 0x6b, 0xfe, 0xe7, 0x0a, 0x70, 0x5a, 0x8d, 0x80, 0xc1, 0x04, 0x7e, 0xa3, 0x8f, 0xc1, 0x55,
- 0x18, 0x6d, 0x90, 0x2d, 0xa7, 0xd3, 0x8c, 0x94, 0xf8, 0x7c, 0x90, 0xab, 0x50, 0xca, 0x71, 0x31,
- 0xd6, 0x71, 0x8e, 0x30, 0x6c, 0xff, 0x73, 0x94, 0x5d, 0xc4, 0x91, 0x43, 0xd7, 0xb8, 0xda, 0x35,
- 0x56, 0xee, 0xae, 0xb9, 0x08, 0x83, 0x6e, 0x8b, 0x32, 0x66, 0x05, 0x93, 0xdf, 0xaa, 0xd0, 0x42,
- 0xcc, 0x61, 0xe8, 0x03, 0x30, 0x5c, 0xf7, 0x5b, 0x2d, 0xc7, 0x6b, 0xb0, 0x2b, 0xaf, 0xb4, 0x34,
- 0x4a, 0x79, 0xb7, 0x65, 0x5e, 0x84, 0x25, 0x0c, 0x3d, 0x01, 0x03, 0x4e, 0xb0, 0xcd, 0x65, 0x18,
- 0xa5, 0xa5, 0x11, 0xda, 0xd2, 0x62, 0xb0, 0x1d, 0x62, 0x56, 0x4a, 0x9f, 0x60, 0xf7, 0xfc, 0x60,
- 0xd7, 0xf5, 0xb6, 0xcb, 0x6e, 0x20, 0xb6, 0x84, 0xba, 0x0b, 0xef, 0x2a, 0x08, 0xd6, 0xb0, 0xd0,
- 0x2a, 0x0c, 0xb6, 0xfd, 0x20, 0x0a, 0x67, 0x87, 0xd8, 0x70, 0x3f, 0x99, 0x73, 0x10, 0xf1, 0xaf,
- 0xad, 0xfa, 0x41, 0x14, 0x7f, 0x00, 0xfd, 0x17, 0x62, 0x5e, 0x1d, 0xdd, 0x84, 0x61, 0xe2, 0xed,
- 0xad, 0x06, 0x7e, 0x6b, 0x76, 0x26, 0x9f, 0xd2, 0x0a, 0x47, 0xe1, 0xcb, 0x2c, 0xe6, 0x51, 0x45,
- 0x31, 0x96, 0x24, 0xd0, 0xb7, 0x40, 0x91, 0x78, 0x7b, 0xb3, 0xc3, 0x8c, 0xd2, 0x5c, 0x0e, 0xa5,
- 0x3b, 0x4e, 0x10, 0x9f, 0xf9, 0x2b, 0xde, 0x1e, 0xa6, 0x75, 0xd0, 0x27, 0xa1, 0x24, 0x0f, 0x8c,
- 0x50, 0x08, 0xeb, 0x32, 0x17, 0xac, 0x3c, 0x66, 0x30, 0x79, 0xab, 0xe3, 0x06, 0xa4, 0x45, 0xbc,
- 0x28, 0x8c, 0x4f, 0x48, 0x09, 0x0d, 0x71, 0x4c, 0x0d, 0x7d, 0x52, 0x4a, 0x88, 0xd7, 0xfc, 0x8e,
- 0x17, 0x85, 0xb3, 0x25, 0xd6, 0xbd, 0x4c, 0xdd, 0xdd, 0x9d, 0x18, 0x2f, 0x29, 0x42, 0xe6, 0x95,
- 0xb1, 0x41, 0x0a, 0x7d, 0x1a, 0xc6, 0xf9, 0x7f, 0xae, 0x01, 0x0b, 0x67, 0x4f, 0x33, 0xda, 0x17,
- 0xf2, 0x69, 0x73, 0xc4, 0xa5, 0xd3, 0x82, 0xf8, 0xb8, 0x5e, 0x1a, 0x62, 0x93, 0x1a, 0xc2, 0x30,
- 0xde, 0x74, 0xf7, 0x88, 0x47, 0xc2, 0xb0, 0x1a, 0xf8, 0x9b, 0x64, 0x16, 0xd8, 0xc0, 0x9c, 0xcd,
- 0xd6, 0x98, 0xf9, 0x9b, 0x64, 0x69, 0x9a, 0xd2, 0xbc, 0xa9, 0xd7, 0xc1, 0x26, 0x09, 0x74, 0x1b,
- 0x26, 0xe8, 0x8b, 0xcd, 0x8d, 0x89, 0x8e, 0xf6, 0x22, 0xca, 0xde, 0x55, 0xd8, 0xa8, 0x84, 0x13,
- 0x44, 0xd0, 0x2d, 0x18, 0x0b, 0x23, 0x27, 0x88, 0x3a, 0x6d, 0x4e, 0xf4, 0x4c, 0x2f, 0xa2, 0x4c,
- 0xe1, 0x5a, 0xd3, 0xaa, 0x60, 0x83, 0x00, 0x7a, 0x1d, 0x4a, 0x4d, 0x77, 0x8b, 0xd4, 0xf7, 0xeb,
- 0x4d, 0x32, 0x3b, 0xc6, 0xa8, 0x65, 0x1e, 0x2a, 0x37, 0x25, 0x12, 0xe7, 0x73, 0xd5, 0x5f, 0x1c,
- 0x57, 0x47, 0x77, 0xe0, 0x4c, 0x44, 0x82, 0x96, 0xeb, 0x39, 0xf4, 0x30, 0x10, 0x4f, 0x2b, 0xa6,
- 0xc8, 0x1c, 0x67, 0xbb, 0xed, 0xbc, 0x98, 0x8d, 0x33, 0x1b, 0x99, 0x58, 0x38, 0xa7, 0x36, 0xba,
- 0x0f, 0xb3, 0x19, 0x10, 0xbf, 0xe9, 0xd6, 0xf7, 0x67, 0x4f, 0x31, 0xca, 0x1f, 0x15, 0x94, 0x67,
- 0x37, 0x72, 0xf0, 0x0e, 0xbb, 0xc0, 0x70, 0x2e, 0x75, 0x74, 0x0b, 0x26, 0xd9, 0x09, 0x54, 0xed,
- 0x34, 0x9b, 0xa2, 0xc1, 0x09, 0xd6, 0xe0, 0x07, 0xe4, 0x7d, 0x5c, 0x31, 0xc1, 0x87, 0x07, 0xf3,
- 0x10, 0xff, 0xc3, 0xc9, 0xda, 0x68, 0x93, 0xe9, 0xcc, 0x3a, 0x81, 0x1b, 0xed, 0xd3, 0x73, 0x83,
- 0xdc, 0x8f, 0x66, 0x27, 0xbb, 0xca, 0x2b, 0x74, 0x54, 0xa5, 0x58, 0xd3, 0x0b, 0x71, 0x92, 0x20,
- 0x3d, 0x52, 0xc3, 0xa8, 0xe1, 0x7a, 0xb3, 0x53, 0xfc, 0x5d, 0x22, 0x4f, 0xa4, 0x1a, 0x2d, 0xc4,
- 0x1c, 0xc6, 0xf4, 0x65, 0xf4, 0xc7, 0x2d, 0x7a, 0x73, 0x4d, 0x33, 0xc4, 0x58, 0x5f, 0x26, 0x01,
- 0x38, 0xc6, 0xa1, 0xcc, 0x64, 0x14, 0xed, 0xcf, 0x22, 0x86, 0xaa, 0x0e, 0x96, 0x8d, 0x8d, 0x4f,
- 0x62, 0x5a, 0x6e, 0x6f, 0xc2, 0x84, 0x3a, 0x08, 0xd9, 0x98, 0xa0, 0x79, 0x18, 0x64, 0xec, 0x93,
- 0x90, 0xae, 0x95, 0x68, 0x17, 0x18, 0x6b, 0x85, 0x79, 0x39, 0xeb, 0x82, 0xfb, 0x36, 0x59, 0xda,
- 0x8f, 0x08, 0x7f, 0xd3, 0x17, 0xb5, 0x2e, 0x48, 0x00, 0x8e, 0x71, 0xec, 0xff, 0xc3, 0xd9, 0xd0,
- 0xf8, 0xb4, 0xed, 0xe3, 0x7e, 0x79, 0x16, 0x46, 0x76, 0xfc, 0x30, 0xa2, 0xd8, 0xac, 0x8d, 0xc1,
- 0x98, 0xf1, 0xbc, 0x2e, 0xca, 0xb1, 0xc2, 0x40, 0xaf, 0xc2, 0x78, 0x5d, 0x6f, 0x40, 0x5c, 0x8e,
- 0xea, 0x18, 0x31, 0x5a, 0xc7, 0x26, 0x2e, 0x7a, 0x19, 0x46, 0x98, 0x0d, 0x48, 0xdd, 0x6f, 0x0a,
- 0xae, 0x4d, 0xde, 0xf0, 0x23, 0x55, 0x51, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x1b, 0x5d, 0x82, 0x21,
- 0xda, 0x85, 0x4a, 0x55, 0x5c, 0x4b, 0x4a, 0x50, 0x74, 0x9d, 0x95, 0x62, 0x01, 0xb5, 0xff, 0x52,
- 0x41, 0x1b, 0x65, 0xfa, 0x1e, 0x26, 0xa8, 0x0a, 0xc3, 0xf7, 0x1c, 0x37, 0x72, 0xbd, 0x6d, 0xc1,
- 0x7f, 0x3c, 0xdd, 0xf5, 0x8e, 0x62, 0x95, 0xee, 0xf2, 0x0a, 0xfc, 0x16, 0x15, 0x7f, 0xb0, 0x24,
- 0x43, 0x29, 0x06, 0x1d, 0xcf, 0xa3, 0x14, 0x0b, 0xfd, 0x52, 0xc4, 0xbc, 0x02, 0xa7, 0x28, 0xfe,
- 0x60, 0x49, 0x06, 0xbd, 0x09, 0x20, 0x77, 0x18, 0x69, 0x08, 0xdb, 0x8b, 0x67, 0x7b, 0x13, 0xdd,
- 0x50, 0x75, 0x96, 0x26, 0xe8, 0x1d, 0x1d, 0xff, 0xc7, 0x1a, 0x3d, 0x3b, 0x62, 0x7c, 0x5a, 0xba,
- 0x33, 0xe8, 0xdb, 0xe8, 0x12, 0x77, 0x82, 0x88, 0x34, 0x16, 0x23, 0x31, 0x38, 0x1f, 0xec, 0xef,
- 0x91, 0xb2, 0xe1, 0xb6, 0x88, 0xbe, 0x1d, 0x04, 0x11, 0x1c, 0xd3, 0xb3, 0x7f, 0xa1, 0x08, 0xb3,
- 0x79, 0xdd, 0xa5, 0x8b, 0x8e, 0xdc, 0x77, 0xa3, 0x65, 0xca, 0x5e, 0x59, 0xe6, 0xa2, 0x5b, 0x11,
- 0xe5, 0x58, 0x61, 0xd0, 0xd9, 0x0f, 0xdd, 0x6d, 0xf9, 0xc6, 0x1c, 0x8c, 0x67, 0xbf, 0xc6, 0x4a,
- 0xb1, 0x80, 0x52, 0xbc, 0x80, 0x38, 0xa1, 0x30, 0xee, 0xd1, 0x56, 0x09, 0x66, 0xa5, 0x58, 0x40,
- 0x75, 0x69, 0xd7, 0x40, 0x0f, 0x69, 0x97, 0x31, 0x44, 0x83, 0xc7, 0x3b, 0x44, 0xe8, 0x33, 0x00,
- 0x5b, 0xae, 0xe7, 0x86, 0x3b, 0x8c, 0xfa, 0xd0, 0x91, 0xa9, 0x2b, 0xe6, 0x6c, 0x55, 0x51, 0xc1,
- 0x1a, 0x45, 0xf4, 0x12, 0x8c, 0xaa, 0x0d, 0x58, 0x29, 0x33, 0x4d, 0xa7, 0x66, 0x39, 0x12, 0x9f,
- 0x46, 0x65, 0xac, 0xe3, 0xd9, 0x9f, 0x4b, 0xae, 0x17, 0xb1, 0x03, 0xb4, 0xf1, 0xb5, 0xfa, 0x1d,
- 0xdf, 0x42, 0xf7, 0xf1, 0xb5, 0xbf, 0x56, 0x84, 0x49, 0xa3, 0xb1, 0x4e, 0xd8, 0xc7, 0x99, 0x75,
- 0x8d, 0x1e, 0xe0, 0x4e, 0x44, 0xc4, 0xfe, 0xb3, 0x7b, 0x6f, 0x15, 0xfd, 0x90, 0xa7, 0x3b, 0x80,
- 0xd7, 0x47, 0x9f, 0x81, 0x52, 0xd3, 0x09, 0x99, 0xe4, 0x8c, 0x88, 0x7d, 0xd7, 0x0f, 0xb1, 0xf8,
- 0x61, 0xe2, 0x84, 0x91, 0x76, 0x6b, 0x72, 0xda, 0x31, 0x49, 0x7a, 0xd3, 0x50, 0xfe, 0x44, 0x5a,
- 0x8f, 0xa9, 0x4e, 0x50, 0x26, 0x66, 0x1f, 0x73, 0x18, 0x7a, 0x19, 0xc6, 0x02, 0xc2, 0x56, 0xc5,
- 0x32, 0xe5, 0xe6, 0xd8, 0x32, 0x1b, 0x8c, 0xd9, 0x3e, 0xac, 0xc1, 0xb0, 0x81, 0x19, 0xbf, 0x0d,
- 0x86, 0xba, 0xbc, 0x0d, 0x9e, 0x86, 0x61, 0xf6, 0x43, 0xad, 0x00, 0x35, 0x1b, 0x15, 0x5e, 0x8c,
- 0x25, 0x3c, 0xb9, 0x60, 0x46, 0xfa, 0x5b, 0x30, 0xf4, 0xf5, 0x21, 0x16, 0x35, 0xd3, 0x32, 0x8f,
- 0xf0, 0x53, 0x4e, 0x2c, 0x79, 0x2c, 0x61, 0xf6, 0x07, 0x61, 0xa2, 0xec, 0x90, 0x96, 0xef, 0xad,
- 0x78, 0x8d, 0xb6, 0xef, 0x7a, 0x11, 0x9a, 0x85, 0x01, 0x76, 0x89, 0xf0, 0x23, 0x60, 0x80, 0x36,
- 0x84, 0x59, 0x89, 0xbd, 0x0d, 0xa7, 0xcb, 0xfe, 0x3d, 0xef, 0x9e, 0x13, 0x34, 0x16, 0xab, 0x15,
- 0xed, 0x7d, 0xbd, 0x2e, 0xdf, 0x77, 0xdc, 0x68, 0x2b, 0xf3, 0xe8, 0xd5, 0x6a, 0x72, 0xb6, 0x76,
- 0xd5, 0x6d, 0x92, 0x1c, 0x29, 0xc8, 0x5f, 0x2d, 0x18, 0x2d, 0xc5, 0xf8, 0x4a, 0xab, 0x65, 0xe5,
- 0x6a, 0xb5, 0xde, 0x80, 0x91, 0x2d, 0x97, 0x34, 0x1b, 0x98, 0x6c, 0x89, 0x95, 0xf8, 0x54, 0xbe,
- 0x1d, 0xca, 0x2a, 0xc5, 0x94, 0x52, 0x2f, 0xfe, 0x3a, 0x5c, 0x15, 0x95, 0xb1, 0x22, 0x83, 0x76,
- 0x61, 0x4a, 0x3e, 0x18, 0x24, 0x54, 0xac, 0xcb, 0xa7, 0xbb, 0xbd, 0x42, 0x4c, 0xe2, 0xa7, 0x1e,
- 0x1c, 0xcc, 0x4f, 0xe1, 0x04, 0x19, 0x9c, 0x22, 0x4c, 0x9f, 0x83, 0x2d, 0x7a, 0x02, 0x0f, 0xb0,
- 0xe1, 0x67, 0xcf, 0x41, 0xf6, 0xb2, 0x65, 0xa5, 0xf6, 0x8f, 0x59, 0xf0, 0x58, 0x6a, 0x64, 0xc4,
- 0x0b, 0xff, 0x98, 0x67, 0x21, 0xf9, 0xe2, 0x2e, 0xf4, 0x7e, 0x71, 0xdb, 0x7f, 0xd7, 0x82, 0x53,
- 0x2b, 0xad, 0x76, 0xb4, 0x5f, 0x76, 0x4d, 0x15, 0xd4, 0x47, 0x60, 0xa8, 0x45, 0x1a, 0x6e, 0xa7,
- 0x25, 0x66, 0x6e, 0x5e, 0x9e, 0x52, 0x6b, 0xac, 0xf4, 0xf0, 0x60, 0x7e, 0xbc, 0x16, 0xf9, 0x81,
- 0xb3, 0x4d, 0x78, 0x01, 0x16, 0xe8, 0xec, 0xac, 0x77, 0xdf, 0x26, 0x37, 0xdd, 0x96, 0x2b, 0xed,
- 0x8a, 0xba, 0xca, 0xec, 0x16, 0xe4, 0x80, 0x2e, 0xbc, 0xd1, 0x71, 0xbc, 0xc8, 0x8d, 0xf6, 0x85,
- 0xf6, 0x48, 0x12, 0xc1, 0x31, 0x3d, 0xfb, 0xab, 0x16, 0x4c, 0xca, 0x75, 0xbf, 0xd8, 0x68, 0x04,
- 0x24, 0x0c, 0xd1, 0x1c, 0x14, 0xdc, 0xb6, 0xe8, 0x25, 0x88, 0x5e, 0x16, 0x2a, 0x55, 0x5c, 0x70,
- 0xdb, 0x92, 0x2d, 0x63, 0x07, 0x61, 0xd1, 0x54, 0xa4, 0x5d, 0x17, 0xe5, 0x58, 0x61, 0xa0, 0xcb,
- 0x30, 0xe2, 0xf9, 0x0d, 0x6e, 0xdb, 0xc5, 0xaf, 0x34, 0xb6, 0xc0, 0xd6, 0x45, 0x19, 0x56, 0x50,
- 0x54, 0x85, 0x12, 0x37, 0x7b, 0x8a, 0x17, 0x6d, 0x5f, 0xc6, 0x53, 0xec, 0xcb, 0x36, 0x64, 0x4d,
- 0x1c, 0x13, 0xb1, 0x7f, 0xc5, 0x82, 0x31, 0xf9, 0x65, 0x7d, 0xf2, 0x9c, 0x74, 0x6b, 0xc5, 0xfc,
- 0x66, 0xbc, 0xb5, 0x28, 0xcf, 0xc8, 0x20, 0x06, 0xab, 0x58, 0x3c, 0x12, 0xab, 0x78, 0x15, 0x46,
- 0x9d, 0x76, 0xbb, 0x6a, 0xf2, 0x99, 0x6c, 0x29, 0x2d, 0xc6, 0xc5, 0x58, 0xc7, 0xb1, 0x7f, 0xb4,
- 0x00, 0x13, 0xf2, 0x0b, 0x6a, 0x9d, 0xcd, 0x90, 0x44, 0x68, 0x03, 0x4a, 0x0e, 0x9f, 0x25, 0x22,
- 0x17, 0xf9, 0xc5, 0x6c, 0x39, 0x82, 0x31, 0xa5, 0xf1, 0x85, 0xbf, 0x28, 0x6b, 0xe3, 0x98, 0x10,
- 0x6a, 0xc2, 0xb4, 0xe7, 0x47, 0xec, 0xf0, 0x57, 0xf0, 0x6e, 0xaa, 0x9d, 0x24, 0xf5, 0xb3, 0x82,
- 0xfa, 0xf4, 0x7a, 0x92, 0x0a, 0x4e, 0x13, 0x46, 0x2b, 0x52, 0x36, 0x53, 0xcc, 0x17, 0x06, 0xe8,
- 0x13, 0x97, 0x2d, 0x9a, 0xb1, 0x7f, 0xd9, 0x82, 0x92, 0x44, 0x3b, 0x09, 0x2d, 0xde, 0x1a, 0x0c,
- 0x87, 0x6c, 0x12, 0xe4, 0xd0, 0xd8, 0xdd, 0x3a, 0xce, 0xe7, 0x2b, 0xbe, 0xd3, 0xf8, 0xff, 0x10,
- 0x4b, 0x1a, 0x4c, 0x34, 0xaf, 0xba, 0xff, 0x2e, 0x11, 0xcd, 0xab, 0xfe, 0xe4, 0x5c, 0x4a, 0x7f,
- 0xc8, 0xfa, 0xac, 0xc9, 0xba, 0x28, 0xeb, 0xd5, 0x0e, 0xc8, 0x96, 0x7b, 0x3f, 0xc9, 0x7a, 0x55,
- 0x59, 0x29, 0x16, 0x50, 0xf4, 0x26, 0x8c, 0xd5, 0xa5, 0x4c, 0x36, 0xde, 0xe1, 0x97, 0xba, 0xea,
- 0x07, 0x94, 0x2a, 0x89, 0xcb, 0x42, 0x96, 0xb5, 0xfa, 0xd8, 0xa0, 0x66, 0x9a, 0x11, 0x14, 0x7b,
- 0x99, 0x11, 0xc4, 0x74, 0xf3, 0x95, 0xea, 0x3f, 0x6e, 0xc1, 0x10, 0x97, 0xc5, 0xf5, 0x27, 0x0a,
- 0xd5, 0x34, 0x6b, 0xf1, 0xd8, 0xdd, 0xa1, 0x85, 0x42, 0x53, 0x86, 0xd6, 0xa0, 0xc4, 0x7e, 0x30,
- 0x59, 0x62, 0x31, 0xdf, 0xea, 0x9e, 0xb7, 0xaa, 0x77, 0xf0, 0x8e, 0xac, 0x86, 0x63, 0x0a, 0xf6,
- 0x0f, 0x17, 0xe9, 0xe9, 0x16, 0xa3, 0x1a, 0x97, 0xbe, 0xf5, 0xe8, 0x2e, 0xfd, 0xc2, 0xa3, 0xba,
- 0xf4, 0xb7, 0x61, 0xb2, 0xae, 0xe9, 0xe1, 0xe2, 0x99, 0xbc, 0xdc, 0x75, 0x91, 0x68, 0x2a, 0x3b,
- 0x2e, 0x65, 0x59, 0x36, 0x89, 0xe0, 0x24, 0x55, 0xf4, 0x6d, 0x30, 0xc6, 0xe7, 0x59, 0xb4, 0xc2,
- 0x2d, 0x31, 0x3e, 0x90, 0xbf, 0x5e, 0xf4, 0x26, 0xb8, 0x54, 0x4e, 0xab, 0x8e, 0x0d, 0x62, 0xf6,
- 0x9f, 0x58, 0x80, 0x56, 0xda, 0x3b, 0xa4, 0x45, 0x02, 0xa7, 0x19, 0x8b, 0xd3, 0xbf, 0xdf, 0x82,
- 0x59, 0x92, 0x2a, 0x5e, 0xf6, 0x5b, 0x2d, 0xf1, 0x68, 0xc9, 0x79, 0x57, 0xaf, 0xe4, 0xd4, 0x51,
- 0x6e, 0x09, 0xb3, 0x79, 0x18, 0x38, 0xb7, 0x3d, 0xb4, 0x06, 0x33, 0xfc, 0x96, 0x54, 0x00, 0xcd,
- 0xf6, 0xfa, 0x71, 0x41, 0x78, 0x66, 0x23, 0x8d, 0x82, 0xb3, 0xea, 0xd9, 0xdf, 0x35, 0x06, 0xb9,
- 0xbd, 0x78, 0x4f, 0x8f, 0xf0, 0x9e, 0x1e, 0xe1, 0x3d, 0x3d, 0xc2, 0x7b, 0x7a, 0x84, 0xf7, 0xf4,
- 0x08, 0xdf, 0xf4, 0x7a, 0x84, 0x3f, 0xb2, 0x60, 0x26, 0x7d, 0x0d, 0x9c, 0x04, 0x63, 0xde, 0x81,
- 0x99, 0xf4, 0x5d, 0xd7, 0xd5, 0xce, 0x2e, 0xdd, 0xcf, 0xf8, 0xde, 0xcb, 0xf8, 0x06, 0x9c, 0x45,
- 0xdf, 0xfe, 0xcb, 0x16, 0x9c, 0x56, 0xc8, 0xc6, 0x4b, 0xff, 0xf3, 0x30, 0xc3, 0xcf, 0x97, 0xe5,
- 0xa6, 0xe3, 0xb6, 0x36, 0x48, 0xab, 0xdd, 0x74, 0x22, 0x69, 0x66, 0x70, 0x35, 0x73, 0xab, 0x26,
- 0x4c, 0x74, 0x8d, 0x8a, 0x4b, 0x8f, 0xd1, 0x7e, 0x65, 0x00, 0x70, 0x56, 0x33, 0xf6, 0x2f, 0x8c,
- 0xc0, 0xe0, 0xca, 0x1e, 0xf1, 0xa2, 0x13, 0x18, 0xfa, 0x3a, 0x4c, 0xb8, 0xde, 0x9e, 0xdf, 0xdc,
- 0x23, 0x0d, 0x0e, 0x3f, 0xca, 0xd3, 0xfd, 0x8c, 0x20, 0x3d, 0x51, 0x31, 0x48, 0xe0, 0x04, 0xc9,
- 0x47, 0x21, 0x3e, 0xbf, 0x06, 0x43, 0xfc, 0xd6, 0x12, 0xb2, 0xf3, 0xcc, 0x4b, 0x8a, 0x0d, 0xa2,
- 0xb8, 0x8b, 0x63, 0xd1, 0x3e, 0xbf, 0x15, 0x45, 0x75, 0xf4, 0x39, 0x98, 0xd8, 0x72, 0x83, 0x30,
- 0xda, 0x70, 0x5b, 0x24, 0x8c, 0x9c, 0x56, 0xfb, 0x21, 0xc4, 0xe5, 0x6a, 0x1c, 0x56, 0x0d, 0x4a,
- 0x38, 0x41, 0x19, 0x6d, 0xc3, 0x78, 0xd3, 0xd1, 0x9b, 0x1a, 0x3e, 0x72, 0x53, 0xea, 0x3a, 0xbc,
- 0xa9, 0x13, 0xc2, 0x26, 0x5d, 0x7a, 0x7e, 0xd4, 0x99, 0xc4, 0x77, 0x84, 0xc9, 0x41, 0xd4, 0xf9,
- 0xc1, 0x45, 0xbd, 0x1c, 0x46, 0x39, 0x3b, 0x66, 0x11, 0x5c, 0x32, 0x39, 0x3b, 0xcd, 0xee, 0xf7,
- 0xb3, 0x50, 0x22, 0x74, 0x08, 0x29, 0x61, 0x71, 0xa3, 0x5e, 0xe9, 0xaf, 0xaf, 0x6b, 0x6e, 0x3d,
- 0xf0, 0x4d, 0x45, 0xc5, 0x8a, 0xa4, 0x84, 0x63, 0xa2, 0x68, 0x19, 0x86, 0x42, 0x12, 0xb8, 0x24,
- 0x14, 0x77, 0x6b, 0x97, 0x69, 0x64, 0x68, 0xdc, 0x99, 0x86, 0xff, 0xc6, 0xa2, 0x2a, 0x5d, 0x5e,
- 0x0e, 0x93, 0xe1, 0xb2, 0xdb, 0x4f, 0x5b, 0x5e, 0x8b, 0xac, 0x14, 0x0b, 0x28, 0x7a, 0x1d, 0x86,
- 0x03, 0xd2, 0x64, 0x9a, 0xb0, 0xf1, 0xfe, 0x17, 0x39, 0x57, 0xac, 0xf1, 0x7a, 0x58, 0x12, 0x40,
- 0x37, 0x00, 0x05, 0x84, 0x72, 0x86, 0xae, 0xb7, 0xad, 0xec, 0x64, 0xc5, 0xcd, 0xa2, 0x4e, 0x22,
- 0x1c, 0x63, 0x48, 0xbf, 0x26, 0x9c, 0x51, 0x0d, 0x5d, 0x83, 0x69, 0x55, 0x5a, 0xf1, 0xc2, 0xc8,
- 0xa1, 0x27, 0xfa, 0x24, 0xa3, 0xa5, 0x04, 0x33, 0x38, 0x89, 0x80, 0xd3, 0x75, 0xec, 0x2f, 0x5b,
- 0xc0, 0xc7, 0xf9, 0x04, 0xc4, 0x11, 0xaf, 0x99, 0xe2, 0x88, 0xb3, 0xb9, 0x33, 0x97, 0x23, 0x8a,
- 0xf8, 0xb2, 0x05, 0xa3, 0xda, 0xcc, 0xc6, 0x6b, 0xd6, 0xea, 0xb2, 0x66, 0x3b, 0x30, 0x45, 0x57,
- 0xfa, 0xad, 0xcd, 0x90, 0x04, 0x7b, 0xa4, 0xc1, 0x16, 0x66, 0xe1, 0xe1, 0x16, 0xa6, 0xb2, 0xc9,
- 0xbb, 0x99, 0x20, 0x88, 0x53, 0x4d, 0xd8, 0x9f, 0x95, 0x5d, 0x55, 0x26, 0x8c, 0x75, 0x35, 0xe7,
- 0x09, 0x13, 0x46, 0x35, 0xab, 0x38, 0xc6, 0xa1, 0x5b, 0x6d, 0xc7, 0x0f, 0xa3, 0xa4, 0x09, 0xe3,
- 0x75, 0x3f, 0x8c, 0x30, 0x83, 0xd8, 0x2f, 0x00, 0xac, 0xdc, 0x27, 0x75, 0xbe, 0x62, 0xf5, 0xd7,
- 0x92, 0x95, 0xff, 0x5a, 0xb2, 0x7f, 0xcb, 0x82, 0x89, 0xd5, 0x65, 0xe3, 0xe6, 0x5a, 0x00, 0xe0,
- 0x4f, 0xbc, 0xbb, 0x77, 0xd7, 0xa5, 0xfe, 0x9f, 0xab, 0x70, 0x55, 0x29, 0xd6, 0x30, 0xd0, 0x59,
- 0x28, 0x36, 0x3b, 0x9e, 0x90, 0x97, 0x0e, 0x53, 0x7e, 0xe0, 0x66, 0xc7, 0xc3, 0xb4, 0x4c, 0xf3,
- 0xa1, 0x28, 0xf6, 0xed, 0x43, 0xd1, 0x33, 0x96, 0x01, 0x9a, 0x87, 0xc1, 0x7b, 0xf7, 0xdc, 0x06,
- 0xf7, 0x18, 0x15, 0xb6, 0x09, 0x77, 0xef, 0x56, 0xca, 0x21, 0xe6, 0xe5, 0xf6, 0x97, 0x8a, 0x30,
- 0xb7, 0xda, 0x24, 0xf7, 0xdf, 0xa1, 0xd7, 0x6c, 0xbf, 0x1e, 0x20, 0x47, 0x93, 0x3c, 0x1d, 0xd5,
- 0xcb, 0xa7, 0xf7, 0x78, 0x6c, 0xc1, 0x30, 0xb7, 0xe0, 0x93, 0x3e, 0xb4, 0xaf, 0x66, 0xb5, 0x9e,
- 0x3f, 0x20, 0x0b, 0xdc, 0x12, 0x50, 0xb8, 0x00, 0xaa, 0x0b, 0x53, 0x94, 0x62, 0x49, 0x7c, 0xee,
- 0x15, 0x18, 0xd3, 0x31, 0x8f, 0xe4, 0x6f, 0xf7, 0xff, 0x15, 0x61, 0x8a, 0xf6, 0xe0, 0x91, 0x4e,
- 0xc4, 0xed, 0xf4, 0x44, 0x1c, 0xb7, 0xcf, 0x55, 0xef, 0xd9, 0x78, 0x33, 0x39, 0x1b, 0x57, 0xf3,
- 0x66, 0xe3, 0xa4, 0xe7, 0xe0, 0x3b, 0x2d, 0x98, 0x59, 0x6d, 0xfa, 0xf5, 0xdd, 0x84, 0x5f, 0xd4,
- 0x4b, 0x30, 0x4a, 0x8f, 0xe3, 0xd0, 0x70, 0xd9, 0x37, 0x82, 0x38, 0x08, 0x10, 0xd6, 0xf1, 0xb4,
- 0x6a, 0xb7, 0x6f, 0x57, 0xca, 0x59, 0xb1, 0x1f, 0x04, 0x08, 0xeb, 0x78, 0xf6, 0x6f, 0x58, 0x70,
- 0xee, 0xda, 0xf2, 0x4a, 0xbc, 0x14, 0x53, 0xe1, 0x27, 0x2e, 0xc1, 0x50, 0xbb, 0xa1, 0x75, 0x25,
- 0x96, 0x27, 0x97, 0x59, 0x2f, 0x04, 0xf4, 0xdd, 0x12, 0x5a, 0xe5, 0xa7, 0x2c, 0x98, 0xb9, 0xe6,
- 0x46, 0xf4, 0x76, 0x4d, 0x06, 0x42, 0xa0, 0xd7, 0x6b, 0xe8, 0x46, 0x7e, 0xb0, 0x9f, 0x0c, 0x84,
- 0x80, 0x15, 0x04, 0x6b, 0x58, 0xbc, 0xe5, 0x3d, 0x97, 0xd9, 0x8e, 0x17, 0x4c, 0xcd, 0x1a, 0x16,
- 0xe5, 0x58, 0x61, 0xd0, 0x0f, 0x6b, 0xb8, 0x01, 0x13, 0x4a, 0xee, 0x8b, 0x13, 0x56, 0x7d, 0x58,
- 0x59, 0x02, 0x70, 0x8c, 0x43, 0xdf, 0x67, 0xf3, 0xd7, 0x9a, 0x9d, 0x30, 0x22, 0xc1, 0x56, 0x98,
- 0x73, 0x3a, 0xbe, 0x00, 0x25, 0x22, 0x55, 0x00, 0xa2, 0xd7, 0x8a, 0x63, 0x54, 0xba, 0x01, 0x1e,
- 0x8f, 0x41, 0xe1, 0xf5, 0xe1, 0x65, 0x79, 0x34, 0x37, 0xb9, 0x55, 0x40, 0x44, 0x6f, 0x4b, 0x0f,
- 0x50, 0xc1, 0x3c, 0xdd, 0x57, 0x52, 0x50, 0x9c, 0x51, 0xc3, 0xfe, 0x31, 0x0b, 0x4e, 0xab, 0x0f,
- 0x7e, 0xd7, 0x7d, 0xa6, 0xfd, 0xb3, 0x05, 0x18, 0xbf, 0xbe, 0xb1, 0x51, 0xbd, 0x46, 0x22, 0x71,
- 0x6d, 0xf7, 0x56, 0xec, 0x63, 0x4d, 0x3f, 0xd9, 0xed, 0x31, 0xd7, 0x89, 0xdc, 0xe6, 0x02, 0x8f,
- 0x73, 0xb4, 0x50, 0xf1, 0xa2, 0x5b, 0x41, 0x2d, 0x0a, 0x5c, 0x6f, 0x3b, 0x53, 0xa3, 0x29, 0x99,
- 0x8b, 0x62, 0x1e, 0x73, 0x81, 0x5e, 0x80, 0x21, 0x16, 0x68, 0x49, 0x4e, 0xc2, 0xe3, 0xea, 0x2d,
- 0xc4, 0x4a, 0x0f, 0x0f, 0xe6, 0x4b, 0xb7, 0x71, 0x85, 0xff, 0xc1, 0x02, 0x15, 0xdd, 0x86, 0xd1,
- 0x9d, 0x28, 0x6a, 0x5f, 0x27, 0x4e, 0x83, 0x3e, 0xc6, 0xf9, 0x71, 0x78, 0x3e, 0xeb, 0x38, 0xa4,
- 0x83, 0xc0, 0xd1, 0xe2, 0x13, 0x24, 0x2e, 0x0b, 0xb1, 0x4e, 0xc7, 0xae, 0x01, 0xc4, 0xb0, 0x63,
- 0x52, 0xcd, 0xd8, 0x7f, 0x60, 0xc1, 0x30, 0x8f, 0x79, 0x11, 0xa0, 0x8f, 0xc2, 0x00, 0xb9, 0x4f,
- 0xea, 0x82, 0xe3, 0xcd, 0xec, 0x70, 0xcc, 0x69, 0x71, 0x11, 0x33, 0xfd, 0x8f, 0x59, 0x2d, 0x74,
- 0x1d, 0x86, 0x69, 0x6f, 0xaf, 0xa9, 0x00, 0x20, 0x4f, 0xe6, 0x7d, 0xb1, 0x9a, 0x76, 0xce, 0x9c,
- 0x89, 0x22, 0x2c, 0xab, 0x33, 0x7d, 0x78, 0xbd, 0x5d, 0xa3, 0x27, 0x76, 0xd4, 0x8d, 0xb1, 0xd8,
- 0x58, 0xae, 0x72, 0x24, 0x41, 0x8d, 0xeb, 0xc3, 0x65, 0x21, 0x8e, 0x89, 0xd8, 0x1b, 0x50, 0xa2,
- 0x93, 0xba, 0xd8, 0x74, 0x9d, 0xee, 0x2a, 0xfe, 0x67, 0xa0, 0x24, 0x15, 0xf8, 0xa1, 0xf0, 0x75,
- 0x67, 0x54, 0xa5, 0x7e, 0x3f, 0xc4, 0x31, 0xdc, 0xde, 0x82, 0x53, 0xcc, 0x1c, 0xd3, 0x89, 0x76,
- 0x8c, 0x3d, 0xd6, 0x7b, 0x31, 0x3f, 0x2b, 0x1e, 0x90, 0x7c, 0x66, 0x66, 0x35, 0x77, 0xd2, 0x31,
- 0x49, 0x31, 0x7e, 0x4c, 0xda, 0x5f, 0x1b, 0x80, 0xc7, 0x2b, 0xb5, 0xfc, 0x70, 0x28, 0x2f, 0xc3,
- 0x18, 0xe7, 0x4b, 0xe9, 0xd2, 0x76, 0x9a, 0xa2, 0x5d, 0x25, 0x5b, 0xde, 0xd0, 0x60, 0xd8, 0xc0,
- 0x44, 0xe7, 0xa0, 0xe8, 0xbe, 0xe5, 0x25, 0x9d, 0xad, 0x2a, 0x6f, 0xac, 0x63, 0x5a, 0x4e, 0xc1,
- 0x94, 0xc5, 0xe5, 0x77, 0x87, 0x02, 0x2b, 0x36, 0xf7, 0x35, 0x98, 0x70, 0xc3, 0x7a, 0xe8, 0x56,
- 0x3c, 0x7a, 0xce, 0x68, 0x27, 0x95, 0x12, 0x6e, 0xd0, 0x4e, 0x2b, 0x28, 0x4e, 0x60, 0x6b, 0x17,
- 0xd9, 0x60, 0xdf, 0x6c, 0x72, 0x4f, 0xe7, 0x6f, 0xfa, 0x02, 0x68, 0xb3, 0xaf, 0x0b, 0x99, 0x92,
- 0x40, 0xbc, 0x00, 0xf8, 0x07, 0x87, 0x58, 0xc2, 0xe8, 0xcb, 0xb1, 0xbe, 0xe3, 0xb4, 0x17, 0x3b,
- 0xd1, 0x4e, 0xd9, 0x0d, 0xeb, 0xfe, 0x1e, 0x09, 0xf6, 0xd9, 0xa3, 0x7f, 0x24, 0x7e, 0x39, 0x2a,
- 0xc0, 0xf2, 0xf5, 0xc5, 0x2a, 0xc5, 0xc4, 0xe9, 0x3a, 0x68, 0x11, 0x26, 0x65, 0x61, 0x8d, 0x84,
- 0xec, 0x0a, 0x1b, 0x65, 0x64, 0x94, 0xfb, 0x93, 0x28, 0x56, 0x44, 0x92, 0xf8, 0x26, 0x27, 0x0d,
- 0xc7, 0xc1, 0x49, 0x7f, 0x04, 0xc6, 0x5d, 0xcf, 0x8d, 0x5c, 0x27, 0xf2, 0xb9, 0x86, 0x8b, 0xbf,
- 0xef, 0x99, 0xe8, 0xbe, 0xa2, 0x03, 0xb0, 0x89, 0x67, 0xff, 0x97, 0x01, 0x98, 0x66, 0xd3, 0xf6,
- 0xde, 0x0a, 0xfb, 0x66, 0x5a, 0x61, 0xb7, 0xd3, 0x2b, 0xec, 0x38, 0x9e, 0x08, 0x0f, 0xbd, 0xcc,
- 0x3e, 0x07, 0x25, 0xe5, 0xf1, 0x25, 0x5d, 0x3e, 0xad, 0x1c, 0x97, 0xcf, 0xde, 0xdc, 0x87, 0x34,
- 0x9a, 0x2b, 0x66, 0x1a, 0xcd, 0xfd, 0x75, 0x0b, 0x62, 0x95, 0x0d, 0xba, 0x0e, 0xa5, 0xb6, 0xcf,
- 0x6c, 0x41, 0x03, 0x69, 0x60, 0xfd, 0x78, 0xe6, 0x45, 0xc5, 0x2f, 0x45, 0xfe, 0xf1, 0x55, 0x59,
- 0x03, 0xc7, 0x95, 0xd1, 0x12, 0x0c, 0xb7, 0x03, 0x52, 0x8b, 0x58, 0x54, 0x94, 0x9e, 0x74, 0xf8,
- 0x1a, 0xe1, 0xf8, 0x58, 0x56, 0xb4, 0x7f, 0xce, 0x02, 0xe0, 0x76, 0x69, 0x8e, 0xb7, 0x4d, 0x4e,
- 0x40, 0x6a, 0x5d, 0x86, 0x81, 0xb0, 0x4d, 0xea, 0xdd, 0xac, 0x74, 0xe3, 0xfe, 0xd4, 0xda, 0xa4,
- 0x1e, 0x0f, 0x38, 0xfd, 0x87, 0x59, 0x6d, 0xfb, 0xbb, 0x01, 0x26, 0x62, 0xb4, 0x4a, 0x44, 0x5a,
- 0xe8, 0x39, 0x23, 0x4a, 0xc2, 0xd9, 0x44, 0x94, 0x84, 0x12, 0xc3, 0xd6, 0x04, 0xa4, 0x9f, 0x83,
- 0x62, 0xcb, 0xb9, 0x2f, 0x24, 0x60, 0xcf, 0x74, 0xef, 0x06, 0xa5, 0xbf, 0xb0, 0xe6, 0xdc, 0xe7,
- 0x8f, 0xc4, 0x67, 0xe4, 0x02, 0x59, 0x73, 0xee, 0x1f, 0x72, 0x5b, 0x5c, 0x76, 0x48, 0xdd, 0x74,
- 0xc3, 0xe8, 0x0b, 0xff, 0x39, 0xfe, 0xcf, 0x96, 0x1d, 0x6d, 0x84, 0xb5, 0xe5, 0x7a, 0xc2, 0xe4,
- 0xaa, 0xaf, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0xf5, 0xd1, 0x96, 0xeb, 0xa1, 0xb7, 0x61, 0x58,
- 0x58, 0x44, 0x8a, 0xa8, 0x44, 0x57, 0xfa, 0x68, 0x4f, 0x18, 0x54, 0xf2, 0x36, 0xaf, 0xc8, 0x47,
- 0xb0, 0x28, 0xed, 0xd9, 0xae, 0x6c, 0x10, 0xfd, 0x15, 0x0b, 0x26, 0xc4, 0x6f, 0x4c, 0xde, 0xea,
- 0x90, 0x30, 0x12, 0xbc, 0xe7, 0x87, 0xfb, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2, 0x61, 0x79, 0xcc,
- 0x9a, 0xc0, 0x9e, 0x3d, 0x4a, 0xf4, 0x02, 0xfd, 0x7d, 0x0b, 0x4e, 0xb5, 0x9c, 0xfb, 0xbc, 0x45,
- 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0x65, 0xc1, 0x47, 0xfb, 0x9b, 0xfe, 0x54, 0x75, 0xde, 0x49,
- 0xa9, 0xfe, 0x3c, 0x95, 0x85, 0xd2, 0xb3, 0xab, 0x99, 0xfd, 0x9a, 0xdb, 0x82, 0x11, 0xb9, 0xde,
- 0x32, 0x44, 0x0d, 0x65, 0x9d, 0xb1, 0x3e, 0xb2, 0x41, 0xaa, 0x1e, 0x7d, 0x80, 0xb6, 0x23, 0xd6,
- 0xda, 0x23, 0x6d, 0xe7, 0x73, 0x30, 0xa6, 0xaf, 0xb1, 0x47, 0xda, 0xd6, 0x5b, 0x30, 0x93, 0xb1,
- 0x96, 0x1e, 0x69, 0x93, 0xf7, 0xe0, 0x6c, 0xee, 0xfa, 0x78, 0x94, 0x0d, 0xdb, 0x3f, 0x6b, 0xe9,
- 0xe7, 0xe0, 0x09, 0xa8, 0x0e, 0x96, 0x4d, 0xd5, 0xc1, 0xf9, 0xee, 0x3b, 0x27, 0x47, 0x7f, 0xf0,
- 0xa6, 0xde, 0x69, 0x7a, 0xaa, 0xa3, 0xd7, 0x61, 0xa8, 0x49, 0x4b, 0xa4, 0x5d, 0xad, 0xdd, 0x7b,
- 0x47, 0xc6, 0xbc, 0x14, 0x2b, 0x0f, 0xb1, 0xa0, 0x60, 0xff, 0xa2, 0x05, 0x03, 0x27, 0x30, 0x12,
- 0xd8, 0x1c, 0x89, 0xe7, 0x72, 0x49, 0x8b, 0x80, 0xc9, 0x0b, 0xd8, 0xb9, 0xb7, 0x72, 0x3f, 0x22,
- 0x5e, 0xc8, 0x9e, 0x8a, 0x99, 0x03, 0xf3, 0x93, 0x16, 0xcc, 0xdc, 0xf4, 0x9d, 0xc6, 0x92, 0xd3,
- 0x74, 0xbc, 0x3a, 0x09, 0x2a, 0xde, 0xf6, 0x91, 0x8c, 0xc2, 0x0b, 0x3d, 0x8d, 0xc2, 0x97, 0xa5,
- 0x4d, 0xd5, 0x40, 0xfe, 0xfc, 0x51, 0x46, 0x32, 0x19, 0x37, 0xc6, 0xb0, 0xfe, 0xdd, 0x01, 0xa4,
- 0xf7, 0x52, 0xb8, 0xe8, 0x60, 0x18, 0x76, 0x79, 0x7f, 0xc5, 0x24, 0x3e, 0x95, 0xcd, 0xe0, 0xa5,
- 0x3e, 0x4f, 0x73, 0x3e, 0xe1, 0x05, 0x58, 0x12, 0xb2, 0x5f, 0x86, 0x4c, 0x3f, 0xff, 0xde, 0xc2,
- 0x07, 0xfb, 0x93, 0x30, 0xcd, 0x6a, 0x1e, 0xf1, 0x61, 0x6c, 0x27, 0x64, 0x9b, 0x19, 0x11, 0x00,
- 0xed, 0x2f, 0x5a, 0x30, 0xb9, 0x9e, 0x08, 0x8c, 0x76, 0x89, 0x69, 0x43, 0x33, 0x44, 0xea, 0x35,
- 0x56, 0x8a, 0x05, 0xf4, 0xd8, 0x25, 0x59, 0x7f, 0x61, 0x41, 0x1c, 0x7a, 0xe3, 0x04, 0xd8, 0xb7,
- 0x65, 0x83, 0x7d, 0xcb, 0x94, 0xb0, 0xa8, 0xee, 0xe4, 0x71, 0x6f, 0xe8, 0x86, 0x0a, 0x4a, 0xd5,
- 0x45, 0xb8, 0x12, 0x93, 0xe1, 0x4b, 0x71, 0xc2, 0x8c, 0x5c, 0x25, 0xc3, 0x54, 0xd9, 0xbf, 0x5d,
- 0x00, 0xa4, 0x70, 0xfb, 0x0e, 0x9a, 0x95, 0xae, 0x71, 0x3c, 0x41, 0xb3, 0xf6, 0x00, 0x31, 0x7d,
- 0x7e, 0xe0, 0x78, 0x21, 0x27, 0xeb, 0x0a, 0xd9, 0xdd, 0xd1, 0x8c, 0x05, 0xe6, 0x44, 0x93, 0xe8,
- 0x66, 0x8a, 0x1a, 0xce, 0x68, 0x41, 0xb3, 0xd3, 0x18, 0xec, 0xd7, 0x4e, 0x63, 0xa8, 0x87, 0x1b,
- 0xde, 0xcf, 0x58, 0x30, 0xae, 0x86, 0xe9, 0x5d, 0x62, 0x24, 0xaf, 0xfa, 0x93, 0x73, 0x80, 0x56,
- 0xb5, 0x2e, 0xb3, 0x8b, 0xe5, 0x5b, 0x99, 0x3b, 0xa5, 0xd3, 0x74, 0xdf, 0x26, 0x2a, 0x64, 0xe1,
- 0xbc, 0x70, 0x8f, 0x14, 0xa5, 0x87, 0x07, 0xf3, 0xe3, 0xea, 0x1f, 0x0f, 0x91, 0x1c, 0x57, 0xa1,
- 0x47, 0xf2, 0x64, 0x62, 0x29, 0xa2, 0x97, 0x60, 0xb0, 0xbd, 0xe3, 0x84, 0x24, 0xe1, 0x4c, 0x34,
- 0x58, 0xa5, 0x85, 0x87, 0x07, 0xf3, 0x13, 0xaa, 0x02, 0x2b, 0xc1, 0x1c, 0xbb, 0xff, 0x50, 0x64,
- 0xe9, 0xc5, 0xd9, 0x33, 0x14, 0xd9, 0x9f, 0x58, 0x30, 0xb0, 0xee, 0x37, 0x4e, 0xe2, 0x08, 0x78,
- 0xcd, 0x38, 0x02, 0x9e, 0xc8, 0x8b, 0x5e, 0x9f, 0xbb, 0xfb, 0x57, 0x13, 0xbb, 0xff, 0x7c, 0x2e,
- 0x85, 0xee, 0x1b, 0xbf, 0x05, 0xa3, 0x2c, 0x26, 0xbe, 0x70, 0x9c, 0x7a, 0xc1, 0xd8, 0xf0, 0xf3,
- 0x89, 0x0d, 0x3f, 0xa9, 0xa1, 0x6a, 0x3b, 0xfd, 0x69, 0x18, 0x16, 0x9e, 0x38, 0x49, 0xaf, 0x54,
- 0x81, 0x8b, 0x25, 0xdc, 0xfe, 0xf1, 0x22, 0x18, 0x31, 0xf8, 0xd1, 0x2f, 0x5b, 0xb0, 0x10, 0x70,
- 0x0b, 0xdd, 0x46, 0xb9, 0x13, 0xb8, 0xde, 0x76, 0xad, 0xbe, 0x43, 0x1a, 0x9d, 0xa6, 0xeb, 0x6d,
- 0x57, 0xb6, 0x3d, 0x5f, 0x15, 0xaf, 0xdc, 0x27, 0xf5, 0x0e, 0x53, 0x82, 0xf5, 0x08, 0xf8, 0xaf,
- 0x2c, 0xdd, 0x9f, 0x7f, 0x70, 0x30, 0xbf, 0x80, 0x8f, 0x44, 0x1b, 0x1f, 0xb1, 0x2f, 0xe8, 0x37,
- 0x2c, 0xb8, 0xc2, 0x43, 0xd3, 0xf7, 0xdf, 0xff, 0x2e, 0xaf, 0xe5, 0xaa, 0x24, 0x15, 0x13, 0xd9,
- 0x20, 0x41, 0x6b, 0xe9, 0x23, 0x62, 0x40, 0xaf, 0x54, 0x8f, 0xd6, 0x16, 0x3e, 0x6a, 0xe7, 0xec,
- 0x7f, 0x56, 0x84, 0x71, 0x11, 0xb2, 0x4a, 0xdc, 0x01, 0x2f, 0x19, 0x4b, 0xe2, 0xc9, 0xc4, 0x92,
- 0x98, 0x36, 0x90, 0x8f, 0xe7, 0xf8, 0x0f, 0x61, 0x9a, 0x1e, 0xce, 0xd7, 0x89, 0x13, 0x44, 0x9b,
- 0xc4, 0xe1, 0xe6, 0x57, 0xc5, 0x23, 0x9f, 0xfe, 0x4a, 0x3c, 0x77, 0x33, 0x49, 0x0c, 0xa7, 0xe9,
- 0x7f, 0x33, 0xdd, 0x39, 0x1e, 0x4c, 0xa5, 0xa2, 0x8e, 0x7d, 0x0a, 0x4a, 0xca, 0x8d, 0x44, 0x1c,
- 0x3a, 0xdd, 0x83, 0xf7, 0x25, 0x29, 0x70, 0x11, 0x5a, 0xec, 0xc2, 0x14, 0x93, 0xb3, 0xff, 0x41,
- 0xc1, 0x68, 0x90, 0x4f, 0xe2, 0x3a, 0x8c, 0x38, 0x61, 0xe8, 0x6e, 0x7b, 0xa4, 0x21, 0x76, 0xec,
- 0xfb, 0xf3, 0x76, 0xac, 0xd1, 0x0c, 0x73, 0xe5, 0x59, 0x14, 0x35, 0xb1, 0xa2, 0x81, 0xae, 0x73,
- 0x23, 0xb7, 0x3d, 0xf9, 0xde, 0xeb, 0x8f, 0x1a, 0x48, 0x33, 0xb8, 0x3d, 0x82, 0x45, 0x7d, 0xf4,
- 0x69, 0x6e, 0x85, 0x78, 0xc3, 0xf3, 0xef, 0x79, 0xd7, 0x7c, 0x5f, 0x86, 0x85, 0xe8, 0x8f, 0xe0,
- 0xb4, 0xb4, 0x3d, 0x54, 0xd5, 0xb1, 0x49, 0xad, 0xbf, 0x30, 0x9e, 0x9f, 0x87, 0x19, 0x4a, 0xda,
- 0xf4, 0xda, 0x0e, 0x11, 0x81, 0x49, 0x11, 0x0f, 0x4d, 0x96, 0x89, 0xb1, 0xcb, 0x7c, 0xca, 0x99,
- 0xb5, 0x63, 0x39, 0xf2, 0x0d, 0x93, 0x04, 0x4e, 0xd2, 0xb4, 0xff, 0xb6, 0x05, 0xcc, 0x83, 0xf5,
- 0x04, 0xf8, 0x91, 0x8f, 0x99, 0xfc, 0xc8, 0x6c, 0xde, 0x20, 0xe7, 0xb0, 0x22, 0x2f, 0xf2, 0x95,
- 0x55, 0x0d, 0xfc, 0xfb, 0xfb, 0xc2, 0x74, 0xa4, 0xf7, 0xfb, 0xc3, 0xfe, 0xdf, 0x16, 0x3f, 0xc4,
- 0x94, 0x93, 0x07, 0xfa, 0x76, 0x18, 0xa9, 0x3b, 0x6d, 0xa7, 0xce, 0x13, 0xc6, 0xe4, 0x4a, 0xf4,
- 0x8c, 0x4a, 0x0b, 0xcb, 0xa2, 0x06, 0x97, 0x50, 0xc9, 0xb8, 0x7a, 0x23, 0xb2, 0xb8, 0xa7, 0x54,
- 0x4a, 0x35, 0x39, 0xb7, 0x0b, 0xe3, 0x06, 0xb1, 0x47, 0x2a, 0xce, 0xf8, 0x76, 0x7e, 0xc5, 0xaa,
- 0x38, 0x90, 0x2d, 0x98, 0xf6, 0xb4, 0xff, 0xf4, 0x42, 0x91, 0x8f, 0xcb, 0xf7, 0xf7, 0xba, 0x44,
- 0xd9, 0xed, 0xa3, 0x39, 0xc7, 0x26, 0xc8, 0xe0, 0x34, 0x65, 0xfb, 0x27, 0x2c, 0x78, 0x4c, 0x47,
- 0xd4, 0xfc, 0x6f, 0x7a, 0xe9, 0x08, 0xca, 0x30, 0xe2, 0xb7, 0x49, 0xe0, 0x44, 0x7e, 0x20, 0x6e,
- 0x8d, 0xcb, 0x72, 0xd0, 0x6f, 0x89, 0xf2, 0x43, 0x11, 0x6e, 0x5d, 0x52, 0x97, 0xe5, 0x58, 0xd5,
- 0xa4, 0xaf, 0x4f, 0x36, 0x18, 0xa1, 0xf0, 0xb4, 0x62, 0x67, 0x00, 0x53, 0x97, 0x87, 0x58, 0x40,
- 0xec, 0xaf, 0x59, 0x7c, 0x61, 0xe9, 0x5d, 0x47, 0x6f, 0xc1, 0x54, 0xcb, 0x89, 0xea, 0x3b, 0x2b,
- 0xf7, 0xdb, 0x01, 0xd7, 0xb8, 0xc8, 0x71, 0x7a, 0xa6, 0xd7, 0x38, 0x69, 0x1f, 0x19, 0x1b, 0x56,
- 0xae, 0x25, 0x88, 0xe1, 0x14, 0x79, 0xb4, 0x09, 0xa3, 0xac, 0x8c, 0x39, 0x11, 0x86, 0xdd, 0x58,
- 0x83, 0xbc, 0xd6, 0x94, 0xc5, 0xc1, 0x5a, 0x4c, 0x07, 0xeb, 0x44, 0xed, 0x9f, 0x2e, 0xf2, 0xdd,
- 0xce, 0x58, 0xf9, 0xa7, 0x61, 0xb8, 0xed, 0x37, 0x96, 0x2b, 0x65, 0x2c, 0x66, 0x41, 0x5d, 0x23,
- 0x55, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, 0x86, 0x11, 0xf1, 0x53, 0x6a, 0xc8, 0xd8, 0xd9, 0x2c, 0xf0,
- 0x42, 0xac, 0xa0, 0xe8, 0x79, 0x80, 0x76, 0xe0, 0xef, 0xb9, 0x0d, 0x16, 0xdc, 0xa2, 0x68, 0x1a,
- 0x0b, 0x55, 0x15, 0x04, 0x6b, 0x58, 0xe8, 0x55, 0x18, 0xef, 0x78, 0x21, 0x67, 0x47, 0xb4, 0x50,
- 0xb6, 0xca, 0x8c, 0xe5, 0xb6, 0x0e, 0xc4, 0x26, 0x2e, 0x5a, 0x84, 0xa1, 0xc8, 0x61, 0xc6, 0x2f,
- 0x83, 0xf9, 0xc6, 0xb7, 0x1b, 0x14, 0x43, 0xcf, 0x4d, 0x42, 0x2b, 0x60, 0x51, 0x11, 0x7d, 0x4a,
- 0xfa, 0xf3, 0xf2, 0x83, 0x5d, 0x58, 0xbd, 0xf7, 0x77, 0x09, 0x68, 0xde, 0xbc, 0xc2, 0x9a, 0xde,
- 0xa0, 0x85, 0x5e, 0x01, 0x20, 0xf7, 0x23, 0x12, 0x78, 0x4e, 0x53, 0xd9, 0x96, 0x29, 0xbe, 0xa0,
- 0xec, 0xaf, 0xfb, 0xd1, 0xed, 0x90, 0xac, 0x28, 0x0c, 0xac, 0x61, 0xdb, 0xbf, 0x51, 0x02, 0x88,
- 0xf9, 0x76, 0xf4, 0x76, 0xea, 0xe0, 0x7a, 0xb6, 0x3b, 0xa7, 0x7f, 0x7c, 0xa7, 0x16, 0xfa, 0x1e,
- 0x0b, 0x46, 0x9d, 0x66, 0xd3, 0xaf, 0x3b, 0x3c, 0xd8, 0x70, 0xa1, 0xfb, 0xc1, 0x29, 0xda, 0x5f,
- 0x8c, 0x6b, 0xf0, 0x2e, 0xbc, 0x20, 0x57, 0xa8, 0x06, 0xe9, 0xd9, 0x0b, 0xbd, 0x61, 0xf4, 0x21,
- 0xf9, 0x54, 0x2c, 0x1a, 0x43, 0xa9, 0x9e, 0x8a, 0x25, 0x76, 0x47, 0xe8, 0xaf, 0xc4, 0xdb, 0xc6,
- 0x2b, 0x71, 0x20, 0xdf, 0x61, 0xd1, 0x60, 0x5f, 0x7b, 0x3d, 0x10, 0x51, 0x55, 0x0f, 0x5e, 0x30,
- 0x98, 0xef, 0x1d, 0xa8, 0xbd, 0x93, 0x7a, 0x04, 0x2e, 0xf8, 0x1c, 0x4c, 0x36, 0x4c, 0x26, 0x40,
- 0xac, 0xc4, 0xa7, 0xf2, 0xe8, 0x26, 0x78, 0x86, 0xf8, 0xda, 0x4f, 0x00, 0x70, 0x92, 0x30, 0xaa,
- 0xf2, 0x58, 0x16, 0x15, 0x6f, 0xcb, 0x17, 0x9e, 0x17, 0x76, 0xee, 0x5c, 0xee, 0x87, 0x11, 0x69,
- 0x51, 0xcc, 0xf8, 0x76, 0x5f, 0x17, 0x75, 0xb1, 0xa2, 0x82, 0x5e, 0x87, 0x21, 0xe6, 0x1e, 0x16,
- 0xce, 0x8e, 0xe4, 0x4b, 0x9c, 0xcd, 0xe0, 0x6c, 0xf1, 0x86, 0x64, 0x7f, 0x43, 0x2c, 0x28, 0xa0,
- 0xeb, 0xd2, 0xf9, 0x32, 0xac, 0x78, 0xb7, 0x43, 0xc2, 0x9c, 0x2f, 0x4b, 0x4b, 0xef, 0x8f, 0xfd,
- 0x2a, 0x79, 0x79, 0x66, 0x06, 0x33, 0xa3, 0x26, 0xe5, 0xa2, 0xc4, 0x7f, 0x99, 0x18, 0x6d, 0x16,
- 0xf2, 0xbb, 0x67, 0x26, 0x4f, 0x8b, 0x87, 0xf3, 0x8e, 0x49, 0x02, 0x27, 0x69, 0x52, 0x8e, 0x94,
- 0xef, 0x7a, 0xe1, 0xbb, 0xd1, 0xeb, 0xec, 0xe0, 0x0f, 0x71, 0x76, 0x1b, 0xf1, 0x12, 0x2c, 0xea,
- 0x9f, 0x28, 0x7b, 0x30, 0xe7, 0xc1, 0x54, 0x72, 0x8b, 0x3e, 0x52, 0x76, 0xe4, 0x0f, 0x06, 0x60,
- 0xc2, 0x5c, 0x52, 0xe8, 0x0a, 0x94, 0x04, 0x11, 0x95, 0xcc, 0x40, 0xed, 0x92, 0x35, 0x09, 0xc0,
- 0x31, 0x0e, 0xcb, 0x61, 0xc1, 0xaa, 0x6b, 0xc6, 0xba, 0x71, 0x0e, 0x0b, 0x05, 0xc1, 0x1a, 0x16,
- 0x7d, 0x58, 0x6d, 0xfa, 0x7e, 0xa4, 0x2e, 0x24, 0xb5, 0xee, 0x96, 0x58, 0x29, 0x16, 0x50, 0x7a,
- 0x11, 0xed, 0x92, 0xc0, 0x23, 0x4d, 0x33, 0xec, 0xb1, 0xba, 0x88, 0x6e, 0xe8, 0x40, 0x6c, 0xe2,
- 0xd2, 0xeb, 0xd4, 0x0f, 0xd9, 0x42, 0x16, 0xcf, 0xb7, 0xd8, 0xf8, 0xb9, 0xc6, 0xfd, 0xbf, 0x25,
- 0x1c, 0x7d, 0x12, 0x1e, 0x53, 0xa1, 0x9d, 0x30, 0xd7, 0x66, 0xc8, 0x16, 0x87, 0x0c, 0x69, 0xcb,
- 0x63, 0xcb, 0xd9, 0x68, 0x38, 0xaf, 0x3e, 0x7a, 0x0d, 0x26, 0x04, 0x8b, 0x2f, 0x29, 0x0e, 0x9b,
- 0x06, 0x36, 0x37, 0x0c, 0x28, 0x4e, 0x60, 0xcb, 0xc0, 0xcd, 0x8c, 0xcb, 0x96, 0x14, 0x46, 0xd2,
- 0x81, 0x9b, 0x75, 0x38, 0x4e, 0xd5, 0x40, 0x8b, 0x30, 0xc9, 0x79, 0x30, 0xd7, 0xdb, 0xe6, 0x73,
- 0x22, 0x5c, 0xab, 0xd4, 0x96, 0xba, 0x65, 0x82, 0x71, 0x12, 0x1f, 0xbd, 0x0c, 0x63, 0x4e, 0x50,
- 0xdf, 0x71, 0x23, 0x52, 0x8f, 0x3a, 0x01, 0xf7, 0xb9, 0xd2, 0x2c, 0x94, 0x16, 0x35, 0x18, 0x36,
- 0x30, 0xed, 0xb7, 0x61, 0x26, 0x23, 0x30, 0x04, 0x5d, 0x38, 0x4e, 0xdb, 0x95, 0xdf, 0x94, 0x30,
- 0x63, 0x5e, 0xac, 0x56, 0xe4, 0xd7, 0x68, 0x58, 0x74, 0x75, 0xb2, 0x00, 0x12, 0x5a, 0x1e, 0x44,
- 0xb5, 0x3a, 0x57, 0x25, 0x00, 0xc7, 0x38, 0xf6, 0xff, 0x28, 0xc0, 0x64, 0x86, 0x6e, 0x85, 0xe5,
- 0xe2, 0x4b, 0x3c, 0x52, 0xe2, 0xd4, 0x7b, 0x66, 0x1c, 0xf0, 0xc2, 0x11, 0xe2, 0x80, 0x17, 0x7b,
- 0xc5, 0x01, 0x1f, 0x78, 0x27, 0x71, 0xc0, 0xcd, 0x11, 0x1b, 0xec, 0x6b, 0xc4, 0x32, 0x62, 0x87,
- 0x0f, 0x1d, 0x31, 0x76, 0xb8, 0x31, 0xe8, 0xc3, 0x7d, 0x0c, 0xfa, 0x0f, 0x17, 0x60, 0x2a, 0x69,
- 0x49, 0x79, 0x02, 0x72, 0xdb, 0xd7, 0x0d, 0xb9, 0xed, 0xe5, 0x7e, 0x5c, 0x61, 0x73, 0x65, 0xb8,
- 0x38, 0x21, 0xc3, 0xfd, 0x60, 0x5f, 0xd4, 0xba, 0xcb, 0x73, 0xff, 0x66, 0x01, 0x4e, 0x67, 0xfa,
- 0xe2, 0x9e, 0xc0, 0xd8, 0xdc, 0x32, 0xc6, 0xe6, 0xb9, 0xbe, 0xdd, 0x84, 0x73, 0x07, 0xe8, 0x6e,
- 0x62, 0x80, 0xae, 0xf4, 0x4f, 0xb2, 0xfb, 0x28, 0x7d, 0xb5, 0x08, 0xe7, 0x33, 0xeb, 0xc5, 0x62,
- 0xcf, 0x55, 0x43, 0xec, 0xf9, 0x7c, 0x42, 0xec, 0x69, 0x77, 0xaf, 0x7d, 0x3c, 0x72, 0x50, 0xe1,
- 0x2e, 0xcb, 0xa2, 0x1c, 0x3c, 0xa4, 0x0c, 0xd4, 0x70, 0x97, 0x55, 0x84, 0xb0, 0x49, 0xf7, 0x9b,
- 0x49, 0xf6, 0xf9, 0x6f, 0x2c, 0x38, 0x9b, 0x39, 0x37, 0x27, 0x20, 0xeb, 0x5a, 0x37, 0x65, 0x5d,
- 0x4f, 0xf7, 0xbd, 0x5a, 0x73, 0x84, 0x5f, 0xbf, 0x36, 0x90, 0xf3, 0x2d, 0xec, 0x25, 0x7f, 0x0b,
- 0x46, 0x9d, 0x7a, 0x9d, 0x84, 0xe1, 0x9a, 0xdf, 0x50, 0xa1, 0x8e, 0x9f, 0x63, 0xef, 0xac, 0xb8,
- 0xf8, 0xf0, 0x60, 0x7e, 0x2e, 0x49, 0x22, 0x06, 0x63, 0x9d, 0x02, 0xfa, 0x34, 0x8c, 0x84, 0xe2,
- 0xde, 0x14, 0x73, 0xff, 0x42, 0x9f, 0x83, 0xe3, 0x6c, 0x92, 0xa6, 0x19, 0x8b, 0x49, 0x49, 0x2a,
- 0x14, 0x49, 0x33, 0x6e, 0x4b, 0xe1, 0x58, 0xe3, 0xb6, 0x3c, 0x0f, 0xb0, 0xa7, 0x1e, 0x03, 0x49,
- 0xf9, 0x83, 0xf6, 0x4c, 0xd0, 0xb0, 0xd0, 0xc7, 0x61, 0x2a, 0xe4, 0xc1, 0x0a, 0x97, 0x9b, 0x4e,
- 0xc8, 0x9c, 0x65, 0xc4, 0x2a, 0x64, 0xf1, 0x9e, 0x6a, 0x09, 0x18, 0x4e, 0x61, 0xa3, 0x55, 0xd9,
- 0x2a, 0x8b, 0xac, 0xc8, 0x17, 0xe6, 0xa5, 0xb8, 0x45, 0x91, 0x09, 0xf8, 0x54, 0x72, 0xf8, 0xd9,
- 0xc0, 0x6b, 0x35, 0xd1, 0xa7, 0x01, 0xe8, 0xf2, 0x11, 0x72, 0x88, 0xe1, 0xfc, 0xc3, 0x93, 0x9e,
- 0x2a, 0x8d, 0x4c, 0xdb, 0x5e, 0xe6, 0xe1, 0x5a, 0x56, 0x44, 0xb0, 0x46, 0xd0, 0xfe, 0xe1, 0x01,
- 0x78, 0xbc, 0xcb, 0x19, 0x89, 0x16, 0x4d, 0x3d, 0xec, 0x33, 0xc9, 0xc7, 0xf5, 0x5c, 0x66, 0x65,
- 0xe3, 0xb5, 0x9d, 0x58, 0x8a, 0x85, 0x77, 0xbc, 0x14, 0x7f, 0xc0, 0xd2, 0xc4, 0x1e, 0xdc, 0xe2,
- 0xf3, 0x63, 0x47, 0x3c, 0xfb, 0x8f, 0x51, 0x0e, 0xb2, 0x95, 0x21, 0x4c, 0x78, 0xbe, 0xef, 0xee,
- 0xf4, 0x2d, 0x5d, 0x38, 0x59, 0x29, 0xf1, 0x6f, 0x59, 0x70, 0xae, 0x6b, 0xd0, 0x8e, 0x6f, 0x40,
- 0x86, 0xc1, 0xfe, 0x82, 0x05, 0x4f, 0x66, 0xd6, 0x30, 0xcc, 0x8c, 0xae, 0x40, 0xa9, 0x4e, 0x0b,
- 0x35, 0x2f, 0xcd, 0xd8, 0x7d, 0x5d, 0x02, 0x70, 0x8c, 0x63, 0x58, 0x13, 0x15, 0x7a, 0x5a, 0x13,
- 0xfd, 0x8a, 0x05, 0xa9, 0x4d, 0x7f, 0x02, 0xb7, 0x4f, 0xc5, 0xbc, 0x7d, 0xde, 0xdf, 0xcf, 0x68,
- 0xe6, 0x5c, 0x3c, 0x7f, 0x3c, 0x09, 0x67, 0x72, 0xbc, 0x94, 0xf6, 0x60, 0x7a, 0xbb, 0x4e, 0x4c,
- 0xff, 0xd7, 0x6e, 0x71, 0x61, 0xba, 0x3a, 0xcb, 0xb2, 0x5c, 0xa5, 0xd3, 0x29, 0x14, 0x9c, 0x6e,
- 0x02, 0x7d, 0xc1, 0x82, 0x53, 0xce, 0xbd, 0x70, 0x85, 0x72, 0x11, 0x6e, 0x7d, 0xa9, 0xe9, 0xd7,
- 0x77, 0xe9, 0x11, 0x2d, 0x37, 0xc2, 0x8b, 0x99, 0x92, 0x9d, 0xbb, 0xb5, 0x14, 0xbe, 0xd1, 0x3c,
- 0x4b, 0xde, 0x9a, 0x85, 0x85, 0x33, 0xdb, 0x42, 0x58, 0x44, 0xf4, 0xa7, 0x6f, 0x94, 0x2e, 0x1e,
- 0xda, 0x59, 0xee, 0x64, 0xfc, 0x5a, 0x94, 0x10, 0xac, 0xe8, 0xa0, 0xcf, 0x42, 0x69, 0x5b, 0xfa,
- 0x78, 0x66, 0x5c, 0xbb, 0xf1, 0x40, 0x76, 0xf7, 0x7c, 0xe5, 0xea, 0x59, 0x85, 0x84, 0x63, 0xa2,
- 0xe8, 0x35, 0x28, 0x7a, 0x5b, 0x61, 0xb7, 0xfc, 0xa7, 0x09, 0x3b, 0x3c, 0x1e, 0x07, 0x61, 0x7d,
- 0xb5, 0x86, 0x69, 0x45, 0x74, 0x1d, 0x8a, 0xc1, 0x66, 0x43, 0x88, 0x25, 0x33, 0x37, 0x29, 0x5e,
- 0x2a, 0xe7, 0xf4, 0x8a, 0x51, 0xc2, 0x4b, 0x65, 0x4c, 0x49, 0xa0, 0x2a, 0x0c, 0x32, 0xd7, 0x1e,
- 0x71, 0xc9, 0x65, 0xb2, 0xf3, 0x5d, 0x5c, 0xe4, 0x78, 0xb0, 0x04, 0x86, 0x80, 0x39, 0x21, 0xb4,
- 0x01, 0x43, 0x75, 0x96, 0x2b, 0x53, 0x44, 0x82, 0xfb, 0x50, 0xa6, 0x00, 0xb2, 0x4b, 0x12, 0x51,
- 0x21, 0x8f, 0x63, 0x18, 0x58, 0xd0, 0x62, 0x54, 0x49, 0x7b, 0x67, 0x2b, 0x14, 0xb9, 0x9d, 0xb3,
- 0xa9, 0x76, 0xc9, 0x8d, 0x2b, 0xa8, 0x32, 0x0c, 0x2c, 0x68, 0xa1, 0x57, 0xa0, 0xb0, 0x55, 0x17,
- 0x6e, 0x3b, 0x99, 0x92, 0x48, 0x33, 0x94, 0xc5, 0xd2, 0xd0, 0x83, 0x83, 0xf9, 0xc2, 0xea, 0x32,
- 0x2e, 0x6c, 0xd5, 0xd1, 0x3a, 0x0c, 0x6f, 0x71, 0xe7, 0x77, 0x21, 0x6c, 0x7c, 0x2a, 0xdb, 0x2f,
- 0x3f, 0xe5, 0x1f, 0xcf, 0x3d, 0x56, 0x04, 0x00, 0x4b, 0x22, 0x2c, 0x40, 0xbe, 0x72, 0xe2, 0x17,
- 0x41, 0xd3, 0x16, 0x8e, 0x16, 0x78, 0x81, 0x33, 0x1d, 0x71, 0x28, 0x00, 0xac, 0x51, 0xa4, 0xab,
- 0xda, 0x91, 0x09, 0xf6, 0x45, 0xb0, 0x99, 0xcc, 0x55, 0xad, 0xb2, 0xf0, 0x77, 0x5b, 0xd5, 0x0a,
- 0x09, 0xc7, 0x44, 0xd1, 0x2e, 0x8c, 0xef, 0x85, 0xed, 0x1d, 0x22, 0xb7, 0x34, 0x8b, 0x3d, 0x93,
- 0x73, 0x2f, 0xdf, 0x11, 0x88, 0x6e, 0x10, 0x75, 0x9c, 0x66, 0xea, 0x14, 0x62, 0x3a, 0xfd, 0x3b,
- 0x3a, 0x31, 0x6c, 0xd2, 0xa6, 0xc3, 0xff, 0x56, 0xc7, 0xdf, 0xdc, 0x8f, 0x88, 0x88, 0x75, 0x96,
- 0x39, 0xfc, 0x6f, 0x70, 0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44, 0xd0, 0x1d, 0x31, 0x3c, 0xec,
- 0xf4, 0x9c, 0xca, 0x0f, 0x48, 0xba, 0x28, 0x91, 0x72, 0x06, 0x85, 0x9d, 0x96, 0x31, 0x29, 0x76,
- 0x4a, 0xb6, 0x77, 0xfc, 0xc8, 0xf7, 0x12, 0x27, 0xf4, 0x74, 0xfe, 0x29, 0x59, 0xcd, 0xc0, 0x4f,
- 0x9f, 0x92, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0xd4, 0x80, 0x89, 0xb6, 0x1f, 0x44, 0xf7, 0xfc, 0x40,
- 0xae, 0x2f, 0xd4, 0x45, 0x58, 0x62, 0x60, 0x8a, 0x16, 0x59, 0x18, 0x41, 0x13, 0x82, 0x13, 0x34,
- 0xd1, 0x27, 0x60, 0x38, 0xac, 0x3b, 0x4d, 0x52, 0xb9, 0x35, 0x3b, 0x93, 0x7f, 0xfd, 0xd4, 0x38,
- 0x4a, 0xce, 0xea, 0xe2, 0xd1, 0xf4, 0x39, 0x0a, 0x96, 0xe4, 0xd0, 0x2a, 0x0c, 0xb2, 0x04, 0x68,
- 0x2c, 0x30, 0x5f, 0x4e, 0x5c, 0xd5, 0x94, 0x55, 0x34, 0x3f, 0x9b, 0x58, 0x31, 0xe6, 0xd5, 0xe9,
- 0x1e, 0x10, 0x6f, 0x06, 0x3f, 0x9c, 0x3d, 0x9d, 0xbf, 0x07, 0xc4, 0x53, 0xe3, 0x56, 0xad, 0xdb,
- 0x1e, 0x50, 0x48, 0x38, 0x26, 0x4a, 0x4f, 0x66, 0x7a, 0x9a, 0x9e, 0xe9, 0x62, 0xce, 0x93, 0x7b,
- 0x96, 0xb2, 0x93, 0x99, 0x9e, 0xa4, 0x94, 0x84, 0xfd, 0x7b, 0xc3, 0x69, 0x9e, 0x85, 0xbd, 0x32,
- 0xbf, 0xcb, 0x4a, 0x29, 0x20, 0x3f, 0xdc, 0xaf, 0xd0, 0xeb, 0x18, 0x59, 0xf0, 0x2f, 0x58, 0x70,
- 0xa6, 0x9d, 0xf9, 0x21, 0x82, 0x01, 0xe8, 0x4f, 0x76, 0xc6, 0x3f, 0x5d, 0x05, 0x71, 0xcc, 0x86,
- 0xe3, 0x9c, 0x96, 0x92, 0xcf, 0x9c, 0xe2, 0x3b, 0x7e, 0xe6, 0xac, 0xc1, 0x08, 0x63, 0x32, 0x7b,
- 0xe4, 0x8e, 0x4e, 0xbe, 0xf6, 0x18, 0x2b, 0xb1, 0x2c, 0x2a, 0x62, 0x45, 0x02, 0xfd, 0xa0, 0x05,
- 0xe7, 0x92, 0x5d, 0xc7, 0x84, 0x81, 0x45, 0xe4, 0x47, 0xfe, 0xc0, 0x5d, 0x15, 0xdf, 0x9f, 0xe2,
- 0xff, 0x0d, 0xe4, 0xc3, 0x5e, 0x08, 0xb8, 0x7b, 0x63, 0xa8, 0x9c, 0xf1, 0xc2, 0x1e, 0x32, 0xb5,
- 0x0a, 0x7d, 0xbc, 0xb2, 0x5f, 0x84, 0xb1, 0x96, 0xdf, 0xf1, 0x22, 0x61, 0xfd, 0x23, 0x2c, 0x11,
- 0x98, 0x06, 0x7e, 0x4d, 0x2b, 0xc7, 0x06, 0x56, 0xe2, 0x6d, 0x3e, 0xf2, 0xd0, 0x6f, 0xf3, 0x37,
- 0x61, 0xcc, 0xd3, 0xcc, 0x55, 0x05, 0x3f, 0x70, 0x29, 0x3f, 0x6a, 0xab, 0x6e, 0xdc, 0xca, 0x7b,
- 0xa9, 0x97, 0x60, 0x83, 0xda, 0xc9, 0x3e, 0xf8, 0xbe, 0x6c, 0x65, 0x30, 0xf5, 0x5c, 0x04, 0xf0,
- 0x51, 0x53, 0x04, 0x70, 0x29, 0x29, 0x02, 0x48, 0x49, 0x94, 0x8d, 0xd7, 0x7f, 0xff, 0x49, 0x69,
- 0xfa, 0x0d, 0x84, 0x68, 0x37, 0xe1, 0x42, 0xaf, 0x6b, 0x89, 0x99, 0x81, 0x35, 0x94, 0xfe, 0x30,
- 0x36, 0x03, 0x6b, 0x54, 0xca, 0x98, 0x41, 0xfa, 0x0d, 0xb1, 0x63, 0xff, 0x37, 0x0b, 0x8a, 0x55,
- 0xbf, 0x71, 0x02, 0x0f, 0xde, 0x8f, 0x19, 0x0f, 0xde, 0xc7, 0xb3, 0x2f, 0xc4, 0x46, 0xae, 0x3c,
- 0x7c, 0x25, 0x21, 0x0f, 0x3f, 0x97, 0x47, 0xa0, 0xbb, 0xf4, 0xfb, 0x27, 0x8b, 0x30, 0x5a, 0xf5,
- 0x1b, 0xca, 0x06, 0xfb, 0xd7, 0x1e, 0xc6, 0x06, 0x3b, 0x37, 0xb5, 0x82, 0x46, 0x99, 0x59, 0x8f,
- 0x49, 0xf7, 0xd3, 0x6f, 0x30, 0x53, 0xec, 0xbb, 0xc4, 0xdd, 0xde, 0x89, 0x48, 0x23, 0xf9, 0x39,
- 0x27, 0x67, 0x8a, 0xfd, 0x7b, 0x05, 0x98, 0x4c, 0xb4, 0x8e, 0x9a, 0x30, 0xde, 0xd4, 0xa5, 0xad,
- 0x62, 0x9d, 0x3e, 0x94, 0xa0, 0x56, 0x98, 0xb2, 0x6a, 0x45, 0xd8, 0x24, 0x8e, 0x16, 0x00, 0x94,
- 0xfa, 0x51, 0x8a, 0xf5, 0x18, 0xd7, 0xaf, 0xf4, 0x93, 0x21, 0xd6, 0x30, 0xd0, 0x4b, 0x30, 0x1a,
- 0xf9, 0x6d, 0xbf, 0xe9, 0x6f, 0xef, 0xdf, 0x20, 0x32, 0xa8, 0x93, 0x32, 0x50, 0xdb, 0x88, 0x41,
- 0x58, 0xc7, 0x43, 0xf7, 0x61, 0x5a, 0x11, 0xa9, 0x1d, 0x83, 0x04, 0x9a, 0x49, 0x15, 0xd6, 0x93,
- 0x14, 0x71, 0xba, 0x11, 0xfb, 0xa7, 0x8a, 0x7c, 0x88, 0xbd, 0xc8, 0x7d, 0x6f, 0x37, 0xbc, 0xbb,
- 0x77, 0xc3, 0x57, 0x2d, 0x98, 0xa2, 0xad, 0x33, 0xeb, 0x1b, 0x79, 0xcd, 0xab, 0x38, 0xd1, 0x56,
- 0x97, 0x38, 0xd1, 0x97, 0xe8, 0xa9, 0xd9, 0xf0, 0x3b, 0x91, 0x90, 0xdd, 0x69, 0xc7, 0x22, 0x2d,
- 0xc5, 0x02, 0x2a, 0xf0, 0x48, 0x10, 0x08, 0x8f, 0x41, 0x1d, 0x8f, 0x04, 0x01, 0x16, 0x50, 0x19,
- 0x46, 0x7a, 0x20, 0x3b, 0x8c, 0x34, 0x0f, 0x8e, 0x29, 0xec, 0x34, 0x04, 0xc3, 0xa5, 0x05, 0xc7,
- 0x94, 0x06, 0x1c, 0x31, 0x8e, 0xfd, 0xb3, 0x45, 0x18, 0xab, 0xfa, 0x8d, 0x58, 0xf5, 0xf8, 0xa2,
- 0xa1, 0x7a, 0xbc, 0x90, 0x50, 0x3d, 0x4e, 0xe9, 0xb8, 0xef, 0x29, 0x1a, 0xbf, 0x5e, 0x8a, 0xc6,
- 0x7f, 0x6a, 0xb1, 0x59, 0x2b, 0xaf, 0xd7, 0xb8, 0x31, 0x17, 0xba, 0x0a, 0xa3, 0xec, 0x80, 0x61,
- 0x2e, 0xaa, 0x52, 0x1f, 0xc7, 0xd2, 0x23, 0xad, 0xc7, 0xc5, 0x58, 0xc7, 0x41, 0x97, 0x61, 0x24,
- 0x24, 0x4e, 0x50, 0xdf, 0x51, 0xa7, 0xab, 0x50, 0x9e, 0xf1, 0x32, 0xac, 0xa0, 0xe8, 0x8d, 0x38,
- 0x2e, 0x63, 0x31, 0xdf, 0xe5, 0x4d, 0xef, 0x0f, 0xdf, 0x22, 0xf9, 0xc1, 0x18, 0xed, 0xbb, 0x80,
- 0xd2, 0xf8, 0x7d, 0x04, 0x24, 0x9b, 0x37, 0x03, 0x92, 0x95, 0x52, 0xc1, 0xc8, 0xfe, 0xdc, 0x82,
- 0x89, 0xaa, 0xdf, 0xa0, 0x5b, 0xf7, 0x9b, 0x69, 0x9f, 0xea, 0x41, 0x69, 0x87, 0xba, 0x04, 0xa5,
- 0xbd, 0x08, 0x83, 0x55, 0xbf, 0x51, 0xa9, 0x76, 0xf3, 0x37, 0xb7, 0xff, 0x96, 0x05, 0xc3, 0x55,
- 0xbf, 0x71, 0x02, 0x6a, 0x81, 0x8f, 0x9a, 0x6a, 0x81, 0xc7, 0x72, 0xd6, 0x4d, 0x8e, 0x26, 0xe0,
- 0x6f, 0x0c, 0xc0, 0x38, 0xed, 0xa7, 0xbf, 0x2d, 0xa7, 0xd2, 0x18, 0x36, 0xab, 0x8f, 0x61, 0xa3,
- 0x5c, 0xb8, 0xdf, 0x6c, 0xfa, 0xf7, 0x92, 0xd3, 0xba, 0xca, 0x4a, 0xb1, 0x80, 0xa2, 0x67, 0x61,
- 0xa4, 0x1d, 0x90, 0x3d, 0xd7, 0x17, 0xec, 0xad, 0xa6, 0x64, 0xa9, 0x8a, 0x72, 0xac, 0x30, 0xe8,
- 0xb3, 0x30, 0x74, 0x3d, 0x7a, 0x95, 0xd7, 0x7d, 0xaf, 0xc1, 0x25, 0xe7, 0x45, 0x91, 0x2a, 0x42,
- 0x2b, 0xc7, 0x06, 0x16, 0xba, 0x0b, 0x25, 0xf6, 0x9f, 0x1d, 0x3b, 0x47, 0x4f, 0x3a, 0x2a, 0x92,
- 0xd0, 0x09, 0x02, 0x38, 0xa6, 0x85, 0x9e, 0x07, 0x88, 0x64, 0xf4, 0xf1, 0x50, 0x04, 0x9f, 0x52,
- 0x4f, 0x01, 0x15, 0x97, 0x3c, 0xc4, 0x1a, 0x16, 0x7a, 0x06, 0x4a, 0x91, 0xe3, 0x36, 0x6f, 0xba,
- 0x1e, 0x09, 0x99, 0x44, 0xbc, 0x28, 0x73, 0xc1, 0x89, 0x42, 0x1c, 0xc3, 0x29, 0x2b, 0xc6, 0x22,
- 0x33, 0xf0, 0x94, 0xc5, 0x23, 0x0c, 0x9b, 0xb1, 0x62, 0x37, 0x55, 0x29, 0xd6, 0x30, 0xd0, 0x0e,
- 0x3c, 0xe1, 0x7a, 0x2c, 0xad, 0x02, 0xa9, 0xed, 0xba, 0xed, 0x8d, 0x9b, 0xb5, 0x3b, 0x24, 0x70,
- 0xb7, 0xf6, 0x97, 0x9c, 0xfa, 0x2e, 0xf1, 0x64, 0x3a, 0xc9, 0xf7, 0x8b, 0x2e, 0x3e, 0x51, 0xe9,
- 0x82, 0x8b, 0xbb, 0x52, 0xb2, 0x5f, 0x86, 0xd3, 0x55, 0xbf, 0x51, 0xf5, 0x83, 0x68, 0xd5, 0x0f,
- 0xee, 0x39, 0x41, 0x43, 0xae, 0x94, 0x79, 0x19, 0x25, 0x81, 0x1e, 0x85, 0x83, 0xfc, 0xa0, 0x30,
- 0x22, 0x20, 0xbc, 0xc0, 0x98, 0xaf, 0x23, 0xfa, 0xf6, 0xd4, 0x19, 0x1b, 0xa0, 0x72, 0x8c, 0x5c,
- 0x73, 0x22, 0x82, 0x6e, 0xb1, 0xdc, 0xc9, 0xf1, 0x8d, 0x28, 0xaa, 0x3f, 0xad, 0xe5, 0x4e, 0x8e,
- 0x81, 0x99, 0x57, 0xa8, 0x59, 0xdf, 0xfe, 0xef, 0x83, 0xec, 0x70, 0x4c, 0xe4, 0xa9, 0x40, 0x9f,
- 0x81, 0x89, 0x90, 0xdc, 0x74, 0xbd, 0xce, 0x7d, 0x29, 0x8d, 0xe8, 0xe2, 0x9d, 0x55, 0x5b, 0xd1,
- 0x31, 0xb9, 0x4c, 0xd3, 0x2c, 0xc3, 0x09, 0x6a, 0xa8, 0x05, 0x13, 0xf7, 0x5c, 0xaf, 0xe1, 0xdf,
- 0x0b, 0x25, 0xfd, 0x91, 0x7c, 0xd1, 0xe6, 0x5d, 0x8e, 0x99, 0xe8, 0xa3, 0xd1, 0xdc, 0x5d, 0x83,
- 0x18, 0x4e, 0x10, 0xa7, 0x0b, 0x30, 0xe8, 0x78, 0x8b, 0xe1, 0xed, 0x90, 0x04, 0x22, 0x0b, 0x36,
- 0x5b, 0x80, 0x58, 0x16, 0xe2, 0x18, 0x4e, 0x17, 0x20, 0xfb, 0x73, 0x2d, 0xf0, 0x3b, 0x3c, 0x47,
- 0x80, 0x58, 0x80, 0x58, 0x95, 0x62, 0x0d, 0x83, 0x6e, 0x50, 0xf6, 0x6f, 0xdd, 0xf7, 0xb0, 0xef,
- 0x47, 0x72, 0x4b, 0xb3, 0xbc, 0xab, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x2a, 0xa0, 0xb0, 0xd3, 0x6e,
- 0x37, 0x99, 0xd9, 0x87, 0xd3, 0x64, 0xa4, 0xb8, 0xca, 0xbd, 0xc8, 0x43, 0xa7, 0xd6, 0x52, 0x50,
- 0x9c, 0x51, 0x83, 0x9e, 0xd5, 0x5b, 0xa2, 0xab, 0x83, 0xac, 0xab, 0x5c, 0x0d, 0x52, 0xe3, 0xfd,
- 0x94, 0x30, 0xb4, 0x02, 0xc3, 0xe1, 0x7e, 0x58, 0x8f, 0x44, 0x0c, 0xb8, 0x9c, 0x54, 0x44, 0x35,
- 0x86, 0xa2, 0x65, 0xc2, 0xe3, 0x55, 0xb0, 0xac, 0x8b, 0xea, 0x30, 0x23, 0x28, 0x2e, 0xef, 0x38,
- 0x9e, 0x4a, 0xec, 0xc2, 0xad, 0x5f, 0xaf, 0x3e, 0x38, 0x98, 0x9f, 0x11, 0x2d, 0xeb, 0xe0, 0xc3,
- 0x83, 0xf9, 0x33, 0x55, 0xbf, 0x91, 0x01, 0xc1, 0x59, 0xd4, 0xf8, 0xe2, 0xab, 0xd7, 0xfd, 0x56,
- 0xbb, 0x1a, 0xf8, 0x5b, 0x6e, 0x93, 0x74, 0x53, 0x25, 0xd5, 0x0c, 0x4c, 0xb1, 0xf8, 0x8c, 0x32,
- 0x9c, 0xa0, 0x66, 0x7f, 0x3b, 0xe3, 0x67, 0x58, 0xe2, 0xe7, 0xa8, 0x13, 0x10, 0xd4, 0x82, 0xf1,
- 0x36, 0xdb, 0x26, 0x22, 0x72, 0xbf, 0x58, 0xeb, 0x2f, 0xf6, 0x29, 0x12, 0xb9, 0x47, 0xaf, 0x01,
- 0x25, 0xb2, 0x64, 0x6f, 0xcd, 0xaa, 0x4e, 0x0e, 0x9b, 0xd4, 0xed, 0x1f, 0x7b, 0x8c, 0xdd, 0x88,
- 0x35, 0x2e, 0xe7, 0x18, 0x16, 0xc6, 0xf6, 0xe2, 0x69, 0x35, 0x97, 0x2f, 0x70, 0x8b, 0xa7, 0x45,
- 0x18, 0xec, 0x63, 0x59, 0x17, 0x7d, 0x1a, 0x26, 0xe8, 0x4b, 0x45, 0xcb, 0xa8, 0x72, 0x2a, 0x3f,
- 0x28, 0x42, 0x9c, 0x48, 0x45, 0xcb, 0xea, 0xa1, 0x57, 0xc6, 0x09, 0x62, 0xe8, 0x0d, 0x66, 0x16,
- 0x62, 0x26, 0x6b, 0xe9, 0x41, 0x5a, 0xb7, 0x00, 0x91, 0x64, 0x35, 0x22, 0x79, 0x89, 0x60, 0xec,
- 0x47, 0x9b, 0x08, 0x06, 0xdd, 0x84, 0x71, 0x91, 0xfd, 0x58, 0xac, 0xdc, 0xa2, 0x21, 0x07, 0x1c,
- 0xc7, 0x3a, 0xf0, 0x30, 0x59, 0x80, 0xcd, 0xca, 0x68, 0x1b, 0xce, 0x69, 0xd9, 0x88, 0xae, 0x05,
- 0x0e, 0x53, 0xe6, 0xbb, 0xec, 0x38, 0xd5, 0xee, 0xea, 0x27, 0x1f, 0x1c, 0xcc, 0x9f, 0xdb, 0xe8,
- 0x86, 0x88, 0xbb, 0xd3, 0x41, 0xb7, 0xe0, 0x34, 0x77, 0xe9, 0x2d, 0x13, 0xa7, 0xd1, 0x74, 0x3d,
- 0xc5, 0x0c, 0xf0, 0x2d, 0x7f, 0xf6, 0xc1, 0xc1, 0xfc, 0xe9, 0xc5, 0x2c, 0x04, 0x9c, 0x5d, 0x0f,
- 0x7d, 0x14, 0x4a, 0x0d, 0x2f, 0x14, 0x63, 0x30, 0x64, 0x24, 0x7c, 0x2a, 0x95, 0xd7, 0x6b, 0xea,
- 0xfb, 0xe3, 0x3f, 0x38, 0xae, 0x80, 0xb6, 0xb9, 0xac, 0x58, 0x49, 0x30, 0x86, 0x53, 0x21, 0x8d,
- 0x92, 0x42, 0x3e, 0xc3, 0xa9, 0x8f, 0x2b, 0x49, 0x94, 0xad, 0xbb, 0xe1, 0xef, 0x67, 0x10, 0x46,
- 0xaf, 0x03, 0xa2, 0x2f, 0x08, 0xb7, 0x4e, 0x16, 0xeb, 0x2c, 0x2d, 0x04, 0x13, 0xad, 0x8f, 0x98,
- 0x6e, 0x66, 0xb5, 0x14, 0x06, 0xce, 0xa8, 0x85, 0xae, 0xd3, 0x53, 0x45, 0x2f, 0x15, 0xa7, 0x96,
- 0x4a, 0xcf, 0x57, 0x26, 0xed, 0x80, 0xd4, 0x9d, 0x88, 0x34, 0x4c, 0x8a, 0x38, 0x51, 0x0f, 0x35,
- 0xe0, 0x09, 0xa7, 0x13, 0xf9, 0x4c, 0x0c, 0x6f, 0xa2, 0x6e, 0xf8, 0xbb, 0xc4, 0x63, 0x1a, 0xb0,
- 0x91, 0xa5, 0x0b, 0x94, 0xdb, 0x58, 0xec, 0x82, 0x87, 0xbb, 0x52, 0xa1, 0x5c, 0xa2, 0xca, 0xc7,
- 0x0b, 0x66, 0xa0, 0xa6, 0x8c, 0x9c, 0xbc, 0x2f, 0xc1, 0xe8, 0x8e, 0x1f, 0x46, 0xeb, 0x24, 0xba,
- 0xe7, 0x07, 0xbb, 0x22, 0xde, 0x66, 0x1c, 0xa3, 0x39, 0x06, 0x61, 0x1d, 0x8f, 0x3e, 0x03, 0x99,
- 0x7d, 0x46, 0xa5, 0xcc, 0x54, 0xe3, 0x23, 0xf1, 0x19, 0x73, 0x9d, 0x17, 0x63, 0x09, 0x97, 0xa8,
- 0x95, 0xea, 0x32, 0x53, 0x73, 0x27, 0x50, 0x2b, 0xd5, 0x65, 0x2c, 0xe1, 0x74, 0xb9, 0x86, 0x3b,
- 0x4e, 0x40, 0xaa, 0x81, 0x5f, 0x27, 0xa1, 0x16, 0x19, 0xfc, 0x71, 0x1e, 0x4d, 0x94, 0x2e, 0xd7,
- 0x5a, 0x16, 0x02, 0xce, 0xae, 0x87, 0x48, 0x3a, 0x13, 0xd7, 0x44, 0xbe, 0x7e, 0x22, 0xcd, 0xcf,
- 0xf4, 0x99, 0x8c, 0xcb, 0x83, 0x29, 0x95, 0x03, 0x8c, 0xc7, 0x0f, 0x0d, 0x67, 0x27, 0xd9, 0xda,
- 0xee, 0x3f, 0xf8, 0xa8, 0xd2, 0xf8, 0x54, 0x12, 0x94, 0x70, 0x8a, 0xb6, 0x11, 0x8b, 0x6b, 0xaa,
- 0x67, 0x2c, 0xae, 0x2b, 0x50, 0x0a, 0x3b, 0x9b, 0x0d, 0xbf, 0xe5, 0xb8, 0x1e, 0x53, 0x73, 0x6b,
- 0xef, 0x91, 0x9a, 0x04, 0xe0, 0x18, 0x07, 0xad, 0xc2, 0x88, 0x23, 0xd5, 0x39, 0x28, 0x3f, 0xfa,
- 0x8a, 0x52, 0xe2, 0xf0, 0x80, 0x04, 0x52, 0x81, 0xa3, 0xea, 0xa2, 0x57, 0x61, 0x5c, 0xb8, 0xa4,
- 0x8a, 0xf4, 0x93, 0x33, 0xa6, 0xdf, 0x50, 0x4d, 0x07, 0x62, 0x13, 0x17, 0xdd, 0x86, 0xd1, 0xc8,
- 0x6f, 0x32, 0xe7, 0x17, 0xca, 0xe6, 0x9d, 0xc9, 0x8f, 0x23, 0xb6, 0xa1, 0xd0, 0x74, 0x49, 0xaa,
- 0xaa, 0x8a, 0x75, 0x3a, 0x68, 0x83, 0xaf, 0x77, 0x16, 0x21, 0x9b, 0x84, 0xb3, 0x8f, 0xe5, 0xdf,
- 0x49, 0x2a, 0x90, 0xb6, 0xb9, 0x1d, 0x44, 0x4d, 0xac, 0x93, 0x41, 0xd7, 0x60, 0xba, 0x1d, 0xb8,
- 0x3e, 0x5b, 0x13, 0x4a, 0x93, 0x37, 0x6b, 0xa6, 0xe7, 0xa9, 0x26, 0x11, 0x70, 0xba, 0x0e, 0xf3,
- 0x28, 0x16, 0x85, 0xb3, 0x67, 0x79, 0x86, 0x6a, 0xfe, 0xbc, 0xe3, 0x65, 0x58, 0x41, 0xd1, 0x1a,
- 0x3b, 0x89, 0xb9, 0x64, 0x62, 0x76, 0x2e, 0x3f, 0xe0, 0x8b, 0x2e, 0xc1, 0xe0, 0xcc, 0xab, 0xfa,
- 0x8b, 0x63, 0x0a, 0xa8, 0xa1, 0xa5, 0x32, 0xa4, 0x2f, 0x86, 0x70, 0xf6, 0x89, 0x2e, 0x46, 0x72,
- 0x89, 0xe7, 0x45, 0xcc, 0x10, 0x18, 0xc5, 0x21, 0x4e, 0xd0, 0x44, 0x1f, 0x87, 0x29, 0x11, 0xa6,
- 0x2e, 0x1e, 0xa6, 0x73, 0xb1, 0x49, 0x31, 0x4e, 0xc0, 0x70, 0x0a, 0x9b, 0x67, 0x0e, 0x70, 0x36,
- 0x9b, 0x44, 0x1c, 0x7d, 0x37, 0x5d, 0x6f, 0x37, 0x9c, 0x3d, 0xcf, 0xce, 0x07, 0x91, 0x39, 0x20,
- 0x09, 0xc5, 0x19, 0x35, 0xd0, 0x06, 0x4c, 0xb5, 0x03, 0x42, 0x5a, 0x8c, 0xd1, 0x17, 0xf7, 0xd9,
- 0x3c, 0x77, 0xa8, 0xa7, 0x3d, 0xa9, 0x26, 0x60, 0x87, 0x19, 0x65, 0x38, 0x45, 0x01, 0xdd, 0x83,
- 0x11, 0x7f, 0x8f, 0x04, 0x3b, 0xc4, 0x69, 0xcc, 0x5e, 0xe8, 0x62, 0xe2, 0x2e, 0x2e, 0xb7, 0x5b,
- 0x02, 0x37, 0xa1, 0xfd, 0x97, 0xc5, 0xbd, 0xb5, 0xff, 0xb2, 0x31, 0xf4, 0x43, 0x16, 0x9c, 0x95,
- 0x0a, 0x83, 0x5a, 0x9b, 0x8e, 0xfa, 0xb2, 0xef, 0x85, 0x51, 0xc0, 0x5d, 0xc0, 0x9f, 0xcc, 0x77,
- 0x8b, 0xde, 0xc8, 0xa9, 0xa4, 0x84, 0xa3, 0x67, 0xf3, 0x30, 0x42, 0x9c, 0xdf, 0x22, 0x5a, 0x86,
- 0xe9, 0x90, 0x44, 0xf2, 0x30, 0x5a, 0x0c, 0x57, 0xdf, 0x28, 0xaf, 0xcf, 0x5e, 0xe4, 0xfe, 0xeb,
- 0x74, 0x33, 0xd4, 0x92, 0x40, 0x9c, 0xc6, 0x9f, 0xfb, 0x56, 0x98, 0x4e, 0x5d, 0xff, 0x47, 0xc9,
- 0x88, 0x32, 0xb7, 0x0b, 0xe3, 0xc6, 0x10, 0x3f, 0x52, 0xed, 0xf1, 0xbf, 0x1a, 0x86, 0x92, 0xd2,
- 0x2c, 0xa2, 0x2b, 0xa6, 0xc2, 0xf8, 0x6c, 0x52, 0x61, 0x3c, 0x42, 0xdf, 0xf5, 0xba, 0x8e, 0x78,
- 0x23, 0x23, 0x6a, 0x57, 0xde, 0x86, 0xee, 0xdf, 0x1d, 0x5b, 0x13, 0xd7, 0x16, 0xfb, 0xd6, 0x3c,
- 0x0f, 0x74, 0x95, 0x00, 0x5f, 0x83, 0x69, 0xcf, 0x67, 0x3c, 0x27, 0x69, 0x48, 0x86, 0x82, 0xf1,
- 0x0d, 0x25, 0x3d, 0x0c, 0x46, 0x02, 0x01, 0xa7, 0xeb, 0xd0, 0x06, 0xf9, 0xc5, 0x9f, 0x14, 0x39,
- 0x73, 0xbe, 0x00, 0x0b, 0x28, 0xba, 0x08, 0x83, 0x6d, 0xbf, 0x51, 0xa9, 0x0a, 0x7e, 0x53, 0x8b,
- 0x15, 0xd9, 0xa8, 0x54, 0x31, 0x87, 0xa1, 0x45, 0x18, 0x62, 0x3f, 0xc2, 0xd9, 0xb1, 0xfc, 0x78,
- 0x07, 0xac, 0x86, 0x96, 0x6f, 0x86, 0x55, 0xc0, 0xa2, 0x22, 0x13, 0x7d, 0x51, 0x26, 0x9d, 0x89,
- 0xbe, 0x86, 0x1f, 0x52, 0xf4, 0x25, 0x09, 0xe0, 0x98, 0x16, 0xba, 0x0f, 0xa7, 0x8d, 0x87, 0x11,
- 0x5f, 0x22, 0x24, 0x14, 0x3e, 0xd7, 0x17, 0xbb, 0xbe, 0x88, 0x84, 0xa6, 0xfa, 0x9c, 0xe8, 0xf4,
- 0xe9, 0x4a, 0x16, 0x25, 0x9c, 0xdd, 0x00, 0x6a, 0xc2, 0x74, 0x3d, 0xd5, 0xea, 0x48, 0xff, 0xad,
- 0xaa, 0x09, 0x4d, 0xb7, 0x98, 0x26, 0x8c, 0x5e, 0x85, 0x91, 0xb7, 0xfc, 0x90, 0x9d, 0xd5, 0x82,
- 0x47, 0x96, 0x0e, 0xbb, 0x23, 0x6f, 0xdc, 0xaa, 0xb1, 0xf2, 0xc3, 0x83, 0xf9, 0xd1, 0xaa, 0xdf,
- 0x90, 0x7f, 0xb1, 0xaa, 0x80, 0xbe, 0xd7, 0x82, 0xb9, 0xf4, 0xcb, 0x4b, 0x75, 0x7a, 0xbc, 0xff,
- 0x4e, 0xdb, 0xa2, 0xd1, 0xb9, 0x95, 0x5c, 0x72, 0xb8, 0x4b, 0x53, 0xf6, 0x2f, 0x59, 0x4c, 0xea,
- 0x26, 0x34, 0x40, 0x24, 0xec, 0x34, 0x4f, 0x22, 0xcd, 0xe6, 0x8a, 0xa1, 0x9c, 0x7a, 0x68, 0xcb,
- 0x85, 0x7f, 0x6e, 0x31, 0xcb, 0x85, 0x13, 0x74, 0x51, 0x78, 0x03, 0x46, 0x22, 0x99, 0xfe, 0xb4,
- 0x4b, 0x66, 0x50, 0xad, 0x53, 0xcc, 0x7a, 0x43, 0x71, 0xac, 0x2a, 0xd3, 0xa9, 0x22, 0x63, 0xff,
- 0x23, 0x3e, 0x03, 0x12, 0x72, 0x02, 0x3a, 0x80, 0xb2, 0xa9, 0x03, 0x98, 0xef, 0xf1, 0x05, 0x39,
- 0xba, 0x80, 0x7f, 0x68, 0xf6, 0x9b, 0x49, 0x6a, 0xde, 0xed, 0x26, 0x33, 0xf6, 0x17, 0x2d, 0x80,
- 0x38, 0x14, 0x2f, 0x93, 0x2f, 0xfb, 0x81, 0xcc, 0xb1, 0x98, 0x95, 0x4d, 0xe8, 0x65, 0xca, 0xa3,
- 0xfa, 0x91, 0x5f, 0xf7, 0x9b, 0x42, 0xc3, 0xf5, 0x44, 0xac, 0x86, 0xe0, 0xe5, 0x87, 0xda, 0x6f,
- 0xac, 0xb0, 0xd1, 0xbc, 0x0c, 0xfc, 0x55, 0x8c, 0x15, 0x63, 0x46, 0xd0, 0xaf, 0x1f, 0xb1, 0xe0,
- 0x54, 0x96, 0xbd, 0x2b, 0x7d, 0xf1, 0x70, 0x99, 0x95, 0x32, 0x67, 0x52, 0xb3, 0x79, 0x47, 0x94,
- 0x63, 0x85, 0xd1, 0x77, 0xe6, 0xb0, 0xa3, 0xc5, 0xc0, 0xbd, 0x05, 0xe3, 0xd5, 0x80, 0x68, 0x97,
- 0xeb, 0x6b, 0xdc, 0x99, 0x9c, 0xf7, 0xe7, 0xd9, 0x23, 0x3b, 0x92, 0xdb, 0x3f, 0x5d, 0x80, 0x53,
- 0xdc, 0x2a, 0x60, 0x71, 0xcf, 0x77, 0x1b, 0x55, 0xbf, 0x21, 0xb2, 0xbe, 0x7d, 0x0a, 0xc6, 0xda,
- 0x9a, 0xa0, 0xb1, 0x5b, 0x3c, 0x47, 0x5d, 0x20, 0x19, 0x8b, 0x46, 0xf4, 0x52, 0x6c, 0xd0, 0x42,
- 0x0d, 0x18, 0x23, 0x7b, 0x6e, 0x5d, 0xa9, 0x96, 0x0b, 0x47, 0xbe, 0xe8, 0x54, 0x2b, 0x2b, 0x1a,
- 0x1d, 0x6c, 0x50, 0x7d, 0x04, 0xf9, 0x7c, 0xed, 0x1f, 0xb5, 0xe0, 0xb1, 0x9c, 0xe8, 0x8f, 0xb4,
- 0xb9, 0x7b, 0xcc, 0xfe, 0x42, 0x2c, 0x5b, 0xd5, 0x1c, 0xb7, 0xca, 0xc0, 0x02, 0x8a, 0x3e, 0x01,
- 0xc0, 0xad, 0x2a, 0xe8, 0x93, 0xbb, 0x57, 0x98, 0x3c, 0x23, 0xc2, 0x97, 0x16, 0xac, 0x49, 0xd6,
- 0xc7, 0x1a, 0x2d, 0xfb, 0x4b, 0x03, 0x30, 0xc8, 0x93, 0xad, 0xaf, 0xc2, 0xf0, 0x0e, 0xcf, 0x85,
- 0xd1, 0x4f, 0xda, 0x8d, 0x58, 0x18, 0xc2, 0x0b, 0xb0, 0xac, 0x8c, 0xd6, 0x60, 0x86, 0xe7, 0x12,
- 0x69, 0x96, 0x49, 0xd3, 0xd9, 0x97, 0x92, 0x3b, 0x9e, 0x87, 0x53, 0x49, 0x30, 0x2b, 0x69, 0x14,
- 0x9c, 0x55, 0x0f, 0xbd, 0x06, 0x13, 0xf4, 0x25, 0xe5, 0x77, 0x22, 0x49, 0x89, 0x67, 0x11, 0x51,
- 0x4f, 0xb7, 0x0d, 0x03, 0x8a, 0x13, 0xd8, 0xf4, 0x31, 0xdf, 0x4e, 0xc9, 0x28, 0x07, 0xe3, 0xc7,
- 0xbc, 0x29, 0x97, 0x34, 0x71, 0x99, 0xa1, 0x6b, 0x87, 0x99, 0xf5, 0x6e, 0xec, 0x04, 0x24, 0xdc,
- 0xf1, 0x9b, 0x0d, 0xc6, 0xf4, 0x0d, 0x6a, 0x86, 0xae, 0x09, 0x38, 0x4e, 0xd5, 0xa0, 0x54, 0xb6,
- 0x1c, 0xb7, 0xd9, 0x09, 0x48, 0x4c, 0x65, 0xc8, 0xa4, 0xb2, 0x9a, 0x80, 0xe3, 0x54, 0x8d, 0xde,
- 0xc2, 0xd7, 0xe1, 0xe3, 0x11, 0xbe, 0xd2, 0x05, 0x7b, 0xba, 0x1a, 0xf8, 0xf4, 0xc4, 0x96, 0xb1,
- 0x73, 0x94, 0x99, 0xf4, 0xb0, 0x74, 0xf3, 0xed, 0x12, 0x65, 0x4e, 0x18, 0x92, 0x72, 0x0a, 0x86,
- 0xa5, 0x42, 0x4d, 0x38, 0xf8, 0x4a, 0x2a, 0xe8, 0x2a, 0x8c, 0x8a, 0x54, 0x14, 0xcc, 0x9a, 0x97,
- 0xaf, 0x11, 0x66, 0x59, 0x51, 0x8e, 0x8b, 0xb1, 0x8e, 0x63, 0x7f, 0x5f, 0x01, 0x66, 0x32, 0xdc,
- 0x31, 0xf8, 0x99, 0xb8, 0xed, 0x86, 0x91, 0x4a, 0x6a, 0xa8, 0x9d, 0x89, 0xbc, 0x1c, 0x2b, 0x0c,
- 0xba, 0xf1, 0xf8, 0xa9, 0x9b, 0x3c, 0x69, 0x85, 0xb9, 0xb3, 0x80, 0x1e, 0x31, 0x3d, 0xe0, 0x05,
- 0x18, 0xe8, 0x84, 0x44, 0xc6, 0x87, 0x54, 0x77, 0x10, 0x53, 0xb8, 0x31, 0x08, 0x7d, 0x13, 0x6c,
- 0x2b, 0xdd, 0x95, 0xf6, 0x26, 0xe0, 0xda, 0x2b, 0x0e, 0xa3, 0x9d, 0x8b, 0x88, 0xe7, 0x78, 0x91,
- 0x78, 0x39, 0xc4, 0x81, 0xce, 0x58, 0x29, 0x16, 0x50, 0xfb, 0x4b, 0x45, 0x38, 0x9b, 0xeb, 0xa0,
- 0x45, 0xbb, 0xde, 0xf2, 0x3d, 0x37, 0xf2, 0x95, 0xc9, 0x0a, 0x0f, 0x6e, 0x46, 0xda, 0x3b, 0x6b,
- 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x09, 0x06, 0x99, 0xb8, 0x2e, 0x95, 0xde, 0x71, 0xa9, 0xcc, 0xa3,
- 0xdd, 0x70, 0x70, 0xdf, 0xa9, 0x73, 0x2f, 0xd2, 0xeb, 0xd8, 0x6f, 0x26, 0x4f, 0x47, 0xda, 0x5d,
- 0xdf, 0x6f, 0x62, 0x06, 0x44, 0x1f, 0x10, 0xe3, 0x95, 0xb0, 0xd1, 0xc0, 0x4e, 0xc3, 0x0f, 0xb5,
- 0x41, 0x7b, 0x1a, 0x86, 0x77, 0xc9, 0x7e, 0xe0, 0x7a, 0xdb, 0x49, 0xdb, 0x9d, 0x1b, 0xbc, 0x18,
- 0x4b, 0xb8, 0x99, 0xa9, 0x6b, 0xf8, 0xb8, 0x73, 0xde, 0x8e, 0xf4, 0xbc, 0x6b, 0x7f, 0xa0, 0x08,
- 0x93, 0x78, 0xa9, 0xfc, 0xde, 0x44, 0xdc, 0x4e, 0x4f, 0xc4, 0x71, 0xe7, 0xbc, 0xed, 0x3d, 0x1b,
- 0x3f, 0x6f, 0xc1, 0x24, 0x4b, 0x88, 0x21, 0xc2, 0x62, 0xb9, 0xbe, 0x77, 0x02, 0x7c, 0xed, 0x45,
- 0x18, 0x0c, 0x68, 0xa3, 0xc9, 0xbc, 0x8e, 0xac, 0x27, 0x98, 0xc3, 0xd0, 0x13, 0x30, 0xc0, 0xba,
- 0x40, 0x27, 0x6f, 0x8c, 0xa7, 0xc4, 0x2a, 0x3b, 0x91, 0x83, 0x59, 0x29, 0x8b, 0xf5, 0x82, 0x49,
- 0xbb, 0xe9, 0xf2, 0x4e, 0xc7, 0xca, 0xd4, 0x77, 0x87, 0xeb, 0x76, 0x66, 0xd7, 0xde, 0x59, 0xac,
- 0x97, 0x6c, 0x92, 0xdd, 0xdf, 0x8c, 0x7f, 0x54, 0x80, 0xf3, 0x99, 0xf5, 0xfa, 0x8e, 0xf5, 0xd2,
- 0xbd, 0xf6, 0xa3, 0x4c, 0x79, 0x50, 0x3c, 0x41, 0xcb, 0xc8, 0x81, 0x7e, 0x59, 0xd9, 0xc1, 0x3e,
- 0x42, 0xb0, 0x64, 0x0e, 0xd9, 0xbb, 0x24, 0x04, 0x4b, 0x66, 0xdf, 0x72, 0xde, 0xbc, 0x7f, 0x51,
- 0xc8, 0xf9, 0x16, 0xf6, 0xfa, 0xbd, 0x4c, 0xcf, 0x19, 0x06, 0x0c, 0xe5, 0x8b, 0x92, 0x9f, 0x31,
- 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x84, 0xc9, 0x96, 0xeb, 0xd1, 0xc3, 0x67, 0xdf, 0xe4, 0x30, 0x55,
- 0x84, 0xac, 0x35, 0x13, 0x8c, 0x93, 0xf8, 0xc8, 0xd5, 0xc2, 0xb3, 0x14, 0xf2, 0x33, 0xa5, 0xe7,
- 0xf6, 0x76, 0xc1, 0x54, 0x34, 0xab, 0x51, 0xcc, 0x08, 0xd5, 0xb2, 0xa6, 0x09, 0x3d, 0x8a, 0xfd,
- 0x0b, 0x3d, 0xc6, 0xb2, 0x05, 0x1e, 0x73, 0xaf, 0xc2, 0xf8, 0x43, 0x4b, 0xb9, 0xed, 0xaf, 0x16,
- 0xe1, 0xf1, 0x2e, 0xdb, 0x9e, 0x9f, 0xf5, 0xc6, 0x1c, 0x68, 0x67, 0x7d, 0x6a, 0x1e, 0xaa, 0x70,
- 0x6a, 0xab, 0xd3, 0x6c, 0xee, 0x33, 0x87, 0x01, 0xd2, 0x90, 0x18, 0x82, 0xa7, 0x94, 0x2f, 0xfd,
- 0x53, 0xab, 0x19, 0x38, 0x38, 0xb3, 0x26, 0x7d, 0x39, 0xd0, 0x9b, 0x64, 0x5f, 0x91, 0x4a, 0xbc,
- 0x1c, 0xb0, 0x0e, 0xc4, 0x26, 0x2e, 0xba, 0x06, 0xd3, 0xce, 0x9e, 0xe3, 0xf2, 0x18, 0xb7, 0x92,
- 0x00, 0x7f, 0x3a, 0x28, 0xe1, 0xe4, 0x62, 0x12, 0x01, 0xa7, 0xeb, 0xa0, 0xd7, 0x01, 0xf9, 0x9b,
- 0xcc, 0xac, 0xb8, 0x71, 0x8d, 0x78, 0x42, 0x1f, 0xc8, 0xe6, 0xae, 0x18, 0x1f, 0x09, 0xb7, 0x52,
- 0x18, 0x38, 0xa3, 0x56, 0x22, 0xdc, 0xc9, 0x50, 0x7e, 0xb8, 0x93, 0xee, 0xe7, 0x62, 0xcf, 0x6c,
- 0x1b, 0xff, 0xc9, 0xa2, 0xd7, 0x17, 0x67, 0xf2, 0xcd, 0xa8, 0x7d, 0xaf, 0x32, 0x7b, 0x3e, 0x2e,
- 0xb8, 0xd4, 0x82, 0x74, 0x9c, 0xd6, 0xec, 0xf9, 0x62, 0x20, 0x36, 0x71, 0xf9, 0x82, 0x08, 0x63,
- 0xdf, 0x50, 0x83, 0xc5, 0x17, 0xa1, 0x85, 0x14, 0x06, 0xfa, 0x24, 0x0c, 0x37, 0xdc, 0x3d, 0x37,
- 0x14, 0x62, 0x9b, 0x23, 0xeb, 0x48, 0xe2, 0x73, 0xb0, 0xcc, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0x81,
- 0x02, 0x8c, 0xcb, 0x16, 0xdf, 0xe8, 0xf8, 0x91, 0x73, 0x02, 0xd7, 0xf2, 0x35, 0xe3, 0x5a, 0xfe,
- 0x40, 0xb7, 0xf8, 0x4a, 0xac, 0x4b, 0xb9, 0xd7, 0xf1, 0xad, 0xc4, 0x75, 0xfc, 0x54, 0x6f, 0x52,
- 0xdd, 0xaf, 0xe1, 0x7f, 0x6c, 0xc1, 0xb4, 0x81, 0x7f, 0x02, 0xb7, 0xc1, 0xaa, 0x79, 0x1b, 0x3c,
- 0xd9, 0xf3, 0x1b, 0x72, 0x6e, 0x81, 0xef, 0x2e, 0x26, 0xfa, 0xce, 0x4e, 0xff, 0xb7, 0x60, 0x60,
- 0xc7, 0x09, 0x1a, 0xdd, 0xe2, 0xc9, 0xa7, 0x2a, 0x2d, 0x5c, 0x77, 0x02, 0xa1, 0x10, 0x7d, 0x56,
- 0x25, 0x2a, 0x77, 0x82, 0xde, 0xca, 0x50, 0xd6, 0x14, 0x7a, 0x19, 0x86, 0xc2, 0xba, 0xdf, 0x56,
- 0xee, 0x02, 0x17, 0x78, 0x12, 0x73, 0x5a, 0x72, 0x78, 0x30, 0x8f, 0xcc, 0xe6, 0x68, 0x31, 0x16,
- 0xf8, 0xe8, 0x53, 0x30, 0xce, 0x7e, 0x29, 0xeb, 0xa4, 0x62, 0x7e, 0xee, 0xa9, 0x9a, 0x8e, 0xc8,
- 0x4d, 0xf7, 0x8c, 0x22, 0x6c, 0x92, 0x9a, 0xdb, 0x86, 0x92, 0xfa, 0xac, 0x47, 0xaa, 0x84, 0xfc,
- 0xf7, 0x45, 0x98, 0xc9, 0x58, 0x73, 0x28, 0x34, 0x66, 0xe2, 0x6a, 0x9f, 0x4b, 0xf5, 0x1d, 0xce,
- 0x45, 0xc8, 0x5e, 0x43, 0x0d, 0xb1, 0xb6, 0xfa, 0x6e, 0xf4, 0x76, 0x48, 0x92, 0x8d, 0xd2, 0xa2,
- 0xde, 0x8d, 0xd2, 0xc6, 0x4e, 0x6c, 0xa8, 0x69, 0x43, 0xaa, 0xa7, 0x8f, 0x74, 0x4e, 0xff, 0xb4,
- 0x08, 0xa7, 0xb2, 0x42, 0xbe, 0xa1, 0xcf, 0x27, 0xb2, 0x19, 0xbe, 0xd8, 0x6f, 0xb0, 0x38, 0x9e,
- 0xe2, 0x90, 0x0b, 0x9b, 0x97, 0x16, 0xcc, 0xfc, 0x86, 0x3d, 0x87, 0x59, 0xb4, 0xc9, 0xe2, 0x1e,
- 0x04, 0x3c, 0x0b, 0xa5, 0x3c, 0x3e, 0x3e, 0xdc, 0x77, 0x07, 0x44, 0xfa, 0xca, 0x30, 0x61, 0xf9,
- 0x20, 0x8b, 0x7b, 0x5b, 0x3e, 0xc8, 0x96, 0xe7, 0x5c, 0x18, 0xd5, 0xbe, 0xe6, 0x91, 0xce, 0xf8,
- 0x2e, 0xbd, 0xad, 0xb4, 0x7e, 0x3f, 0xd2, 0x59, 0xff, 0x51, 0x0b, 0x12, 0xc6, 0xf0, 0x4a, 0x2c,
- 0x66, 0xe5, 0x8a, 0xc5, 0x2e, 0xc0, 0x40, 0xe0, 0x37, 0x49, 0x32, 0xed, 0x1f, 0xf6, 0x9b, 0x04,
- 0x33, 0x08, 0xc5, 0x88, 0x62, 0x61, 0xc7, 0x98, 0xfe, 0x90, 0x13, 0x4f, 0xb4, 0x8b, 0x30, 0xd8,
- 0x24, 0x7b, 0xa4, 0x99, 0xcc, 0xce, 0x72, 0x93, 0x16, 0x62, 0x0e, 0xb3, 0x7f, 0x7e, 0x00, 0xce,
- 0x75, 0x8d, 0x1c, 0x42, 0x9f, 0x43, 0xdb, 0x4e, 0x44, 0xee, 0x39, 0xfb, 0xc9, 0x34, 0x0a, 0xd7,
- 0x78, 0x31, 0x96, 0x70, 0xe6, 0xae, 0xc4, 0xa3, 0x21, 0x27, 0x84, 0x88, 0x22, 0x08, 0xb2, 0x80,
- 0x9a, 0x42, 0xa9, 0xe2, 0x71, 0x08, 0xa5, 0x9e, 0x07, 0x08, 0xc3, 0x26, 0x37, 0x19, 0x6a, 0x08,
- 0x3f, 0xa8, 0x38, 0x6a, 0x76, 0xed, 0xa6, 0x80, 0x60, 0x0d, 0x0b, 0x95, 0x61, 0xaa, 0x1d, 0xf8,
- 0x11, 0x97, 0xc9, 0x96, 0xb9, 0x55, 0xdd, 0xa0, 0x19, 0xb4, 0xa1, 0x9a, 0x80, 0xe3, 0x54, 0x0d,
- 0xf4, 0x12, 0x8c, 0x8a, 0x40, 0x0e, 0x55, 0xdf, 0x6f, 0x0a, 0x31, 0x90, 0x32, 0x34, 0xab, 0xc5,
- 0x20, 0xac, 0xe3, 0x69, 0xd5, 0x98, 0xa0, 0x77, 0x38, 0xb3, 0x1a, 0x17, 0xf6, 0x6a, 0x78, 0x89,
- 0xf0, 0x8f, 0x23, 0x7d, 0x85, 0x7f, 0x8c, 0x05, 0x63, 0xa5, 0xbe, 0x95, 0x68, 0xd0, 0x53, 0x94,
- 0xf4, 0x33, 0x03, 0x30, 0x23, 0x16, 0xce, 0xa3, 0x5e, 0x2e, 0xb7, 0xd3, 0xcb, 0xe5, 0x38, 0x44,
- 0x67, 0xef, 0xad, 0x99, 0x93, 0x5e, 0x33, 0x3f, 0x68, 0x81, 0xc9, 0x5e, 0xa1, 0xff, 0x27, 0x37,
- 0x0f, 0xcd, 0x4b, 0xb9, 0xec, 0x5a, 0x43, 0x5e, 0x20, 0xef, 0x30, 0x23, 0x8d, 0xfd, 0x1f, 0x2d,
- 0x78, 0xb2, 0x27, 0x45, 0xb4, 0x02, 0x25, 0xc6, 0x03, 0x6a, 0xaf, 0xb3, 0xa7, 0x94, 0xd5, 0xad,
- 0x04, 0xe4, 0xb0, 0xa4, 0x71, 0x4d, 0xb4, 0x92, 0x4a, 0xf8, 0xf3, 0x74, 0x46, 0xc2, 0x9f, 0xd3,
- 0xc6, 0xf0, 0x3c, 0x64, 0xc6, 0x9f, 0xef, 0xa7, 0x37, 0x8e, 0xe1, 0xf1, 0x82, 0x3e, 0x6c, 0x88,
- 0xfd, 0xec, 0x84, 0xd8, 0x0f, 0x99, 0xd8, 0xda, 0x1d, 0xf2, 0x71, 0x98, 0x62, 0x11, 0x9e, 0x98,
- 0x0d, 0xb8, 0xf0, 0xc5, 0x29, 0xc4, 0x76, 0x9e, 0x37, 0x13, 0x30, 0x9c, 0xc2, 0xb6, 0xff, 0xb0,
- 0x08, 0x43, 0x7c, 0xfb, 0x9d, 0xc0, 0x9b, 0xf0, 0x19, 0x28, 0xb9, 0xad, 0x56, 0x87, 0xe7, 0x70,
- 0x19, 0xe4, 0x0e, 0xb8, 0x74, 0x9e, 0x2a, 0xb2, 0x10, 0xc7, 0x70, 0xb4, 0x2a, 0x24, 0xce, 0x5d,
- 0x82, 0x48, 0xf2, 0x8e, 0x2f, 0x94, 0x9d, 0xc8, 0xe1, 0x0c, 0x8e, 0xba, 0x67, 0x63, 0xd9, 0x34,
- 0xfa, 0x0c, 0x40, 0x18, 0x05, 0xae, 0xb7, 0x4d, 0xcb, 0x44, 0xcc, 0xd4, 0x0f, 0x76, 0xa1, 0x56,
- 0x53, 0xc8, 0x9c, 0x66, 0x7c, 0xe6, 0x28, 0x00, 0xd6, 0x28, 0xa2, 0x05, 0xe3, 0xa6, 0x9f, 0x4b,
- 0xcc, 0x1d, 0x70, 0xaa, 0xf1, 0x9c, 0xcd, 0x7d, 0x04, 0x4a, 0x8a, 0x78, 0x2f, 0xf9, 0xd3, 0x98,
- 0xce, 0x16, 0x7d, 0x0c, 0x26, 0x13, 0x7d, 0x3b, 0x92, 0xf8, 0xea, 0x17, 0x2c, 0x98, 0xe4, 0x9d,
- 0x59, 0xf1, 0xf6, 0xc4, 0x6d, 0xf0, 0x36, 0x9c, 0x6a, 0x66, 0x9c, 0xca, 0x62, 0xfa, 0xfb, 0x3f,
- 0xc5, 0x95, 0xb8, 0x2a, 0x0b, 0x8a, 0x33, 0xdb, 0x40, 0x97, 0xe9, 0x8e, 0xa3, 0xa7, 0xae, 0xd3,
- 0x14, 0xfe, 0xb8, 0x63, 0x7c, 0xb7, 0xf1, 0x32, 0xac, 0xa0, 0xf6, 0xef, 0x58, 0x30, 0xcd, 0x7b,
- 0x7e, 0x83, 0xec, 0xab, 0xb3, 0xe9, 0xeb, 0xd9, 0x77, 0x91, 0x3d, 0xac, 0x90, 0x93, 0x3d, 0x4c,
- 0xff, 0xb4, 0x62, 0xd7, 0x4f, 0xfb, 0x69, 0x0b, 0xc4, 0x0a, 0x39, 0x01, 0x21, 0xc4, 0xb7, 0x9a,
- 0x42, 0x88, 0xb9, 0xfc, 0x4d, 0x90, 0x23, 0x7d, 0xf8, 0x73, 0x0b, 0xa6, 0x38, 0x42, 0xac, 0x2d,
- 0xff, 0xba, 0xce, 0x43, 0x3f, 0x39, 0x86, 0x6f, 0x90, 0xfd, 0x0d, 0xbf, 0xea, 0x44, 0x3b, 0xd9,
- 0x1f, 0x65, 0x4c, 0xd6, 0x40, 0xd7, 0xc9, 0x6a, 0xc8, 0x0d, 0x74, 0x84, 0xc4, 0xe5, 0x47, 0x4e,
- 0xae, 0x61, 0x7f, 0xcd, 0x02, 0xc4, 0x9b, 0x31, 0x18, 0x37, 0xca, 0x0e, 0xb1, 0x52, 0xed, 0xa2,
- 0x8b, 0x8f, 0x26, 0x05, 0xc1, 0x1a, 0xd6, 0xb1, 0x0c, 0x4f, 0xc2, 0xe4, 0xa1, 0xd8, 0xdb, 0xe4,
- 0xe1, 0x08, 0x23, 0xfa, 0xaf, 0x87, 0x20, 0xe9, 0xf5, 0x83, 0xee, 0xc0, 0x58, 0xdd, 0x69, 0x3b,
- 0x9b, 0x6e, 0xd3, 0x8d, 0x5c, 0x12, 0x76, 0x33, 0xca, 0x5a, 0xd6, 0xf0, 0x84, 0x92, 0x5a, 0x2b,
- 0xc1, 0x06, 0x1d, 0xb4, 0x00, 0xd0, 0x0e, 0xdc, 0x3d, 0xb7, 0x49, 0xb6, 0x99, 0xac, 0x84, 0x45,
- 0x00, 0xe0, 0x96, 0x46, 0xb2, 0x14, 0x6b, 0x18, 0x19, 0x2e, 0xd6, 0xc5, 0x47, 0xec, 0x62, 0x0d,
- 0x27, 0xe6, 0x62, 0x3d, 0x70, 0x24, 0x17, 0xeb, 0x91, 0x23, 0xbb, 0x58, 0x0f, 0xf6, 0xe5, 0x62,
- 0x8d, 0xe1, 0x8c, 0xe4, 0x3d, 0xe9, 0xff, 0x55, 0xb7, 0x49, 0xc4, 0x83, 0x83, 0x87, 0x2d, 0x98,
- 0x7b, 0x70, 0x30, 0x7f, 0x06, 0x67, 0x62, 0xe0, 0x9c, 0x9a, 0xe8, 0x13, 0x30, 0xeb, 0x34, 0x9b,
- 0xfe, 0x3d, 0x35, 0xa9, 0x2b, 0x61, 0xdd, 0x69, 0x72, 0x25, 0xc4, 0x30, 0xa3, 0xfa, 0xc4, 0x83,
- 0x83, 0xf9, 0xd9, 0xc5, 0x1c, 0x1c, 0x9c, 0x5b, 0x1b, 0x7d, 0x14, 0x4a, 0xed, 0xc0, 0xaf, 0xaf,
- 0x69, 0xae, 0x89, 0xe7, 0xe9, 0x00, 0x56, 0x65, 0xe1, 0xe1, 0xc1, 0xfc, 0xb8, 0xfa, 0xc3, 0x2e,
- 0xfc, 0xb8, 0x42, 0x86, 0xcf, 0xf4, 0xe8, 0xb1, 0xfa, 0x4c, 0xef, 0xc2, 0x4c, 0x8d, 0x04, 0x2e,
- 0x4b, 0x73, 0xde, 0x88, 0xcf, 0xa7, 0x0d, 0x28, 0x05, 0x89, 0x13, 0xb9, 0xaf, 0xc0, 0x8e, 0x5a,
- 0x96, 0x03, 0x79, 0x02, 0xc7, 0x84, 0xec, 0xff, 0x65, 0xc1, 0xb0, 0xf0, 0xf2, 0x39, 0x01, 0xae,
- 0x71, 0xd1, 0xd0, 0x24, 0xcc, 0x67, 0x0f, 0x18, 0xeb, 0x4c, 0xae, 0x0e, 0xa1, 0x92, 0xd0, 0x21,
- 0x3c, 0xd9, 0x8d, 0x48, 0x77, 0xed, 0xc1, 0x5f, 0x2b, 0x52, 0xee, 0xdd, 0xf0, 0x37, 0x7d, 0xf4,
- 0x43, 0xb0, 0x0e, 0xc3, 0xa1, 0xf0, 0x77, 0x2c, 0xe4, 0x1b, 0xe8, 0x27, 0x27, 0x31, 0xb6, 0x63,
- 0x13, 0x1e, 0x8e, 0x92, 0x48, 0xa6, 0x23, 0x65, 0xf1, 0x11, 0x3a, 0x52, 0xf6, 0xf2, 0xc8, 0x1d,
- 0x38, 0x0e, 0x8f, 0x5c, 0xfb, 0x2b, 0xec, 0xe6, 0xd4, 0xcb, 0x4f, 0x80, 0xa9, 0xba, 0x66, 0xde,
- 0xb1, 0x76, 0x97, 0x95, 0x25, 0x3a, 0x95, 0xc3, 0x5c, 0xfd, 0x9c, 0x05, 0xe7, 0x32, 0xbe, 0x4a,
- 0xe3, 0xb4, 0x9e, 0x85, 0x11, 0xa7, 0xd3, 0x70, 0xd5, 0x5e, 0xd6, 0xf4, 0x89, 0x8b, 0xa2, 0x1c,
- 0x2b, 0x0c, 0xb4, 0x0c, 0xd3, 0xe4, 0x7e, 0xdb, 0xe5, 0xaa, 0x54, 0xdd, 0xaa, 0xb5, 0xc8, 0x5d,
- 0xc3, 0x56, 0x92, 0x40, 0x9c, 0xc6, 0x57, 0x51, 0x50, 0x8a, 0xb9, 0x51, 0x50, 0xfe, 0x9e, 0x05,
- 0xa3, 0xca, 0xe3, 0xef, 0x91, 0x8f, 0xf6, 0xc7, 0xcd, 0xd1, 0x7e, 0xbc, 0xcb, 0x68, 0xe7, 0x0c,
- 0xf3, 0x6f, 0x15, 0x54, 0x7f, 0xab, 0x7e, 0x10, 0xf5, 0xc1, 0xc1, 0x3d, 0xbc, 0x1d, 0xfe, 0x55,
- 0x18, 0x75, 0xda, 0x6d, 0x09, 0x90, 0x36, 0x68, 0x2c, 0x4c, 0x6f, 0x5c, 0x8c, 0x75, 0x1c, 0xe5,
- 0x16, 0x50, 0xcc, 0x75, 0x0b, 0x68, 0x00, 0x44, 0x4e, 0xb0, 0x4d, 0x22, 0x5a, 0x26, 0x22, 0x96,
- 0xe5, 0x9f, 0x37, 0x9d, 0xc8, 0x6d, 0x2e, 0xb8, 0x5e, 0x14, 0x46, 0xc1, 0x42, 0xc5, 0x8b, 0x6e,
- 0x05, 0xfc, 0x09, 0xa9, 0x85, 0x04, 0x52, 0xb4, 0xb0, 0x46, 0x57, 0x7a, 0xb7, 0xb3, 0x36, 0x06,
- 0x4d, 0x63, 0x86, 0x75, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x11, 0x76, 0xfb, 0xb0, 0x31, 0x3d, 0x5a,
- 0x0c, 0x9d, 0xff, 0x3a, 0xa6, 0x66, 0x83, 0x69, 0x32, 0xcb, 0x7a, 0xa4, 0x9e, 0xee, 0x87, 0x3d,
- 0x6d, 0x58, 0x77, 0x52, 0x8b, 0xc3, 0xf9, 0xa0, 0x6f, 0x4b, 0x19, 0xa8, 0x3c, 0xd7, 0xe3, 0xd6,
- 0x38, 0x82, 0x49, 0x0a, 0xcb, 0xd9, 0xc1, 0x32, 0x1a, 0x54, 0xaa, 0x62, 0x5f, 0x68, 0x39, 0x3b,
- 0x04, 0x00, 0xc7, 0x38, 0x94, 0x99, 0x52, 0x7f, 0xc2, 0x59, 0x14, 0xc7, 0xae, 0x54, 0xd8, 0x21,
- 0xd6, 0x30, 0xd0, 0x15, 0x21, 0x50, 0xe0, 0x7a, 0x81, 0xc7, 0x13, 0x02, 0x05, 0x39, 0x5c, 0x9a,
- 0x14, 0xe8, 0x2a, 0x8c, 0xaa, 0xb4, 0xbd, 0x55, 0x9e, 0x0d, 0x56, 0x2c, 0xb3, 0x95, 0xb8, 0x18,
- 0xeb, 0x38, 0x68, 0x03, 0x26, 0x43, 0x2e, 0x67, 0x53, 0x01, 0x85, 0xb9, 0xbc, 0xf2, 0x83, 0xd2,
- 0x0a, 0xa8, 0x66, 0x82, 0x0f, 0x59, 0x11, 0x3f, 0x9d, 0xa4, 0x07, 0x7a, 0x92, 0x04, 0x7a, 0x0d,
- 0x26, 0x9a, 0xbe, 0xd3, 0x58, 0x72, 0x9a, 0x8e, 0x57, 0x67, 0xe3, 0x33, 0x62, 0x66, 0x7f, 0xbc,
- 0x69, 0x40, 0x71, 0x02, 0x9b, 0x32, 0x6f, 0x7a, 0x89, 0x08, 0x82, 0xed, 0x78, 0xdb, 0x24, 0x14,
- 0x49, 0x58, 0x19, 0xf3, 0x76, 0x33, 0x07, 0x07, 0xe7, 0xd6, 0x46, 0x2f, 0xc3, 0x98, 0xfc, 0x7c,
- 0x2d, 0x60, 0x43, 0xec, 0x61, 0xa1, 0xc1, 0xb0, 0x81, 0x89, 0xee, 0xc1, 0x69, 0xf9, 0x7f, 0x23,
- 0x70, 0xb6, 0xb6, 0xdc, 0xba, 0xf0, 0x62, 0xe6, 0xae, 0x98, 0x8b, 0xd2, 0x5f, 0x70, 0x25, 0x0b,
- 0xe9, 0xf0, 0x60, 0xfe, 0x82, 0x18, 0xb5, 0x4c, 0x38, 0x9b, 0xc4, 0x6c, 0xfa, 0x68, 0x0d, 0x66,
- 0x76, 0x88, 0xd3, 0x8c, 0x76, 0x96, 0x77, 0x48, 0x7d, 0x57, 0x6e, 0x3a, 0x16, 0x06, 0x42, 0xf3,
- 0x4b, 0xb8, 0x9e, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0x9b, 0x30, 0xdb, 0xee, 0x6c, 0x36, 0xdd, 0x70,
- 0x67, 0xdd, 0x8f, 0x98, 0x29, 0x90, 0xca, 0x02, 0x2c, 0xe2, 0x45, 0xa8, 0x40, 0x1b, 0xd5, 0x1c,
- 0x3c, 0x9c, 0x4b, 0x01, 0xbd, 0x0d, 0xa7, 0x13, 0x8b, 0x41, 0x78, 0xcc, 0x4f, 0xe4, 0xa7, 0x14,
- 0xa8, 0x65, 0x55, 0x10, 0xc1, 0x27, 0xb2, 0x40, 0x38, 0xbb, 0x09, 0xfa, 0xf8, 0xd0, 0x62, 0xb8,
- 0x86, 0xb3, 0x53, 0xb1, 0xcd, 0xb2, 0x16, 0xe8, 0x35, 0xc4, 0x06, 0x16, 0x7a, 0x05, 0xc0, 0x6d,
- 0xaf, 0x3a, 0x2d, 0xb7, 0x49, 0x1f, 0x99, 0x33, 0xac, 0x0e, 0x7d, 0x70, 0x40, 0xa5, 0x2a, 0x4b,
- 0xe9, 0xa9, 0x2e, 0xfe, 0xed, 0x63, 0x0d, 0x1b, 0x55, 0x61, 0x42, 0xfc, 0xdb, 0x17, 0x8b, 0x61,
- 0x5a, 0xb9, 0xb4, 0x4f, 0xc8, 0x1a, 0x6a, 0x05, 0x20, 0xb3, 0x84, 0xcd, 0x79, 0xa2, 0x3e, 0xda,
- 0x86, 0x73, 0x22, 0xcd, 0x34, 0xd1, 0x57, 0xb7, 0x9c, 0xbd, 0x90, 0x65, 0x00, 0x18, 0xe1, 0xce,
- 0x12, 0x8b, 0xdd, 0x10, 0x71, 0x77, 0x3a, 0x94, 0x2b, 0xd0, 0x37, 0x09, 0x77, 0x22, 0x3d, 0xcd,
- 0x8d, 0x9a, 0x28, 0x57, 0x70, 0x33, 0x09, 0xc4, 0x69, 0x7c, 0x14, 0xc2, 0x69, 0xd7, 0xcb, 0xda,
- 0x13, 0x67, 0x18, 0xa1, 0x8f, 0x71, 0xff, 0xd9, 0xee, 0xfb, 0x21, 0x13, 0xce, 0xf7, 0x43, 0x26,
- 0xed, 0x77, 0x66, 0xbb, 0xf7, 0xdb, 0x16, 0xad, 0xad, 0xf1, 0xf7, 0xe8, 0xb3, 0x30, 0xa6, 0x7f,
- 0x98, 0xe0, 0x55, 0x2e, 0x65, 0xb3, 0xbf, 0xda, 0xa9, 0xc2, 0x5f, 0x07, 0xea, 0xe4, 0xd0, 0x61,
- 0xd8, 0xa0, 0x88, 0xea, 0x19, 0x9e, 0xe6, 0x57, 0xfa, 0xe3, 0x85, 0xfa, 0x37, 0x5d, 0x23, 0x90,
- 0xbd, 0x59, 0xd0, 0x4d, 0x18, 0xa9, 0x37, 0x5d, 0xe2, 0x45, 0x95, 0x6a, 0xb7, 0xd8, 0x70, 0xcb,
- 0x02, 0x47, 0xec, 0x3e, 0x11, 0xd0, 0x9f, 0x97, 0x61, 0x45, 0xc1, 0xfe, 0xd5, 0x02, 0xcc, 0xf7,
- 0xc8, 0x0e, 0x91, 0x50, 0x64, 0x59, 0x7d, 0x29, 0xb2, 0x16, 0x65, 0x82, 0xec, 0xf5, 0x84, 0x8c,
- 0x2c, 0x91, 0xfc, 0x3a, 0x96, 0x94, 0x25, 0xf1, 0xfb, 0x76, 0x2c, 0xd0, 0x75, 0x61, 0x03, 0x3d,
- 0x5d, 0x63, 0x0c, 0x1d, 0xf8, 0x60, 0xff, 0x0f, 0xe7, 0x5c, 0x7d, 0xa6, 0xfd, 0x95, 0x02, 0x9c,
- 0x56, 0x43, 0xf8, 0xcd, 0x3b, 0x70, 0xb7, 0xd3, 0x03, 0x77, 0x0c, 0xda, 0x60, 0xfb, 0x16, 0x0c,
- 0xf1, 0x60, 0x77, 0x7d, 0x30, 0xec, 0x17, 0xcd, 0xb8, 0xb0, 0x8a, 0x47, 0x34, 0x62, 0xc3, 0x7e,
- 0xaf, 0x05, 0x93, 0x1b, 0xcb, 0xd5, 0x9a, 0x5f, 0xdf, 0x25, 0xd1, 0x22, 0x7f, 0x60, 0x61, 0xcd,
- 0x27, 0xf7, 0x61, 0x98, 0xea, 0x2c, 0x76, 0xfd, 0x02, 0x0c, 0xec, 0xf8, 0x61, 0x94, 0x34, 0x15,
- 0xb9, 0xee, 0x87, 0x11, 0x66, 0x10, 0xfb, 0x77, 0x2d, 0x18, 0xdc, 0x70, 0x5c, 0x2f, 0x92, 0x6a,
- 0x05, 0x2b, 0x47, 0xad, 0xd0, 0xcf, 0x77, 0xa1, 0x97, 0x60, 0x88, 0x6c, 0x6d, 0x91, 0x7a, 0x24,
- 0x66, 0x55, 0x06, 0x34, 0x18, 0x5a, 0x61, 0xa5, 0x94, 0x83, 0x64, 0x8d, 0xf1, 0xbf, 0x58, 0x20,
- 0xa3, 0xbb, 0x50, 0x8a, 0xdc, 0x16, 0x59, 0x6c, 0x34, 0x84, 0xb2, 0xfd, 0x21, 0x82, 0x32, 0x6c,
- 0x48, 0x02, 0x38, 0xa6, 0x65, 0x7f, 0xa9, 0x00, 0x10, 0x47, 0x09, 0xea, 0xf5, 0x89, 0x4b, 0x29,
- 0x35, 0xec, 0xa5, 0x0c, 0x35, 0x2c, 0x8a, 0x09, 0x66, 0xe8, 0x60, 0xd5, 0x30, 0x15, 0xfb, 0x1a,
- 0xa6, 0x81, 0xa3, 0x0c, 0xd3, 0x32, 0x4c, 0xc7, 0x51, 0x8e, 0xcc, 0x20, 0x6f, 0xec, 0xfa, 0xdc,
- 0x48, 0x02, 0x71, 0x1a, 0xdf, 0x26, 0x70, 0x41, 0x05, 0x7b, 0x11, 0x37, 0x1a, 0xb3, 0xe5, 0xd6,
- 0xd5, 0xda, 0x3d, 0xc6, 0x29, 0xd6, 0x33, 0x17, 0x72, 0xf5, 0xcc, 0x3f, 0x61, 0xc1, 0xa9, 0x64,
- 0x3b, 0xcc, 0x8b, 0xf7, 0x8b, 0x16, 0x9c, 0x66, 0xda, 0x76, 0xd6, 0x6a, 0x5a, 0xb7, 0xff, 0x62,
- 0xd7, 0x00, 0x36, 0x39, 0x3d, 0x8e, 0x23, 0x67, 0xac, 0x65, 0x91, 0xc6, 0xd9, 0x2d, 0xda, 0xff,
- 0xa1, 0x00, 0xb3, 0x79, 0x91, 0x6f, 0x98, 0xab, 0x87, 0x73, 0xbf, 0xb6, 0x4b, 0xee, 0x09, 0x83,
- 0xfa, 0xd8, 0xd5, 0x83, 0x17, 0x63, 0x09, 0x4f, 0x06, 0xfc, 0x2f, 0xf4, 0x19, 0xf0, 0x7f, 0x07,
- 0xa6, 0xef, 0xed, 0x10, 0xef, 0xb6, 0x17, 0x3a, 0x91, 0x1b, 0x6e, 0xb9, 0x4c, 0x33, 0xcd, 0xd7,
- 0xcd, 0x2b, 0xd2, 0xec, 0xfd, 0x6e, 0x12, 0xe1, 0xf0, 0x60, 0xfe, 0x9c, 0x51, 0x10, 0x77, 0x99,
- 0x1f, 0x24, 0x38, 0x4d, 0x34, 0x9d, 0x2f, 0x61, 0xe0, 0x11, 0xe6, 0x4b, 0xb0, 0xbf, 0x68, 0xc1,
- 0xd9, 0xdc, 0x24, 0xad, 0xe8, 0x32, 0x8c, 0x38, 0x6d, 0x97, 0x0b, 0xf7, 0xc5, 0x31, 0xca, 0x84,
- 0x48, 0xd5, 0x0a, 0x17, 0xed, 0x2b, 0xa8, 0x4a, 0x1e, 0x5f, 0xc8, 0x4d, 0x1e, 0xdf, 0x33, 0x17,
- 0xbc, 0xfd, 0x3d, 0x16, 0x08, 0x37, 0xd5, 0x3e, 0xce, 0xee, 0x4f, 0xc1, 0xd8, 0x5e, 0x3a, 0xa7,
- 0xd2, 0x85, 0x7c, 0xbf, 0x5d, 0x91, 0x49, 0x49, 0x31, 0x64, 0x46, 0xfe, 0x24, 0x83, 0x96, 0xdd,
- 0x00, 0x01, 0x2d, 0x13, 0x26, 0xba, 0xee, 0xdd, 0x9b, 0xe7, 0x01, 0x1a, 0x0c, 0x57, 0xcb, 0xc0,
- 0xaf, 0x6e, 0xe6, 0xb2, 0x82, 0x60, 0x0d, 0xcb, 0xfe, 0xb7, 0x05, 0x18, 0x95, 0x39, 0x7c, 0x3a,
- 0x5e, 0x3f, 0x02, 0xa6, 0x23, 0x25, 0xf5, 0x44, 0x57, 0xa0, 0xc4, 0x24, 0xa0, 0xd5, 0x58, 0x2e,
- 0xa7, 0xe4, 0x0f, 0x6b, 0x12, 0x80, 0x63, 0x1c, 0xba, 0x8b, 0xc2, 0xce, 0x26, 0x43, 0x4f, 0x38,
- 0x55, 0xd6, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x09, 0x98, 0xe2, 0xf5, 0x02, 0xbf, 0xed, 0x6c, 0x73,
- 0xad, 0xc9, 0xa0, 0x0a, 0xbb, 0x30, 0xb5, 0x96, 0x80, 0x1d, 0x1e, 0xcc, 0x9f, 0x4a, 0x96, 0x31,
- 0x75, 0x60, 0x8a, 0x0a, 0x33, 0x8e, 0xe2, 0x8d, 0xd0, 0xdd, 0x9f, 0xb2, 0xa9, 0x8a, 0x41, 0x58,
- 0xc7, 0xb3, 0x3f, 0x0b, 0x28, 0x9d, 0xcd, 0x08, 0xbd, 0xce, 0x2d, 0x62, 0xdd, 0x80, 0x34, 0xba,
- 0xa9, 0x07, 0xf5, 0xe0, 0x02, 0xd2, 0x1f, 0x8a, 0xd7, 0xc2, 0xaa, 0xbe, 0xfd, 0xff, 0x17, 0x61,
- 0x2a, 0xe9, 0x01, 0x8e, 0xae, 0xc3, 0x10, 0x67, 0x3d, 0x04, 0xf9, 0x2e, 0xd6, 0x27, 0x9a, 0xdf,
- 0x38, 0x3b, 0x84, 0x05, 0xf7, 0x22, 0xea, 0xa3, 0x37, 0x61, 0xb4, 0xe1, 0xdf, 0xf3, 0xee, 0x39,
- 0x41, 0x63, 0xb1, 0x5a, 0x11, 0xcb, 0x39, 0xf3, 0x39, 0x5c, 0x8e, 0xd1, 0x74, 0x5f, 0x74, 0xa6,
- 0x69, 0x8d, 0x41, 0x58, 0x27, 0x87, 0x36, 0x58, 0x08, 0xf4, 0x2d, 0x77, 0x7b, 0xcd, 0x69, 0x77,
- 0x73, 0x8f, 0x58, 0x96, 0x48, 0x1a, 0xe5, 0x71, 0x11, 0x27, 0x9d, 0x03, 0x70, 0x4c, 0x08, 0x7d,
- 0x1e, 0x66, 0xc2, 0x1c, 0x21, 0x7d, 0x5e, 0x72, 0xbb, 0x6e, 0x72, 0xeb, 0xa5, 0xc7, 0x1e, 0x1c,
- 0xcc, 0xcf, 0x64, 0x89, 0xf3, 0xb3, 0x9a, 0xb1, 0x7f, 0xe4, 0x14, 0x18, 0x9b, 0xd8, 0xc8, 0x75,
- 0x6a, 0x1d, 0x53, 0xae, 0x53, 0x0c, 0x23, 0xa4, 0xd5, 0x8e, 0xf6, 0xcb, 0x6e, 0xd0, 0x2d, 0x03,
- 0xf8, 0x8a, 0xc0, 0x49, 0xd3, 0x94, 0x10, 0xac, 0xe8, 0x64, 0x27, 0xa4, 0x2d, 0x7e, 0x1d, 0x13,
- 0xd2, 0x0e, 0x9c, 0x60, 0x42, 0xda, 0x75, 0x18, 0xde, 0x76, 0x23, 0x4c, 0xda, 0xbe, 0x60, 0xfa,
- 0x33, 0xd7, 0xe1, 0x35, 0x8e, 0x92, 0x4e, 0x7d, 0x28, 0x00, 0x58, 0x12, 0x41, 0xaf, 0xab, 0x1d,
- 0x38, 0x94, 0xff, 0x30, 0x4f, 0x9b, 0x49, 0x64, 0xee, 0x41, 0x91, 0x76, 0x76, 0xf8, 0x61, 0xd3,
- 0xce, 0xae, 0xca, 0x64, 0xb1, 0x23, 0xf9, 0xbe, 0x4c, 0x2c, 0x17, 0x6c, 0x8f, 0x14, 0xb1, 0x77,
- 0xf4, 0x04, 0xbb, 0xa5, 0xfc, 0x93, 0x40, 0xe5, 0xce, 0xed, 0x33, 0xad, 0xee, 0xf7, 0x58, 0x70,
- 0xba, 0x9d, 0x95, 0x6b, 0x5a, 0x58, 0x14, 0xbc, 0xd4, 0x77, 0x3a, 0x6b, 0xa3, 0x41, 0x26, 0x89,
- 0xcb, 0x44, 0xc3, 0xd9, 0xcd, 0xd1, 0x81, 0x0e, 0x36, 0x1b, 0x42, 0xb3, 0x7d, 0x31, 0x27, 0x3f,
- 0x6f, 0x97, 0xac, 0xbc, 0x1b, 0x19, 0xb9, 0x60, 0xdf, 0x9f, 0x97, 0x0b, 0xb6, 0xef, 0x0c, 0xb0,
- 0xaf, 0xab, 0xcc, 0xbc, 0xe3, 0xf9, 0x4b, 0x89, 0xe7, 0xdd, 0xed, 0x99, 0x8f, 0xf7, 0x75, 0x95,
- 0x8f, 0xb7, 0x4b, 0x7c, 0x5b, 0x9e, 0x6d, 0xb7, 0x67, 0x16, 0x5e, 0x2d, 0x93, 0xee, 0xe4, 0xf1,
- 0x64, 0xd2, 0x35, 0xae, 0x1a, 0x9e, 0xcc, 0xf5, 0x99, 0x1e, 0x57, 0x8d, 0x41, 0xb7, 0xfb, 0x65,
- 0xc3, 0xb3, 0x06, 0x4f, 0x3f, 0x54, 0xd6, 0xe0, 0x3b, 0x7a, 0x16, 0x5e, 0xd4, 0x23, 0xcd, 0x2c,
- 0x45, 0xea, 0x33, 0xf7, 0xee, 0x1d, 0xfd, 0x02, 0x9c, 0xc9, 0xa7, 0xab, 0xee, 0xb9, 0x34, 0xdd,
- 0xcc, 0x2b, 0x30, 0x95, 0xd3, 0xf7, 0xd4, 0xc9, 0xe4, 0xf4, 0x3d, 0x7d, 0xec, 0x39, 0x7d, 0xcf,
- 0x9c, 0x40, 0x4e, 0xdf, 0xc7, 0x4e, 0x30, 0xa7, 0xef, 0x1d, 0x66, 0x86, 0xc3, 0x83, 0xfd, 0x88,
- 0x78, 0xbc, 0xd9, 0xb1, 0x5f, 0xb3, 0x22, 0x02, 0xf1, 0x8f, 0x53, 0x20, 0x1c, 0x93, 0xca, 0xc8,
- 0x15, 0x3c, 0xfb, 0x08, 0x72, 0x05, 0xaf, 0xc7, 0xb9, 0x82, 0xcf, 0xe6, 0x4f, 0x75, 0x86, 0xe3,
- 0x46, 0x4e, 0x86, 0xe0, 0x3b, 0x7a, 0x66, 0xdf, 0xc7, 0xbb, 0xe8, 0x5a, 0xb2, 0x04, 0x8f, 0x5d,
- 0xf2, 0xf9, 0xbe, 0xc6, 0xf3, 0xf9, 0x3e, 0x91, 0x7f, 0x92, 0x27, 0xaf, 0x3b, 0x23, 0x8b, 0x2f,
- 0xed, 0x97, 0x8a, 0xfc, 0xc8, 0x22, 0x0f, 0xe7, 0xf4, 0x4b, 0x85, 0x8e, 0x4c, 0xf7, 0x4b, 0x81,
- 0x70, 0x4c, 0xca, 0xfe, 0xbe, 0x02, 0x9c, 0xef, 0xbe, 0xdf, 0x62, 0x69, 0x6a, 0x35, 0x56, 0x3d,
- 0x27, 0xa4, 0xa9, 0xfc, 0xcd, 0x16, 0x63, 0xf5, 0x1d, 0xc8, 0xee, 0x1a, 0x4c, 0x2b, 0x8f, 0x8f,
- 0xa6, 0x5b, 0xdf, 0x5f, 0x8f, 0x5f, 0xbe, 0xca, 0x4b, 0xbe, 0x96, 0x44, 0xc0, 0xe9, 0x3a, 0x68,
- 0x11, 0x26, 0x8d, 0xc2, 0x4a, 0x59, 0xbc, 0xcd, 0x94, 0xf8, 0xb6, 0x66, 0x82, 0x71, 0x12, 0xdf,
- 0xfe, 0xb2, 0x05, 0x8f, 0xe5, 0x24, 0xc3, 0xeb, 0x3b, 0x4e, 0xdb, 0x16, 0x4c, 0xb6, 0xcd, 0xaa,
- 0x3d, 0x42, 0x4b, 0x1a, 0x29, 0xf7, 0x54, 0x5f, 0x13, 0x00, 0x9c, 0x24, 0x6a, 0xff, 0x99, 0x05,
- 0xe7, 0xba, 0x9a, 0x30, 0x22, 0x0c, 0x67, 0xb6, 0x5b, 0xa1, 0xb3, 0x1c, 0x90, 0x06, 0xf1, 0x22,
- 0xd7, 0x69, 0xd6, 0xda, 0xa4, 0xae, 0xc9, 0xc3, 0x99, 0x2d, 0xe0, 0xb5, 0xb5, 0xda, 0x62, 0x1a,
- 0x03, 0xe7, 0xd4, 0x44, 0xab, 0x80, 0xd2, 0x10, 0x31, 0xc3, 0x2c, 0x86, 0x75, 0x9a, 0x1e, 0xce,
- 0xa8, 0x81, 0x3e, 0x02, 0xe3, 0xca, 0x34, 0x52, 0x9b, 0x71, 0x76, 0xb0, 0x63, 0x1d, 0x80, 0x4d,
- 0xbc, 0xa5, 0xcb, 0xbf, 0xfe, 0xfb, 0xe7, 0xdf, 0xf7, 0x9b, 0xbf, 0x7f, 0xfe, 0x7d, 0xbf, 0xf3,
- 0xfb, 0xe7, 0xdf, 0xf7, 0x1d, 0x0f, 0xce, 0x5b, 0xbf, 0xfe, 0xe0, 0xbc, 0xf5, 0x9b, 0x0f, 0xce,
- 0x5b, 0xbf, 0xf3, 0xe0, 0xbc, 0xf5, 0x7b, 0x0f, 0xce, 0x5b, 0x5f, 0xfa, 0x83, 0xf3, 0xef, 0xfb,
- 0x54, 0x61, 0xef, 0xea, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xd2, 0x6a, 0x19, 0xeb, 0xbc, 0x04,
- 0x01, 0x00,
+ // 14068 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x69, 0x70, 0x5c, 0xd9,
+ 0x79, 0x18, 0xaa, 0xdb, 0x8d, 0xad, 0x3f, 0xec, 0x07, 0x24, 0x07, 0xc4, 0x0c, 0x09, 0xce, 0xa5,
+ 0xc4, 0xe1, 0x68, 0x66, 0x40, 0x71, 0x16, 0x69, 0x3c, 0x23, 0x8d, 0x05, 0xa0, 0x01, 0xb2, 0x87,
+ 0x04, 0xd8, 0x73, 0x1a, 0x24, 0x25, 0x79, 0xa4, 0xd2, 0x45, 0xf7, 0x01, 0x70, 0x85, 0xee, 0x7b,
+ 0x7b, 0xee, 0xbd, 0x0d, 0x12, 0xf3, 0xe4, 0x7a, 0x7e, 0xf2, 0x2a, 0x2f, 0xaf, 0x54, 0xaf, 0xfc,
+ 0xb2, 0xd8, 0x2e, 0x57, 0xca, 0x71, 0xca, 0x56, 0x94, 0xcd, 0xb1, 0x63, 0x3b, 0x96, 0x13, 0x3b,
+ 0xbb, 0x93, 0x1f, 0x8e, 0xe3, 0x4a, 0x2c, 0x57, 0xb9, 0x82, 0xd8, 0x74, 0xaa, 0x5c, 0xfa, 0x11,
+ 0xdb, 0x89, 0x93, 0x1f, 0x41, 0x5c, 0x71, 0xea, 0xac, 0xf7, 0x9c, 0xbb, 0x74, 0x37, 0x38, 0x20,
+ 0x34, 0x52, 0xcd, 0xbf, 0xee, 0xf3, 0x7d, 0xe7, 0x3b, 0xe7, 0x9e, 0xf5, 0x3b, 0xdf, 0x0a, 0xaf,
+ 0xee, 0xbe, 0x1c, 0x2e, 0xb8, 0xfe, 0x95, 0xdd, 0xce, 0x26, 0x09, 0x3c, 0x12, 0x91, 0xf0, 0xca,
+ 0x1e, 0xf1, 0x1a, 0x7e, 0x70, 0x45, 0x00, 0x9c, 0xb6, 0x7b, 0xa5, 0xee, 0x07, 0xe4, 0xca, 0xde,
+ 0xd5, 0x2b, 0xdb, 0xc4, 0x23, 0x81, 0x13, 0x91, 0xc6, 0x42, 0x3b, 0xf0, 0x23, 0x1f, 0x21, 0x8e,
+ 0xb3, 0xe0, 0xb4, 0xdd, 0x05, 0x8a, 0xb3, 0xb0, 0x77, 0x75, 0xee, 0xb9, 0x6d, 0x37, 0xda, 0xe9,
+ 0x6c, 0x2e, 0xd4, 0xfd, 0xd6, 0x95, 0x6d, 0x7f, 0xdb, 0xbf, 0xc2, 0x50, 0x37, 0x3b, 0x5b, 0xec,
+ 0x1f, 0xfb, 0xc3, 0x7e, 0x71, 0x12, 0x73, 0x2f, 0xc6, 0xcd, 0xb4, 0x9c, 0xfa, 0x8e, 0xeb, 0x91,
+ 0x60, 0xff, 0x4a, 0x7b, 0x77, 0x9b, 0xb5, 0x1b, 0x90, 0xd0, 0xef, 0x04, 0x75, 0x92, 0x6c, 0xb8,
+ 0x6b, 0xad, 0xf0, 0x4a, 0x8b, 0x44, 0x4e, 0x46, 0x77, 0xe7, 0xae, 0xe4, 0xd5, 0x0a, 0x3a, 0x5e,
+ 0xe4, 0xb6, 0xd2, 0xcd, 0x7c, 0xb8, 0x57, 0x85, 0xb0, 0xbe, 0x43, 0x5a, 0x4e, 0xaa, 0xde, 0x0b,
+ 0x79, 0xf5, 0x3a, 0x91, 0xdb, 0xbc, 0xe2, 0x7a, 0x51, 0x18, 0x05, 0xc9, 0x4a, 0xf6, 0xd7, 0x2c,
+ 0xb8, 0xb0, 0x78, 0xb7, 0xb6, 0xd2, 0x74, 0xc2, 0xc8, 0xad, 0x2f, 0x35, 0xfd, 0xfa, 0x6e, 0x2d,
+ 0xf2, 0x03, 0x72, 0xc7, 0x6f, 0x76, 0x5a, 0xa4, 0xc6, 0x06, 0x02, 0x3d, 0x0b, 0x23, 0x7b, 0xec,
+ 0x7f, 0xa5, 0x3c, 0x6b, 0x5d, 0xb0, 0x2e, 0x97, 0x96, 0xa6, 0x7e, 0xe3, 0x60, 0xfe, 0x7d, 0x0f,
+ 0x0e, 0xe6, 0x47, 0xee, 0x88, 0x72, 0xac, 0x30, 0xd0, 0x25, 0x18, 0xda, 0x0a, 0x37, 0xf6, 0xdb,
+ 0x64, 0xb6, 0xc0, 0x70, 0x27, 0x04, 0xee, 0xd0, 0x6a, 0x8d, 0x96, 0x62, 0x01, 0x45, 0x57, 0xa0,
+ 0xd4, 0x76, 0x82, 0xc8, 0x8d, 0x5c, 0xdf, 0x9b, 0x2d, 0x5e, 0xb0, 0x2e, 0x0f, 0x2e, 0x4d, 0x0b,
+ 0xd4, 0x52, 0x55, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x71, 0xcb, 0x6b, 0xee, 0xcf,
+ 0x0e, 0x5c, 0xb0, 0x2e, 0x8f, 0xc4, 0xdd, 0xc0, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0xc7, 0x0a, 0x30,
+ 0xb2, 0xb8, 0xb5, 0xe5, 0x7a, 0x6e, 0xb4, 0x8f, 0xee, 0xc0, 0x98, 0xe7, 0x37, 0x88, 0xfc, 0xcf,
+ 0xbe, 0x62, 0xf4, 0xf9, 0x0b, 0x0b, 0xe9, 0xa5, 0xb4, 0xb0, 0xae, 0xe1, 0x2d, 0x4d, 0x3d, 0x38,
+ 0x98, 0x1f, 0xd3, 0x4b, 0xb0, 0x41, 0x07, 0x61, 0x18, 0x6d, 0xfb, 0x0d, 0x45, 0xb6, 0xc0, 0xc8,
+ 0xce, 0x67, 0x91, 0xad, 0xc6, 0x68, 0x4b, 0x93, 0x0f, 0x0e, 0xe6, 0x47, 0xb5, 0x02, 0xac, 0x13,
+ 0x41, 0x9b, 0x30, 0x49, 0xff, 0x7a, 0x91, 0xab, 0xe8, 0x16, 0x19, 0xdd, 0x8b, 0x79, 0x74, 0x35,
+ 0xd4, 0xa5, 0x99, 0x07, 0x07, 0xf3, 0x93, 0x89, 0x42, 0x9c, 0x24, 0x68, 0xbf, 0x0d, 0x13, 0x8b,
+ 0x51, 0xe4, 0xd4, 0x77, 0x48, 0x83, 0xcf, 0x20, 0x7a, 0x11, 0x06, 0x3c, 0xa7, 0x45, 0xc4, 0xfc,
+ 0x5e, 0x10, 0x03, 0x3b, 0xb0, 0xee, 0xb4, 0xc8, 0xe1, 0xc1, 0xfc, 0xd4, 0x6d, 0xcf, 0x7d, 0xab,
+ 0x23, 0x56, 0x05, 0x2d, 0xc3, 0x0c, 0x1b, 0x3d, 0x0f, 0xd0, 0x20, 0x7b, 0x6e, 0x9d, 0x54, 0x9d,
+ 0x68, 0x47, 0xcc, 0x37, 0x12, 0x75, 0xa1, 0xac, 0x20, 0x58, 0xc3, 0xb2, 0xef, 0x43, 0x69, 0x71,
+ 0xcf, 0x77, 0x1b, 0x55, 0xbf, 0x11, 0xa2, 0x5d, 0x98, 0x6c, 0x07, 0x64, 0x8b, 0x04, 0xaa, 0x68,
+ 0xd6, 0xba, 0x50, 0xbc, 0x3c, 0xfa, 0xfc, 0xe5, 0xcc, 0x8f, 0x35, 0x51, 0x57, 0xbc, 0x28, 0xd8,
+ 0x5f, 0x7a, 0x4c, 0xb4, 0x37, 0x99, 0x80, 0xe2, 0x24, 0x65, 0xfb, 0x9f, 0x17, 0xe0, 0xf4, 0xe2,
+ 0xdb, 0x9d, 0x80, 0x94, 0xdd, 0x70, 0x37, 0xb9, 0xc2, 0x1b, 0x6e, 0xb8, 0xbb, 0x1e, 0x8f, 0x80,
+ 0x5a, 0x5a, 0x65, 0x51, 0x8e, 0x15, 0x06, 0x7a, 0x0e, 0x86, 0xe9, 0xef, 0xdb, 0xb8, 0x22, 0x3e,
+ 0x79, 0x46, 0x20, 0x8f, 0x96, 0x9d, 0xc8, 0x29, 0x73, 0x10, 0x96, 0x38, 0x68, 0x0d, 0x46, 0xeb,
+ 0x6c, 0x43, 0x6e, 0xaf, 0xf9, 0x0d, 0xc2, 0x26, 0xb3, 0xb4, 0xf4, 0x0c, 0x45, 0x5f, 0x8e, 0x8b,
+ 0x0f, 0x0f, 0xe6, 0x67, 0x79, 0xdf, 0x04, 0x09, 0x0d, 0x86, 0xf5, 0xfa, 0xc8, 0x56, 0xfb, 0x6b,
+ 0x80, 0x51, 0x82, 0x8c, 0xbd, 0x75, 0x59, 0xdb, 0x2a, 0x83, 0x6c, 0xab, 0x8c, 0x65, 0x6f, 0x13,
+ 0x74, 0x15, 0x06, 0x76, 0x5d, 0xaf, 0x31, 0x3b, 0xc4, 0x68, 0x9d, 0xa3, 0x73, 0x7e, 0xc3, 0xf5,
+ 0x1a, 0x87, 0x07, 0xf3, 0xd3, 0x46, 0x77, 0x68, 0x21, 0x66, 0xa8, 0xf6, 0x9f, 0x59, 0x30, 0xcf,
+ 0x60, 0xab, 0x6e, 0x93, 0x54, 0x49, 0x10, 0xba, 0x61, 0x44, 0xbc, 0xc8, 0x18, 0xd0, 0xe7, 0x01,
+ 0x42, 0x52, 0x0f, 0x48, 0xa4, 0x0d, 0xa9, 0x5a, 0x18, 0x35, 0x05, 0xc1, 0x1a, 0x16, 0x3d, 0x10,
+ 0xc2, 0x1d, 0x27, 0x60, 0xeb, 0x4b, 0x0c, 0xac, 0x3a, 0x10, 0x6a, 0x12, 0x80, 0x63, 0x1c, 0xe3,
+ 0x40, 0x28, 0xf6, 0x3a, 0x10, 0xd0, 0xc7, 0x60, 0x32, 0x6e, 0x2c, 0x6c, 0x3b, 0x75, 0x39, 0x80,
+ 0x6c, 0xcb, 0xd4, 0x4c, 0x10, 0x4e, 0xe2, 0xda, 0x7f, 0xd3, 0x12, 0x8b, 0x87, 0x7e, 0xf5, 0xbb,
+ 0xfc, 0x5b, 0xed, 0x5f, 0xb6, 0x60, 0x78, 0xc9, 0xf5, 0x1a, 0xae, 0xb7, 0x8d, 0x3e, 0x0b, 0x23,
+ 0xf4, 0x6e, 0x6a, 0x38, 0x91, 0x23, 0xce, 0xbd, 0x0f, 0x69, 0x7b, 0x4b, 0x5d, 0x15, 0x0b, 0xed,
+ 0xdd, 0x6d, 0x5a, 0x10, 0x2e, 0x50, 0x6c, 0xba, 0xdb, 0x6e, 0x6d, 0x7e, 0x8e, 0xd4, 0xa3, 0x35,
+ 0x12, 0x39, 0xf1, 0xe7, 0xc4, 0x65, 0x58, 0x51, 0x45, 0x37, 0x60, 0x28, 0x72, 0x82, 0x6d, 0x12,
+ 0x89, 0x03, 0x30, 0xf3, 0xa0, 0xe2, 0x35, 0x31, 0xdd, 0x91, 0xc4, 0xab, 0x93, 0xf8, 0x5a, 0xd8,
+ 0x60, 0x55, 0xb1, 0x20, 0x61, 0xff, 0xc8, 0x30, 0x9c, 0x5d, 0xae, 0x55, 0x72, 0xd6, 0xd5, 0x25,
+ 0x18, 0x6a, 0x04, 0xee, 0x1e, 0x09, 0xc4, 0x38, 0x2b, 0x2a, 0x65, 0x56, 0x8a, 0x05, 0x14, 0xbd,
+ 0x0c, 0x63, 0xfc, 0x42, 0xba, 0xee, 0x78, 0x8d, 0xa6, 0x1c, 0xe2, 0x53, 0x02, 0x7b, 0xec, 0x8e,
+ 0x06, 0xc3, 0x06, 0xe6, 0x11, 0x17, 0xd5, 0xa5, 0xc4, 0x66, 0xcc, 0xbb, 0xec, 0xbe, 0x68, 0xc1,
+ 0x14, 0x6f, 0x66, 0x31, 0x8a, 0x02, 0x77, 0xb3, 0x13, 0x91, 0x70, 0x76, 0x90, 0x9d, 0x74, 0xcb,
+ 0x59, 0xa3, 0x95, 0x3b, 0x02, 0x0b, 0x77, 0x12, 0x54, 0xf8, 0x21, 0x38, 0x2b, 0xda, 0x9d, 0x4a,
+ 0x82, 0x71, 0xaa, 0x59, 0xf4, 0xdd, 0x16, 0xcc, 0xd5, 0x7d, 0x2f, 0x0a, 0xfc, 0x66, 0x93, 0x04,
+ 0xd5, 0xce, 0x66, 0xd3, 0x0d, 0x77, 0xf8, 0x3a, 0xc5, 0x64, 0x8b, 0x9d, 0x04, 0x39, 0x73, 0xa8,
+ 0x90, 0xc4, 0x1c, 0x9e, 0x7f, 0x70, 0x30, 0x3f, 0xb7, 0x9c, 0x4b, 0x0a, 0x77, 0x69, 0x06, 0xed,
+ 0x02, 0xa2, 0x57, 0x69, 0x2d, 0x72, 0xb6, 0x49, 0xdc, 0xf8, 0x70, 0xff, 0x8d, 0x9f, 0x79, 0x70,
+ 0x30, 0x8f, 0xd6, 0x53, 0x24, 0x70, 0x06, 0x59, 0xf4, 0x16, 0x9c, 0xa2, 0xa5, 0xa9, 0x6f, 0x1d,
+ 0xe9, 0xbf, 0xb9, 0xd9, 0x07, 0x07, 0xf3, 0xa7, 0xd6, 0x33, 0x88, 0xe0, 0x4c, 0xd2, 0xe8, 0xbb,
+ 0x2c, 0x38, 0x1b, 0x7f, 0xfe, 0xca, 0xfd, 0xb6, 0xe3, 0x35, 0xe2, 0x86, 0x4b, 0xfd, 0x37, 0x4c,
+ 0xcf, 0xe4, 0xb3, 0xcb, 0x79, 0x94, 0x70, 0x7e, 0x23, 0x73, 0xcb, 0x70, 0x3a, 0x73, 0xb5, 0xa0,
+ 0x29, 0x28, 0xee, 0x12, 0xce, 0x05, 0x95, 0x30, 0xfd, 0x89, 0x4e, 0xc1, 0xe0, 0x9e, 0xd3, 0xec,
+ 0x88, 0x8d, 0x82, 0xf9, 0x9f, 0x57, 0x0a, 0x2f, 0x5b, 0xf6, 0xbf, 0x28, 0xc2, 0xe4, 0x72, 0xad,
+ 0xf2, 0x50, 0xbb, 0x50, 0xbf, 0x86, 0x0a, 0x5d, 0xaf, 0xa1, 0xf8, 0x52, 0x2b, 0xe6, 0x5e, 0x6a,
+ 0xff, 0x77, 0xc6, 0x16, 0x1a, 0x60, 0x5b, 0xe8, 0xdb, 0x72, 0xb6, 0xd0, 0x31, 0x6f, 0x9c, 0xbd,
+ 0x9c, 0x55, 0x34, 0xc8, 0x26, 0x33, 0x93, 0x63, 0xb9, 0xe9, 0xd7, 0x9d, 0x66, 0xf2, 0xe8, 0x3b,
+ 0xe2, 0x52, 0x3a, 0x9e, 0x79, 0xac, 0xc3, 0xd8, 0xb2, 0xd3, 0x76, 0x36, 0xdd, 0xa6, 0x1b, 0xb9,
+ 0x24, 0x44, 0x4f, 0x41, 0xd1, 0x69, 0x34, 0x18, 0xb7, 0x55, 0x5a, 0x3a, 0xfd, 0xe0, 0x60, 0xbe,
+ 0xb8, 0xd8, 0xa0, 0xd7, 0x3e, 0x28, 0xac, 0x7d, 0x4c, 0x31, 0xd0, 0x07, 0x61, 0xa0, 0x11, 0xf8,
+ 0xed, 0xd9, 0x02, 0xc3, 0xa4, 0xbb, 0x6e, 0xa0, 0x1c, 0xf8, 0xed, 0x04, 0x2a, 0xc3, 0xb1, 0x7f,
+ 0xbd, 0x00, 0x4f, 0x2c, 0x93, 0xf6, 0xce, 0x6a, 0x2d, 0xe7, 0xfc, 0xbe, 0x0c, 0x23, 0x2d, 0xdf,
+ 0x73, 0x23, 0x3f, 0x08, 0x45, 0xd3, 0x6c, 0x45, 0xac, 0x89, 0x32, 0xac, 0xa0, 0xe8, 0x02, 0x0c,
+ 0xb4, 0x63, 0xa6, 0x72, 0x4c, 0x32, 0xa4, 0x8c, 0x9d, 0x64, 0x10, 0x8a, 0xd1, 0x09, 0x49, 0x20,
+ 0x56, 0x8c, 0xc2, 0xb8, 0x1d, 0x92, 0x00, 0x33, 0x48, 0x7c, 0x33, 0xd3, 0x3b, 0x5b, 0x9c, 0xd0,
+ 0x89, 0x9b, 0x99, 0x42, 0xb0, 0x86, 0x85, 0xaa, 0x50, 0x0a, 0x13, 0x33, 0xdb, 0xd7, 0x36, 0x1d,
+ 0x67, 0x57, 0xb7, 0x9a, 0xc9, 0x98, 0x88, 0x71, 0xa3, 0x0c, 0xf5, 0xbc, 0xba, 0xbf, 0x5a, 0x00,
+ 0xc4, 0x87, 0xf0, 0x9b, 0x6c, 0xe0, 0x6e, 0xa7, 0x07, 0xae, 0xff, 0x2d, 0x71, 0x5c, 0xa3, 0xf7,
+ 0xdf, 0x2d, 0x78, 0x62, 0xd9, 0xf5, 0x1a, 0x24, 0xc8, 0x59, 0x80, 0x8f, 0xe6, 0x2d, 0x7b, 0x34,
+ 0xa6, 0xc1, 0x58, 0x62, 0x03, 0xc7, 0xb0, 0xc4, 0xec, 0x3f, 0xb1, 0x00, 0xf1, 0xcf, 0x7e, 0xd7,
+ 0x7d, 0xec, 0xed, 0xf4, 0xc7, 0x1e, 0xc3, 0xb2, 0xb0, 0x6f, 0xc2, 0xc4, 0x72, 0xd3, 0x25, 0x5e,
+ 0x54, 0xa9, 0x2e, 0xfb, 0xde, 0x96, 0xbb, 0x8d, 0x5e, 0x81, 0x89, 0xc8, 0x6d, 0x11, 0xbf, 0x13,
+ 0xd5, 0x48, 0xdd, 0xf7, 0xd8, 0x4b, 0xd2, 0xba, 0x3c, 0xb8, 0x84, 0x1e, 0x1c, 0xcc, 0x4f, 0x6c,
+ 0x18, 0x10, 0x9c, 0xc0, 0xb4, 0x7f, 0x8f, 0x8e, 0x9f, 0xdf, 0x6a, 0xfb, 0x1e, 0xf1, 0xa2, 0x65,
+ 0xdf, 0x6b, 0x70, 0x89, 0xc3, 0x2b, 0x30, 0x10, 0xd1, 0xf1, 0xe0, 0x63, 0x77, 0x49, 0x6e, 0x14,
+ 0x3a, 0x0a, 0x87, 0x07, 0xf3, 0x67, 0xd2, 0x35, 0xd8, 0x38, 0xb1, 0x3a, 0xe8, 0xdb, 0x60, 0x28,
+ 0x8c, 0x9c, 0xa8, 0x13, 0x8a, 0xd1, 0x7c, 0x52, 0x8e, 0x66, 0x8d, 0x95, 0x1e, 0x1e, 0xcc, 0x4f,
+ 0xaa, 0x6a, 0xbc, 0x08, 0x8b, 0x0a, 0xe8, 0x69, 0x18, 0x6e, 0x91, 0x30, 0x74, 0xb6, 0xe5, 0x6d,
+ 0x38, 0x29, 0xea, 0x0e, 0xaf, 0xf1, 0x62, 0x2c, 0xe1, 0xe8, 0x22, 0x0c, 0x92, 0x20, 0xf0, 0x03,
+ 0xb1, 0x47, 0xc7, 0x05, 0xe2, 0xe0, 0x0a, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x5b, 0x0b, 0x26, 0x55,
+ 0x5f, 0x79, 0x5b, 0x27, 0xf0, 0x2a, 0xf8, 0x14, 0x40, 0x5d, 0x7e, 0x60, 0xc8, 0x6e, 0x8f, 0xd1,
+ 0xe7, 0x2f, 0x65, 0x5e, 0xd4, 0xa9, 0x61, 0x8c, 0x29, 0xab, 0xa2, 0x10, 0x6b, 0xd4, 0xec, 0x7f,
+ 0x64, 0xc1, 0x4c, 0xe2, 0x8b, 0x6e, 0xba, 0x61, 0x84, 0xde, 0x4c, 0x7d, 0xd5, 0x42, 0x7f, 0x5f,
+ 0x45, 0x6b, 0xb3, 0x6f, 0x52, 0x4b, 0x59, 0x96, 0x68, 0x5f, 0x74, 0x1d, 0x06, 0xdd, 0x88, 0xb4,
+ 0xe4, 0xc7, 0x5c, 0xec, 0xfa, 0x31, 0xbc, 0x57, 0xf1, 0x8c, 0x54, 0x68, 0x4d, 0xcc, 0x09, 0xd8,
+ 0xbf, 0x5e, 0x84, 0x12, 0x5f, 0xb6, 0x6b, 0x4e, 0xfb, 0x04, 0xe6, 0xe2, 0x19, 0x28, 0xb9, 0xad,
+ 0x56, 0x27, 0x72, 0x36, 0xc5, 0x71, 0x3e, 0xc2, 0xb7, 0x56, 0x45, 0x16, 0xe2, 0x18, 0x8e, 0x2a,
+ 0x30, 0xc0, 0xba, 0xc2, 0xbf, 0xf2, 0xa9, 0xec, 0xaf, 0x14, 0x7d, 0x5f, 0x28, 0x3b, 0x91, 0xc3,
+ 0x39, 0x29, 0x75, 0x8f, 0xd0, 0x22, 0xcc, 0x48, 0x20, 0x07, 0x60, 0xd3, 0xf5, 0x9c, 0x60, 0x9f,
+ 0x96, 0xcd, 0x16, 0x19, 0xc1, 0xe7, 0xba, 0x13, 0x5c, 0x52, 0xf8, 0x9c, 0xac, 0xfa, 0xb0, 0x18,
+ 0x80, 0x35, 0xa2, 0x73, 0x1f, 0x81, 0x92, 0x42, 0x3e, 0x0a, 0x43, 0x34, 0xf7, 0x31, 0x98, 0x4c,
+ 0xb4, 0xd5, 0xab, 0xfa, 0x98, 0xce, 0x4f, 0xfd, 0x0a, 0x3b, 0x32, 0x44, 0xaf, 0x57, 0xbc, 0x3d,
+ 0x71, 0xe4, 0xbe, 0x0d, 0xa7, 0x9a, 0x19, 0x27, 0x99, 0x98, 0xd7, 0xfe, 0x4f, 0xbe, 0x27, 0xc4,
+ 0x67, 0x9f, 0xca, 0x82, 0xe2, 0xcc, 0x36, 0x28, 0x8f, 0xe0, 0xb7, 0xe9, 0x06, 0x71, 0x9a, 0x3a,
+ 0xbb, 0x7d, 0x4b, 0x94, 0x61, 0x05, 0xa5, 0xe7, 0xdd, 0x29, 0xd5, 0xf9, 0x1b, 0x64, 0xbf, 0x46,
+ 0x9a, 0xa4, 0x1e, 0xf9, 0xc1, 0x37, 0xb4, 0xfb, 0xe7, 0xf8, 0xe8, 0xf3, 0xe3, 0x72, 0x54, 0x10,
+ 0x28, 0xde, 0x20, 0xfb, 0x7c, 0x2a, 0xf4, 0xaf, 0x2b, 0x76, 0xfd, 0xba, 0x9f, 0xb3, 0x60, 0x5c,
+ 0x7d, 0xdd, 0x09, 0x9c, 0x0b, 0x4b, 0xe6, 0xb9, 0x70, 0xae, 0xeb, 0x02, 0xcf, 0x39, 0x11, 0xbe,
+ 0x5a, 0x80, 0xb3, 0x0a, 0x87, 0xbe, 0x0d, 0xf8, 0x1f, 0xb1, 0xaa, 0xae, 0x40, 0xc9, 0x53, 0x52,
+ 0x2b, 0xcb, 0x14, 0x17, 0xc5, 0x32, 0xab, 0x18, 0x87, 0xb2, 0x78, 0x5e, 0x2c, 0x5a, 0x1a, 0xd3,
+ 0xc5, 0xb9, 0x42, 0x74, 0xbb, 0x04, 0xc5, 0x8e, 0xdb, 0x10, 0x17, 0xcc, 0x87, 0xe4, 0x68, 0xdf,
+ 0xae, 0x94, 0x0f, 0x0f, 0xe6, 0x9f, 0xcc, 0x53, 0x25, 0xd0, 0x9b, 0x2d, 0x5c, 0xb8, 0x5d, 0x29,
+ 0x63, 0x5a, 0x19, 0x2d, 0xc2, 0xa4, 0xd4, 0x96, 0xdc, 0xa1, 0xec, 0x96, 0xef, 0x89, 0x7b, 0x48,
+ 0xc9, 0x64, 0xb1, 0x09, 0xc6, 0x49, 0x7c, 0x54, 0x86, 0xa9, 0xdd, 0xce, 0x26, 0x69, 0x92, 0x88,
+ 0x7f, 0xf0, 0x0d, 0xc2, 0x25, 0x96, 0xa5, 0xf8, 0x65, 0x76, 0x23, 0x01, 0xc7, 0xa9, 0x1a, 0xf6,
+ 0x5f, 0xb0, 0xfb, 0x40, 0x8c, 0x5e, 0x35, 0xf0, 0xe9, 0xc2, 0xa2, 0xd4, 0xbf, 0x91, 0xcb, 0xb9,
+ 0x9f, 0x55, 0x71, 0x83, 0xec, 0x6f, 0xf8, 0x94, 0x33, 0xcf, 0x5e, 0x15, 0xc6, 0x9a, 0x1f, 0xe8,
+ 0xba, 0xe6, 0x7f, 0xa1, 0x00, 0xa7, 0xd5, 0x08, 0x18, 0x4c, 0xe0, 0x37, 0xfb, 0x18, 0x5c, 0x85,
+ 0xd1, 0x06, 0xd9, 0x72, 0x3a, 0xcd, 0x48, 0x89, 0xcf, 0x07, 0xb9, 0x0a, 0xa5, 0x1c, 0x17, 0x63,
+ 0x1d, 0xe7, 0x08, 0xc3, 0xf6, 0x3f, 0x46, 0xd9, 0x45, 0x1c, 0x39, 0x74, 0x8d, 0xab, 0x5d, 0x63,
+ 0xe5, 0xee, 0x9a, 0x8b, 0x30, 0xe8, 0xb6, 0x28, 0x63, 0x56, 0x30, 0xf9, 0xad, 0x0a, 0x2d, 0xc4,
+ 0x1c, 0x86, 0x3e, 0x00, 0xc3, 0x75, 0xbf, 0xd5, 0x72, 0xbc, 0x06, 0xbb, 0xf2, 0x4a, 0x4b, 0xa3,
+ 0x94, 0x77, 0x5b, 0xe6, 0x45, 0x58, 0xc2, 0xd0, 0x13, 0x30, 0xe0, 0x04, 0xdb, 0x5c, 0x86, 0x51,
+ 0x5a, 0x1a, 0xa1, 0x2d, 0x2d, 0x06, 0xdb, 0x21, 0x66, 0xa5, 0xf4, 0x09, 0x76, 0xcf, 0x0f, 0x76,
+ 0x5d, 0x6f, 0xbb, 0xec, 0x06, 0x62, 0x4b, 0xa8, 0xbb, 0xf0, 0xae, 0x82, 0x60, 0x0d, 0x0b, 0xad,
+ 0xc2, 0x60, 0xdb, 0x0f, 0xa2, 0x70, 0x76, 0x88, 0x0d, 0xf7, 0x93, 0x39, 0x07, 0x11, 0xff, 0xda,
+ 0xaa, 0x1f, 0x44, 0xf1, 0x07, 0xd0, 0x7f, 0x21, 0xe6, 0xd5, 0xd1, 0x4d, 0x18, 0x26, 0xde, 0xde,
+ 0x6a, 0xe0, 0xb7, 0x66, 0x67, 0xf2, 0x29, 0xad, 0x70, 0x14, 0xbe, 0xcc, 0x62, 0x1e, 0x55, 0x14,
+ 0x63, 0x49, 0x02, 0x7d, 0x1b, 0x14, 0x89, 0xb7, 0x37, 0x3b, 0xcc, 0x28, 0xcd, 0xe5, 0x50, 0xba,
+ 0xe3, 0x04, 0xf1, 0x99, 0xbf, 0xe2, 0xed, 0x61, 0x5a, 0x07, 0x7d, 0x12, 0x4a, 0xf2, 0xc0, 0x08,
+ 0x85, 0xb0, 0x2e, 0x73, 0xc1, 0xca, 0x63, 0x06, 0x93, 0xb7, 0x3a, 0x6e, 0x40, 0x5a, 0xc4, 0x8b,
+ 0xc2, 0xf8, 0x84, 0x94, 0xd0, 0x10, 0xc7, 0xd4, 0xd0, 0x27, 0xa5, 0x84, 0x78, 0xcd, 0xef, 0x78,
+ 0x51, 0x38, 0x5b, 0x62, 0xdd, 0xcb, 0xd4, 0xdd, 0xdd, 0x89, 0xf1, 0x92, 0x22, 0x64, 0x5e, 0x19,
+ 0x1b, 0xa4, 0xd0, 0xa7, 0x61, 0x9c, 0xff, 0xe7, 0x1a, 0xb0, 0x70, 0xf6, 0x34, 0xa3, 0x7d, 0x21,
+ 0x9f, 0x36, 0x47, 0x5c, 0x3a, 0x2d, 0x88, 0x8f, 0xeb, 0xa5, 0x21, 0x36, 0xa9, 0x21, 0x0c, 0xe3,
+ 0x4d, 0x77, 0x8f, 0x78, 0x24, 0x0c, 0xab, 0x81, 0xbf, 0x49, 0x66, 0x81, 0x0d, 0xcc, 0xd9, 0x6c,
+ 0x8d, 0x99, 0xbf, 0x49, 0x96, 0xa6, 0x29, 0xcd, 0x9b, 0x7a, 0x1d, 0x6c, 0x92, 0x40, 0xb7, 0x61,
+ 0x82, 0xbe, 0xd8, 0xdc, 0x98, 0xe8, 0x68, 0x2f, 0xa2, 0xec, 0x5d, 0x85, 0x8d, 0x4a, 0x38, 0x41,
+ 0x04, 0xdd, 0x82, 0xb1, 0x30, 0x72, 0x82, 0xa8, 0xd3, 0xe6, 0x44, 0xcf, 0xf4, 0x22, 0xca, 0x14,
+ 0xae, 0x35, 0xad, 0x0a, 0x36, 0x08, 0xa0, 0xd7, 0xa1, 0xd4, 0x74, 0xb7, 0x48, 0x7d, 0xbf, 0xde,
+ 0x24, 0xb3, 0x63, 0x8c, 0x5a, 0xe6, 0xa1, 0x72, 0x53, 0x22, 0x71, 0x3e, 0x57, 0xfd, 0xc5, 0x71,
+ 0x75, 0x74, 0x07, 0xce, 0x44, 0x24, 0x68, 0xb9, 0x9e, 0x43, 0x0f, 0x03, 0xf1, 0xb4, 0x62, 0x8a,
+ 0xcc, 0x71, 0xb6, 0xdb, 0xce, 0x8b, 0xd9, 0x38, 0xb3, 0x91, 0x89, 0x85, 0x73, 0x6a, 0xa3, 0xfb,
+ 0x30, 0x9b, 0x01, 0xf1, 0x9b, 0x6e, 0x7d, 0x7f, 0xf6, 0x14, 0xa3, 0xfc, 0x51, 0x41, 0x79, 0x76,
+ 0x23, 0x07, 0xef, 0xb0, 0x0b, 0x0c, 0xe7, 0x52, 0x47, 0xb7, 0x60, 0x92, 0x9d, 0x40, 0xd5, 0x4e,
+ 0xb3, 0x29, 0x1a, 0x9c, 0x60, 0x0d, 0x7e, 0x40, 0xde, 0xc7, 0x15, 0x13, 0x7c, 0x78, 0x30, 0x0f,
+ 0xf1, 0x3f, 0x9c, 0xac, 0x8d, 0x36, 0x99, 0xce, 0xac, 0x13, 0xb8, 0xd1, 0x3e, 0x3d, 0x37, 0xc8,
+ 0xfd, 0x68, 0x76, 0xb2, 0xab, 0xbc, 0x42, 0x47, 0x55, 0x8a, 0x35, 0xbd, 0x10, 0x27, 0x09, 0xd2,
+ 0x23, 0x35, 0x8c, 0x1a, 0xae, 0x37, 0x3b, 0xc5, 0xdf, 0x25, 0xf2, 0x44, 0xaa, 0xd1, 0x42, 0xcc,
+ 0x61, 0x4c, 0x5f, 0x46, 0x7f, 0xdc, 0xa2, 0x37, 0xd7, 0x34, 0x43, 0x8c, 0xf5, 0x65, 0x12, 0x80,
+ 0x63, 0x1c, 0xca, 0x4c, 0x46, 0xd1, 0xfe, 0x2c, 0x62, 0xa8, 0xea, 0x60, 0xd9, 0xd8, 0xf8, 0x24,
+ 0xa6, 0xe5, 0xf6, 0x26, 0x4c, 0xa8, 0x83, 0x90, 0x8d, 0x09, 0x9a, 0x87, 0x41, 0xc6, 0x3e, 0x09,
+ 0xe9, 0x5a, 0x89, 0x76, 0x81, 0xb1, 0x56, 0x98, 0x97, 0xb3, 0x2e, 0xb8, 0x6f, 0x93, 0xa5, 0xfd,
+ 0x88, 0xf0, 0x37, 0x7d, 0x51, 0xeb, 0x82, 0x04, 0xe0, 0x18, 0xc7, 0xfe, 0xdf, 0x9c, 0x0d, 0x8d,
+ 0x4f, 0xdb, 0x3e, 0xee, 0x97, 0x67, 0x61, 0x64, 0xc7, 0x0f, 0x23, 0x8a, 0xcd, 0xda, 0x18, 0x8c,
+ 0x19, 0xcf, 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0xf4, 0x2a, 0x8c, 0xd7, 0xf5, 0x06, 0xc4, 0xe5, 0xa8,
+ 0x8e, 0x11, 0xa3, 0x75, 0x6c, 0xe2, 0xa2, 0x97, 0x61, 0x84, 0xd9, 0x80, 0xd4, 0xfd, 0xa6, 0xe0,
+ 0xda, 0xe4, 0x0d, 0x3f, 0x52, 0x15, 0xe5, 0x87, 0xda, 0x6f, 0xac, 0xb0, 0xd1, 0x25, 0x18, 0xa2,
+ 0x5d, 0xa8, 0x54, 0xc5, 0xb5, 0xa4, 0x04, 0x45, 0xd7, 0x59, 0x29, 0x16, 0x50, 0xfb, 0xff, 0x2b,
+ 0x68, 0xa3, 0x4c, 0xdf, 0xc3, 0x04, 0x55, 0x61, 0xf8, 0x9e, 0xe3, 0x46, 0xae, 0xb7, 0x2d, 0xf8,
+ 0x8f, 0xa7, 0xbb, 0xde, 0x51, 0xac, 0xd2, 0x5d, 0x5e, 0x81, 0xdf, 0xa2, 0xe2, 0x0f, 0x96, 0x64,
+ 0x28, 0xc5, 0xa0, 0xe3, 0x79, 0x94, 0x62, 0xa1, 0x5f, 0x8a, 0x98, 0x57, 0xe0, 0x14, 0xc5, 0x1f,
+ 0x2c, 0xc9, 0xa0, 0x37, 0x01, 0xe4, 0x0e, 0x23, 0x0d, 0x61, 0x7b, 0xf1, 0x6c, 0x6f, 0xa2, 0x1b,
+ 0xaa, 0xce, 0xd2, 0x04, 0xbd, 0xa3, 0xe3, 0xff, 0x58, 0xa3, 0x67, 0x47, 0x8c, 0x4f, 0x4b, 0x77,
+ 0x06, 0x7d, 0x07, 0x5d, 0xe2, 0x4e, 0x10, 0x91, 0xc6, 0x62, 0x24, 0x06, 0xe7, 0x83, 0xfd, 0x3d,
+ 0x52, 0x36, 0xdc, 0x16, 0xd1, 0xb7, 0x83, 0x20, 0x82, 0x63, 0x7a, 0xf6, 0x2f, 0x15, 0x61, 0x36,
+ 0xaf, 0xbb, 0x74, 0xd1, 0x91, 0xfb, 0x6e, 0xb4, 0x4c, 0xd9, 0x2b, 0xcb, 0x5c, 0x74, 0x2b, 0xa2,
+ 0x1c, 0x2b, 0x0c, 0x3a, 0xfb, 0xa1, 0xbb, 0x2d, 0xdf, 0x98, 0x83, 0xf1, 0xec, 0xd7, 0x58, 0x29,
+ 0x16, 0x50, 0x8a, 0x17, 0x10, 0x27, 0x14, 0xc6, 0x3d, 0xda, 0x2a, 0xc1, 0xac, 0x14, 0x0b, 0xa8,
+ 0x2e, 0xed, 0x1a, 0xe8, 0x21, 0xed, 0x32, 0x86, 0x68, 0xf0, 0x78, 0x87, 0x08, 0x7d, 0x06, 0x60,
+ 0xcb, 0xf5, 0xdc, 0x70, 0x87, 0x51, 0x1f, 0x3a, 0x32, 0x75, 0xc5, 0x9c, 0xad, 0x2a, 0x2a, 0x58,
+ 0xa3, 0x88, 0x5e, 0x82, 0x51, 0xb5, 0x01, 0x2b, 0x65, 0xa6, 0xe9, 0xd4, 0x2c, 0x47, 0xe2, 0xd3,
+ 0xa8, 0x8c, 0x75, 0x3c, 0xfb, 0x73, 0xc9, 0xf5, 0x22, 0x76, 0x80, 0x36, 0xbe, 0x56, 0xbf, 0xe3,
+ 0x5b, 0xe8, 0x3e, 0xbe, 0xf6, 0xd7, 0x8b, 0x30, 0x69, 0x34, 0xd6, 0x09, 0xfb, 0x38, 0xb3, 0xae,
+ 0xd1, 0x03, 0xdc, 0x89, 0x88, 0xd8, 0x7f, 0x76, 0xef, 0xad, 0xa2, 0x1f, 0xf2, 0x74, 0x07, 0xf0,
+ 0xfa, 0xe8, 0x33, 0x50, 0x6a, 0x3a, 0x21, 0x93, 0x9c, 0x11, 0xb1, 0xef, 0xfa, 0x21, 0x16, 0x3f,
+ 0x4c, 0x9c, 0x30, 0xd2, 0x6e, 0x4d, 0x4e, 0x3b, 0x26, 0x49, 0x6f, 0x1a, 0xca, 0x9f, 0x48, 0xeb,
+ 0x31, 0xd5, 0x09, 0xca, 0xc4, 0xec, 0x63, 0x0e, 0x43, 0x2f, 0xc3, 0x58, 0x40, 0xd8, 0xaa, 0x58,
+ 0xa6, 0xdc, 0x1c, 0x5b, 0x66, 0x83, 0x31, 0xdb, 0x87, 0x35, 0x18, 0x36, 0x30, 0xe3, 0xb7, 0xc1,
+ 0x50, 0x97, 0xb7, 0xc1, 0xd3, 0x30, 0xcc, 0x7e, 0xa8, 0x15, 0xa0, 0x66, 0xa3, 0xc2, 0x8b, 0xb1,
+ 0x84, 0x27, 0x17, 0xcc, 0x48, 0x7f, 0x0b, 0x86, 0xbe, 0x3e, 0xc4, 0xa2, 0x66, 0x5a, 0xe6, 0x11,
+ 0x7e, 0xca, 0x89, 0x25, 0x8f, 0x25, 0xcc, 0xfe, 0x20, 0x4c, 0x94, 0x1d, 0xd2, 0xf2, 0xbd, 0x15,
+ 0xaf, 0xd1, 0xf6, 0x5d, 0x2f, 0x42, 0xb3, 0x30, 0xc0, 0x2e, 0x11, 0x7e, 0x04, 0x0c, 0xd0, 0x86,
+ 0x30, 0x2b, 0xb1, 0xb7, 0xe1, 0x74, 0xd9, 0xbf, 0xe7, 0xdd, 0x73, 0x82, 0xc6, 0x62, 0xb5, 0xa2,
+ 0xbd, 0xaf, 0xd7, 0xe5, 0xfb, 0x8e, 0x1b, 0x6d, 0x65, 0x1e, 0xbd, 0x5a, 0x4d, 0xce, 0xd6, 0xae,
+ 0xba, 0x4d, 0x92, 0x23, 0x05, 0xf9, 0xcb, 0x05, 0xa3, 0xa5, 0x18, 0x5f, 0x69, 0xb5, 0xac, 0x5c,
+ 0xad, 0xd6, 0x1b, 0x30, 0xb2, 0xe5, 0x92, 0x66, 0x03, 0x93, 0x2d, 0xb1, 0x12, 0x9f, 0xca, 0xb7,
+ 0x43, 0x59, 0xa5, 0x98, 0x52, 0xea, 0xc5, 0x5f, 0x87, 0xab, 0xa2, 0x32, 0x56, 0x64, 0xd0, 0x2e,
+ 0x4c, 0xc9, 0x07, 0x83, 0x84, 0x8a, 0x75, 0xf9, 0x74, 0xb7, 0x57, 0x88, 0x49, 0xfc, 0xd4, 0x83,
+ 0x83, 0xf9, 0x29, 0x9c, 0x20, 0x83, 0x53, 0x84, 0xe9, 0x73, 0xb0, 0x45, 0x4f, 0xe0, 0x01, 0x36,
+ 0xfc, 0xec, 0x39, 0xc8, 0x5e, 0xb6, 0xac, 0xd4, 0xfe, 0x09, 0x0b, 0x1e, 0x4b, 0x8d, 0x8c, 0x78,
+ 0xe1, 0x1f, 0xf3, 0x2c, 0x24, 0x5f, 0xdc, 0x85, 0xde, 0x2f, 0x6e, 0xfb, 0x6f, 0x59, 0x70, 0x6a,
+ 0xa5, 0xd5, 0x8e, 0xf6, 0xcb, 0xae, 0xa9, 0x82, 0xfa, 0x08, 0x0c, 0xb5, 0x48, 0xc3, 0xed, 0xb4,
+ 0xc4, 0xcc, 0xcd, 0xcb, 0x53, 0x6a, 0x8d, 0x95, 0x1e, 0x1e, 0xcc, 0x8f, 0xd7, 0x22, 0x3f, 0x70,
+ 0xb6, 0x09, 0x2f, 0xc0, 0x02, 0x9d, 0x9d, 0xf5, 0xee, 0xdb, 0xe4, 0xa6, 0xdb, 0x72, 0xa5, 0x5d,
+ 0x51, 0x57, 0x99, 0xdd, 0x82, 0x1c, 0xd0, 0x85, 0x37, 0x3a, 0x8e, 0x17, 0xb9, 0xd1, 0xbe, 0xd0,
+ 0x1e, 0x49, 0x22, 0x38, 0xa6, 0x67, 0x7f, 0xcd, 0x82, 0x49, 0xb9, 0xee, 0x17, 0x1b, 0x8d, 0x80,
+ 0x84, 0x21, 0x9a, 0x83, 0x82, 0xdb, 0x16, 0xbd, 0x04, 0xd1, 0xcb, 0x42, 0xa5, 0x8a, 0x0b, 0x6e,
+ 0x5b, 0xb2, 0x65, 0xec, 0x20, 0x2c, 0x9a, 0x8a, 0xb4, 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x19,
+ 0x46, 0x3c, 0xbf, 0xc1, 0x6d, 0xbb, 0xf8, 0x95, 0xc6, 0x16, 0xd8, 0xba, 0x28, 0xc3, 0x0a, 0x8a,
+ 0xaa, 0x50, 0xe2, 0x66, 0x4f, 0xf1, 0xa2, 0xed, 0xcb, 0x78, 0x8a, 0x7d, 0xd9, 0x86, 0xac, 0x89,
+ 0x63, 0x22, 0xf6, 0xaf, 0x59, 0x30, 0x26, 0xbf, 0xac, 0x4f, 0x9e, 0x93, 0x6e, 0xad, 0x98, 0xdf,
+ 0x8c, 0xb7, 0x16, 0xe5, 0x19, 0x19, 0xc4, 0x60, 0x15, 0x8b, 0x47, 0x62, 0x15, 0xaf, 0xc2, 0xa8,
+ 0xd3, 0x6e, 0x57, 0x4d, 0x3e, 0x93, 0x2d, 0xa5, 0xc5, 0xb8, 0x18, 0xeb, 0x38, 0xf6, 0x8f, 0x17,
+ 0x60, 0x42, 0x7e, 0x41, 0xad, 0xb3, 0x19, 0x92, 0x08, 0x6d, 0x40, 0xc9, 0xe1, 0xb3, 0x44, 0xe4,
+ 0x22, 0xbf, 0x98, 0x2d, 0x47, 0x30, 0xa6, 0x34, 0xbe, 0xf0, 0x17, 0x65, 0x6d, 0x1c, 0x13, 0x42,
+ 0x4d, 0x98, 0xf6, 0xfc, 0x88, 0x1d, 0xfe, 0x0a, 0xde, 0x4d, 0xb5, 0x93, 0xa4, 0x7e, 0x56, 0x50,
+ 0x9f, 0x5e, 0x4f, 0x52, 0xc1, 0x69, 0xc2, 0x68, 0x45, 0xca, 0x66, 0x8a, 0xf9, 0xc2, 0x00, 0x7d,
+ 0xe2, 0xb2, 0x45, 0x33, 0xf6, 0xaf, 0x5a, 0x50, 0x92, 0x68, 0x27, 0xa1, 0xc5, 0x5b, 0x83, 0xe1,
+ 0x90, 0x4d, 0x82, 0x1c, 0x1a, 0xbb, 0x5b, 0xc7, 0xf9, 0x7c, 0xc5, 0x77, 0x1a, 0xff, 0x1f, 0x62,
+ 0x49, 0x83, 0x89, 0xe6, 0x55, 0xf7, 0xdf, 0x25, 0xa2, 0x79, 0xd5, 0x9f, 0x9c, 0x4b, 0xe9, 0x8f,
+ 0x58, 0x9f, 0x35, 0x59, 0x17, 0x65, 0xbd, 0xda, 0x01, 0xd9, 0x72, 0xef, 0x27, 0x59, 0xaf, 0x2a,
+ 0x2b, 0xc5, 0x02, 0x8a, 0xde, 0x84, 0xb1, 0xba, 0x94, 0xc9, 0xc6, 0x3b, 0xfc, 0x52, 0x57, 0xfd,
+ 0x80, 0x52, 0x25, 0x71, 0x59, 0xc8, 0xb2, 0x56, 0x1f, 0x1b, 0xd4, 0x4c, 0x33, 0x82, 0x62, 0x2f,
+ 0x33, 0x82, 0x98, 0x6e, 0xbe, 0x52, 0xfd, 0x27, 0x2d, 0x18, 0xe2, 0xb2, 0xb8, 0xfe, 0x44, 0xa1,
+ 0x9a, 0x66, 0x2d, 0x1e, 0xbb, 0x3b, 0xb4, 0x50, 0x68, 0xca, 0xd0, 0x1a, 0x94, 0xd8, 0x0f, 0x26,
+ 0x4b, 0x2c, 0xe6, 0x5b, 0xdd, 0xf3, 0x56, 0xf5, 0x0e, 0xde, 0x91, 0xd5, 0x70, 0x4c, 0xc1, 0xfe,
+ 0xd1, 0x22, 0x3d, 0xdd, 0x62, 0x54, 0xe3, 0xd2, 0xb7, 0x1e, 0xdd, 0xa5, 0x5f, 0x78, 0x54, 0x97,
+ 0xfe, 0x36, 0x4c, 0xd6, 0x35, 0x3d, 0x5c, 0x3c, 0x93, 0x97, 0xbb, 0x2e, 0x12, 0x4d, 0x65, 0xc7,
+ 0xa5, 0x2c, 0xcb, 0x26, 0x11, 0x9c, 0xa4, 0x8a, 0xbe, 0x03, 0xc6, 0xf8, 0x3c, 0x8b, 0x56, 0xb8,
+ 0x25, 0xc6, 0x07, 0xf2, 0xd7, 0x8b, 0xde, 0x04, 0x97, 0xca, 0x69, 0xd5, 0xb1, 0x41, 0xcc, 0xfe,
+ 0x53, 0x0b, 0xd0, 0x4a, 0x7b, 0x87, 0xb4, 0x48, 0xe0, 0x34, 0x63, 0x71, 0xfa, 0x0f, 0x5a, 0x30,
+ 0x4b, 0x52, 0xc5, 0xcb, 0x7e, 0xab, 0x25, 0x1e, 0x2d, 0x39, 0xef, 0xea, 0x95, 0x9c, 0x3a, 0xca,
+ 0x2d, 0x61, 0x36, 0x0f, 0x03, 0xe7, 0xb6, 0x87, 0xd6, 0x60, 0x86, 0xdf, 0x92, 0x0a, 0xa0, 0xd9,
+ 0x5e, 0x3f, 0x2e, 0x08, 0xcf, 0x6c, 0xa4, 0x51, 0x70, 0x56, 0x3d, 0xfb, 0x7b, 0xc6, 0x20, 0xb7,
+ 0x17, 0xef, 0xe9, 0x11, 0xde, 0xd3, 0x23, 0xbc, 0xa7, 0x47, 0x78, 0x4f, 0x8f, 0xf0, 0x9e, 0x1e,
+ 0xe1, 0x5b, 0x5e, 0x8f, 0xf0, 0xff, 0x5b, 0x70, 0x5a, 0x5d, 0x03, 0xc6, 0xc3, 0xf7, 0xf3, 0x30,
+ 0xc3, 0xb7, 0xdb, 0x72, 0xd3, 0x71, 0x5b, 0x1b, 0xa4, 0xd5, 0x6e, 0x3a, 0x91, 0xd4, 0xba, 0x5f,
+ 0xcd, 0x5c, 0xb9, 0x09, 0x8b, 0x55, 0xa3, 0xe2, 0xd2, 0x63, 0xf4, 0x7a, 0xca, 0x00, 0xe0, 0xac,
+ 0x66, 0xec, 0x5f, 0x1a, 0x81, 0xc1, 0x95, 0x3d, 0xe2, 0x45, 0x27, 0xf0, 0x44, 0xa8, 0xc3, 0x84,
+ 0xeb, 0xed, 0xf9, 0xcd, 0x3d, 0xd2, 0xe0, 0xf0, 0xa3, 0xbc, 0x64, 0xcf, 0x08, 0xd2, 0x13, 0x15,
+ 0x83, 0x04, 0x4e, 0x90, 0x7c, 0x14, 0xd2, 0xe4, 0x6b, 0x30, 0xc4, 0x0f, 0x71, 0x21, 0x4a, 0xce,
+ 0x3c, 0xb3, 0xd9, 0x20, 0x8a, 0xab, 0x29, 0x96, 0x74, 0xf3, 0x4b, 0x42, 0x54, 0x47, 0x9f, 0x83,
+ 0x89, 0x2d, 0x37, 0x08, 0xa3, 0x0d, 0xb7, 0x45, 0xc2, 0xc8, 0x69, 0xb5, 0x1f, 0x42, 0x7a, 0xac,
+ 0xc6, 0x61, 0xd5, 0xa0, 0x84, 0x13, 0x94, 0xd1, 0x36, 0x8c, 0x37, 0x1d, 0xbd, 0xa9, 0xe1, 0x23,
+ 0x37, 0xa5, 0x6e, 0x87, 0x9b, 0x3a, 0x21, 0x6c, 0xd2, 0xa5, 0xdb, 0xa9, 0xce, 0x04, 0xa0, 0x23,
+ 0x4c, 0x2c, 0xa0, 0xb6, 0x13, 0x97, 0x7c, 0x72, 0x18, 0x65, 0x74, 0x98, 0x81, 0x6c, 0xc9, 0x64,
+ 0x74, 0x34, 0x33, 0xd8, 0xcf, 0x42, 0x89, 0xd0, 0x21, 0xa4, 0x84, 0xc5, 0x05, 0x73, 0xa5, 0xbf,
+ 0xbe, 0xae, 0xb9, 0xf5, 0xc0, 0x37, 0xe5, 0xf6, 0x2b, 0x92, 0x12, 0x8e, 0x89, 0xa2, 0x65, 0x18,
+ 0x0a, 0x49, 0xe0, 0x92, 0x50, 0x5c, 0x35, 0x5d, 0xa6, 0x91, 0xa1, 0x71, 0xdf, 0x12, 0xfe, 0x1b,
+ 0x8b, 0xaa, 0x74, 0x79, 0x39, 0x4c, 0xa4, 0xc9, 0x2e, 0x03, 0x6d, 0x79, 0x2d, 0xb2, 0x52, 0x2c,
+ 0xa0, 0xe8, 0x75, 0x18, 0x0e, 0x48, 0x93, 0x29, 0x86, 0xc6, 0xfb, 0x5f, 0xe4, 0x5c, 0xcf, 0xc4,
+ 0xeb, 0x61, 0x49, 0x00, 0xdd, 0x00, 0x14, 0x10, 0xca, 0x28, 0xb9, 0xde, 0xb6, 0x32, 0x1b, 0x15,
+ 0x07, 0xad, 0x62, 0x48, 0x71, 0x8c, 0x21, 0xdd, 0x7c, 0x70, 0x46, 0x35, 0x74, 0x0d, 0xa6, 0x55,
+ 0x69, 0xc5, 0x0b, 0x23, 0x87, 0x1e, 0x70, 0x93, 0x8c, 0x96, 0x92, 0x53, 0xe0, 0x24, 0x02, 0x4e,
+ 0xd7, 0xb1, 0xbf, 0x6c, 0x01, 0x1f, 0xe7, 0x13, 0x78, 0x9d, 0xbf, 0x66, 0xbe, 0xce, 0xcf, 0xe6,
+ 0xce, 0x5c, 0xce, 0xcb, 0xfc, 0xcb, 0x16, 0x8c, 0x6a, 0x33, 0x1b, 0xaf, 0x59, 0xab, 0xcb, 0x9a,
+ 0xed, 0xc0, 0x14, 0x5d, 0xe9, 0xb7, 0x36, 0x43, 0x12, 0xec, 0x91, 0x06, 0x5b, 0x98, 0x85, 0x87,
+ 0x5b, 0x98, 0xca, 0x44, 0xed, 0x66, 0x82, 0x20, 0x4e, 0x35, 0x61, 0x7f, 0x56, 0x76, 0x55, 0x59,
+ 0xf4, 0xd5, 0xd5, 0x9c, 0x27, 0x2c, 0xfa, 0xd4, 0xac, 0xe2, 0x18, 0x87, 0x6e, 0xb5, 0x1d, 0x3f,
+ 0x8c, 0x92, 0x16, 0x7d, 0xd7, 0xfd, 0x30, 0xc2, 0x0c, 0x62, 0xbf, 0x00, 0xb0, 0x72, 0x9f, 0xd4,
+ 0xf9, 0x8a, 0xd5, 0x1f, 0x0f, 0x56, 0xfe, 0xe3, 0xc1, 0xfe, 0x6d, 0x0b, 0x26, 0x56, 0x97, 0x8d,
+ 0x9b, 0x6b, 0x01, 0x80, 0xbf, 0x78, 0xee, 0xde, 0x5d, 0x97, 0xea, 0x70, 0xae, 0xd1, 0x54, 0xa5,
+ 0x58, 0xc3, 0x40, 0x67, 0xa1, 0xd8, 0xec, 0x78, 0x42, 0x7c, 0x38, 0x4c, 0xaf, 0xc7, 0x9b, 0x1d,
+ 0x0f, 0xd3, 0x32, 0xcd, 0xa5, 0xa0, 0xd8, 0xb7, 0x4b, 0x41, 0x4f, 0xd7, 0x7e, 0x34, 0x0f, 0x83,
+ 0xf7, 0xee, 0xb9, 0x0d, 0xee, 0x40, 0x29, 0x54, 0xf5, 0x77, 0xef, 0x56, 0xca, 0x21, 0xe6, 0xe5,
+ 0xf6, 0x97, 0x8a, 0x30, 0xb7, 0xda, 0x24, 0xf7, 0xdf, 0xa1, 0x13, 0x69, 0xbf, 0x0e, 0x11, 0x47,
+ 0x13, 0xc4, 0x1c, 0xd5, 0xe9, 0xa5, 0xf7, 0x78, 0x6c, 0xc1, 0x30, 0x37, 0x68, 0x93, 0x2e, 0xa5,
+ 0xaf, 0x66, 0xb5, 0x9e, 0x3f, 0x20, 0x0b, 0xdc, 0x30, 0x4e, 0x78, 0xc4, 0xa9, 0x0b, 0x53, 0x94,
+ 0x62, 0x49, 0x7c, 0xee, 0x15, 0x18, 0xd3, 0x31, 0x8f, 0xe4, 0x7e, 0xf6, 0xff, 0x14, 0x61, 0x8a,
+ 0xf6, 0xe0, 0x91, 0x4e, 0xc4, 0xed, 0xf4, 0x44, 0x1c, 0xb7, 0x0b, 0x52, 0xef, 0xd9, 0x78, 0x33,
+ 0x39, 0x1b, 0x57, 0xf3, 0x66, 0xe3, 0xa4, 0xe7, 0xe0, 0xbb, 0x2d, 0x98, 0x59, 0x6d, 0xfa, 0xf5,
+ 0xdd, 0x84, 0x9b, 0xd0, 0x4b, 0x30, 0x4a, 0x8f, 0xe3, 0xd0, 0xf0, 0x60, 0x37, 0x62, 0x1a, 0x08,
+ 0x10, 0xd6, 0xf1, 0xb4, 0x6a, 0xb7, 0x6f, 0x57, 0xca, 0x59, 0xa1, 0x10, 0x04, 0x08, 0xeb, 0x78,
+ 0xf6, 0x6f, 0x5a, 0x70, 0xee, 0xda, 0xf2, 0x4a, 0xbc, 0x14, 0x53, 0xd1, 0x18, 0x2e, 0xc1, 0x50,
+ 0xbb, 0xa1, 0x75, 0x25, 0x16, 0xaf, 0x96, 0x59, 0x2f, 0x04, 0xf4, 0xdd, 0x12, 0x69, 0xe4, 0x67,
+ 0x2d, 0x98, 0xb9, 0xe6, 0x46, 0xf4, 0x76, 0x4d, 0xc6, 0x05, 0xa0, 0xd7, 0x6b, 0xe8, 0x46, 0x7e,
+ 0xb0, 0x9f, 0x8c, 0x0b, 0x80, 0x15, 0x04, 0x6b, 0x58, 0xbc, 0xe5, 0x3d, 0x97, 0x99, 0x52, 0x17,
+ 0x4c, 0x45, 0x13, 0x16, 0xe5, 0x58, 0x61, 0xd0, 0x0f, 0x6b, 0xb8, 0x01, 0x93, 0xd1, 0xed, 0x8b,
+ 0x13, 0x56, 0x7d, 0x58, 0x59, 0x02, 0x70, 0x8c, 0x63, 0xff, 0xb1, 0x05, 0xf3, 0xd7, 0x9a, 0x9d,
+ 0x30, 0x22, 0xc1, 0x56, 0x98, 0x73, 0x3a, 0xbe, 0x00, 0x25, 0x22, 0x25, 0xe2, 0xa2, 0xd7, 0x8a,
+ 0x63, 0x54, 0xa2, 0x72, 0x1e, 0x9e, 0x40, 0xe1, 0xf5, 0xe1, 0x74, 0x78, 0x34, 0xaf, 0xb1, 0x55,
+ 0x40, 0x44, 0x6f, 0x4b, 0x8f, 0xd7, 0xc0, 0x1c, 0xbf, 0x57, 0x52, 0x50, 0x9c, 0x51, 0xc3, 0xfe,
+ 0x09, 0x0b, 0x4e, 0xab, 0x0f, 0x7e, 0xd7, 0x7d, 0xa6, 0xfd, 0xf3, 0x05, 0x18, 0xbf, 0xbe, 0xb1,
+ 0x51, 0xbd, 0x46, 0x22, 0x71, 0x6d, 0xf7, 0xd6, 0x73, 0x63, 0x4d, 0x5d, 0xd7, 0xed, 0x31, 0xd7,
+ 0x89, 0xdc, 0xe6, 0x02, 0x0f, 0xfb, 0xb3, 0x50, 0xf1, 0xa2, 0x5b, 0x41, 0x2d, 0x0a, 0x5c, 0x6f,
+ 0x3b, 0x53, 0xc1, 0x27, 0x99, 0x8b, 0x62, 0x1e, 0x73, 0x81, 0x5e, 0x80, 0x21, 0x16, 0x77, 0x48,
+ 0x4e, 0xc2, 0xe3, 0xea, 0x2d, 0xc4, 0x4a, 0x0f, 0x0f, 0xe6, 0x4b, 0xb7, 0x71, 0x85, 0xff, 0xc1,
+ 0x02, 0x15, 0xdd, 0x86, 0xd1, 0x9d, 0x28, 0x6a, 0x5f, 0x27, 0x4e, 0x83, 0x04, 0xf2, 0x38, 0x3c,
+ 0x9f, 0x75, 0x1c, 0xd2, 0x41, 0xe0, 0x68, 0xf1, 0x09, 0x12, 0x97, 0x85, 0x58, 0xa7, 0x63, 0xd7,
+ 0x00, 0x62, 0xd8, 0x31, 0x69, 0x2a, 0xec, 0x3f, 0xb4, 0x60, 0x98, 0x87, 0x80, 0x08, 0xd0, 0x47,
+ 0x61, 0x80, 0xdc, 0x27, 0x75, 0xc1, 0xf1, 0x66, 0x76, 0x38, 0xe6, 0xb4, 0xb8, 0xc4, 0x95, 0xfe,
+ 0xc7, 0xac, 0x16, 0xba, 0x0e, 0xc3, 0xb4, 0xb7, 0xd7, 0x54, 0x3c, 0x8c, 0x27, 0xf3, 0xbe, 0x58,
+ 0x4d, 0x3b, 0x67, 0xce, 0x44, 0x11, 0x96, 0xd5, 0x99, 0x7a, 0xb8, 0xde, 0xae, 0xd1, 0x13, 0x3b,
+ 0xea, 0xc6, 0x58, 0x6c, 0x2c, 0x57, 0x39, 0x92, 0xa0, 0xc6, 0xd5, 0xc3, 0xb2, 0x10, 0xc7, 0x44,
+ 0xec, 0x0d, 0x28, 0xd1, 0x49, 0x5d, 0x6c, 0xba, 0x4e, 0x77, 0x8d, 0xf7, 0x33, 0x50, 0x92, 0xfa,
+ 0xec, 0x50, 0xb8, 0x7e, 0x33, 0xaa, 0x52, 0xdd, 0x1d, 0xe2, 0x18, 0x6e, 0x6f, 0xc1, 0x29, 0x66,
+ 0x9d, 0xe8, 0x44, 0x3b, 0xc6, 0x1e, 0xeb, 0xbd, 0x98, 0x9f, 0x15, 0x0f, 0x48, 0x3e, 0x33, 0xb3,
+ 0x9a, 0x77, 0xe5, 0x98, 0xa4, 0x18, 0x3f, 0x26, 0xed, 0xaf, 0x0f, 0xc0, 0xe3, 0x95, 0x5a, 0x7e,
+ 0x74, 0x90, 0x97, 0x61, 0x8c, 0xf3, 0xa5, 0x74, 0x69, 0x3b, 0x4d, 0xd1, 0xae, 0x12, 0xb5, 0x6e,
+ 0x68, 0x30, 0x6c, 0x60, 0xa2, 0x73, 0x50, 0x74, 0xdf, 0xf2, 0x92, 0xbe, 0x47, 0x95, 0x37, 0xd6,
+ 0x31, 0x2d, 0xa7, 0x60, 0xca, 0xe2, 0xf2, 0xbb, 0x43, 0x81, 0x15, 0x9b, 0xfb, 0x1a, 0x4c, 0xb8,
+ 0x61, 0x3d, 0x74, 0x2b, 0x1e, 0x3d, 0x67, 0xb4, 0x93, 0x4a, 0x09, 0x37, 0x68, 0xa7, 0x15, 0x14,
+ 0x27, 0xb0, 0xb5, 0x8b, 0x6c, 0xb0, 0x6f, 0x36, 0xb9, 0xa7, 0x2f, 0x34, 0x7d, 0x01, 0xb4, 0xd9,
+ 0xd7, 0x85, 0x4c, 0x66, 0x2e, 0x5e, 0x00, 0xfc, 0x83, 0x43, 0x2c, 0x61, 0xf4, 0xe5, 0x58, 0xdf,
+ 0x71, 0xda, 0x8b, 0x9d, 0x68, 0xa7, 0xec, 0x86, 0x75, 0x7f, 0x8f, 0x04, 0xfb, 0xec, 0xd1, 0x3f,
+ 0x12, 0xbf, 0x1c, 0x15, 0x60, 0xf9, 0xfa, 0x62, 0x95, 0x62, 0xe2, 0x74, 0x1d, 0xb4, 0x08, 0x93,
+ 0xb2, 0xb0, 0x46, 0x42, 0x76, 0x85, 0x8d, 0x32, 0x32, 0xca, 0x1b, 0x48, 0x14, 0x2b, 0x22, 0x49,
+ 0x7c, 0x93, 0x93, 0x86, 0xe3, 0xe0, 0xa4, 0x3f, 0x02, 0xe3, 0xae, 0xe7, 0x46, 0xae, 0x13, 0xf9,
+ 0x5c, 0xe1, 0xc3, 0xdf, 0xf7, 0x4c, 0x92, 0x5d, 0xd1, 0x01, 0xd8, 0xc4, 0xb3, 0xff, 0xf3, 0x00,
+ 0x4c, 0xb3, 0x69, 0x7b, 0x6f, 0x85, 0x7d, 0x2b, 0xad, 0xb0, 0xdb, 0xe9, 0x15, 0x76, 0x1c, 0x4f,
+ 0x84, 0x87, 0x5e, 0x66, 0x9f, 0x83, 0x92, 0x72, 0x80, 0x92, 0x1e, 0x90, 0x56, 0x8e, 0x07, 0x64,
+ 0x6f, 0xee, 0x43, 0xda, 0x90, 0x15, 0x33, 0x6d, 0xc8, 0xfe, 0xaa, 0x05, 0xb1, 0x06, 0x03, 0x5d,
+ 0x87, 0x52, 0xdb, 0x67, 0xa6, 0x91, 0x81, 0xb4, 0x37, 0x7e, 0x3c, 0xf3, 0xa2, 0xe2, 0x97, 0x22,
+ 0xff, 0xf8, 0xaa, 0xac, 0x81, 0xe3, 0xca, 0x68, 0x09, 0x86, 0xdb, 0x01, 0xa9, 0x45, 0x2c, 0x48,
+ 0x48, 0x4f, 0x3a, 0x7c, 0x8d, 0x70, 0x7c, 0x2c, 0x2b, 0xda, 0xbf, 0x60, 0x01, 0x70, 0x33, 0x2d,
+ 0xc7, 0xdb, 0x26, 0x27, 0x20, 0xb5, 0x2e, 0xc3, 0x40, 0xd8, 0x26, 0xf5, 0x6e, 0x46, 0xab, 0x71,
+ 0x7f, 0x6a, 0x6d, 0x52, 0x8f, 0x07, 0x9c, 0xfe, 0xc3, 0xac, 0xb6, 0xfd, 0xbd, 0x00, 0x13, 0x31,
+ 0x5a, 0x25, 0x22, 0x2d, 0xf4, 0x9c, 0x11, 0x34, 0xe0, 0x6c, 0x22, 0x68, 0x40, 0x89, 0x61, 0x6b,
+ 0x02, 0xd2, 0xcf, 0x41, 0xb1, 0xe5, 0xdc, 0x17, 0x12, 0xb0, 0x67, 0xba, 0x77, 0x83, 0xd2, 0x5f,
+ 0x58, 0x73, 0xee, 0xf3, 0x47, 0xe2, 0x33, 0x72, 0x81, 0xac, 0x39, 0xf7, 0x0f, 0xb9, 0x69, 0x2a,
+ 0x3b, 0xa4, 0x6e, 0xba, 0x61, 0xf4, 0x85, 0xff, 0x14, 0xff, 0x67, 0xcb, 0x8e, 0x36, 0xc2, 0xda,
+ 0x72, 0x3d, 0x61, 0x81, 0xd4, 0x57, 0x5b, 0xae, 0x97, 0x6c, 0xcb, 0xf5, 0xfa, 0x68, 0xcb, 0xf5,
+ 0xd0, 0xdb, 0x30, 0x2c, 0x0c, 0x04, 0x45, 0x90, 0x9e, 0x2b, 0x7d, 0xb4, 0x27, 0xec, 0x0b, 0x79,
+ 0x9b, 0x57, 0xe4, 0x23, 0x58, 0x94, 0xf6, 0x6c, 0x57, 0x36, 0x88, 0xfe, 0x92, 0x05, 0x13, 0xe2,
+ 0x37, 0x26, 0x6f, 0x75, 0x48, 0x18, 0x09, 0xde, 0xf3, 0xc3, 0xfd, 0xf7, 0x41, 0x54, 0xe4, 0x5d,
+ 0xf9, 0xb0, 0x3c, 0x66, 0x4d, 0x60, 0xcf, 0x1e, 0x25, 0x7a, 0x81, 0xfe, 0x8e, 0x05, 0xa7, 0x5a,
+ 0xce, 0x7d, 0xde, 0x22, 0x2f, 0xc3, 0x4e, 0xe4, 0xfa, 0x42, 0xd1, 0xfe, 0xd1, 0xfe, 0xa6, 0x3f,
+ 0x55, 0x9d, 0x77, 0x52, 0x6a, 0x03, 0x4f, 0x65, 0xa1, 0xf4, 0xec, 0x6a, 0x66, 0xbf, 0xe6, 0xb6,
+ 0x60, 0x44, 0xae, 0xb7, 0x0c, 0x51, 0x43, 0x59, 0x67, 0xac, 0x8f, 0x6c, 0x9f, 0xa9, 0x3b, 0xe3,
+ 0xd3, 0x76, 0xc4, 0x5a, 0x7b, 0xa4, 0xed, 0x7c, 0x0e, 0xc6, 0xf4, 0x35, 0xf6, 0x48, 0xdb, 0x7a,
+ 0x0b, 0x66, 0x32, 0xd6, 0xd2, 0x23, 0x6d, 0xf2, 0x1e, 0x9c, 0xcd, 0x5d, 0x1f, 0x8f, 0xb2, 0x61,
+ 0xfb, 0xe7, 0x2d, 0xfd, 0x1c, 0x3c, 0x01, 0xd5, 0xc1, 0xb2, 0xa9, 0x3a, 0x38, 0xdf, 0x7d, 0xe7,
+ 0xe4, 0xe8, 0x0f, 0xde, 0xd4, 0x3b, 0x4d, 0x4f, 0x75, 0xf4, 0x3a, 0x0c, 0x35, 0x69, 0x89, 0x34,
+ 0x33, 0xb5, 0x7b, 0xef, 0xc8, 0x98, 0x97, 0x62, 0xe5, 0x21, 0x16, 0x14, 0xec, 0x5f, 0xb6, 0x60,
+ 0xe0, 0x04, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x5c, 0x2e, 0x69, 0x11, 0x3f, 0x78, 0x01, 0x3b, 0xf7,
+ 0x56, 0xee, 0x47, 0xc4, 0x0b, 0xd9, 0x53, 0x31, 0x73, 0x60, 0x7e, 0xda, 0x82, 0x99, 0x9b, 0xbe,
+ 0xd3, 0x58, 0x72, 0x9a, 0x8e, 0x57, 0x27, 0x41, 0xc5, 0xdb, 0x3e, 0x92, 0x8d, 0x74, 0xa1, 0xa7,
+ 0x8d, 0xf4, 0xb2, 0x34, 0x31, 0x1a, 0xc8, 0x9f, 0x3f, 0xca, 0x48, 0x26, 0xc3, 0xa8, 0x18, 0xc6,
+ 0xb0, 0x3b, 0x80, 0xf4, 0x5e, 0x0a, 0x8f, 0x15, 0x0c, 0xc3, 0x2e, 0xef, 0xaf, 0x98, 0xc4, 0xa7,
+ 0xb2, 0x19, 0xbc, 0xd4, 0xe7, 0x69, 0xbe, 0x18, 0xbc, 0x00, 0x4b, 0x42, 0xf6, 0xcb, 0x90, 0xe9,
+ 0xf6, 0xde, 0x5b, 0xf8, 0x60, 0x7f, 0x12, 0xa6, 0x59, 0xcd, 0x23, 0x3e, 0x8c, 0xed, 0x84, 0x6c,
+ 0x33, 0x23, 0x20, 0x9e, 0xfd, 0x45, 0x0b, 0x26, 0xd7, 0x13, 0x71, 0xc2, 0x2e, 0x31, 0x6d, 0x68,
+ 0x86, 0x48, 0xbd, 0xc6, 0x4a, 0xb1, 0x80, 0x1e, 0xbb, 0x24, 0xeb, 0x2f, 0x2c, 0x88, 0x23, 0x51,
+ 0x9c, 0x00, 0xfb, 0xb6, 0x6c, 0xb0, 0x6f, 0x99, 0x12, 0x16, 0xd5, 0x9d, 0x3c, 0xee, 0x0d, 0xdd,
+ 0x50, 0x31, 0x9a, 0xba, 0x08, 0x57, 0x62, 0x32, 0x7c, 0x29, 0x4e, 0x98, 0x81, 0x9c, 0x64, 0xd4,
+ 0x26, 0xfb, 0x77, 0x0a, 0x80, 0x14, 0x6e, 0xdf, 0x31, 0xa4, 0xd2, 0x35, 0x8e, 0x27, 0x86, 0xd4,
+ 0x1e, 0x20, 0xa6, 0xcf, 0x0f, 0x1c, 0x2f, 0xe4, 0x64, 0x5d, 0x21, 0xbb, 0x3b, 0x9a, 0xb1, 0xc0,
+ 0x9c, 0x68, 0x12, 0xdd, 0x4c, 0x51, 0xc3, 0x19, 0x2d, 0x68, 0x76, 0x1a, 0x83, 0xfd, 0xda, 0x69,
+ 0x0c, 0xf5, 0xf0, 0x4a, 0xfb, 0x39, 0x0b, 0xc6, 0xd5, 0x30, 0xbd, 0x4b, 0x6c, 0xc6, 0x55, 0x7f,
+ 0x72, 0x0e, 0xd0, 0xaa, 0xd6, 0x65, 0x76, 0xb1, 0x7c, 0x3b, 0xf3, 0x2e, 0x74, 0x9a, 0xee, 0xdb,
+ 0x44, 0x45, 0xf0, 0x9b, 0x17, 0xde, 0x82, 0xa2, 0xf4, 0xf0, 0x60, 0x7e, 0x5c, 0xfd, 0xe3, 0x11,
+ 0x83, 0xe3, 0x2a, 0xf4, 0x48, 0x9e, 0x4c, 0x2c, 0x45, 0xf4, 0x12, 0x0c, 0xb6, 0x77, 0x9c, 0x90,
+ 0x24, 0x7c, 0x6b, 0x06, 0xab, 0xb4, 0xf0, 0xf0, 0x60, 0x7e, 0x42, 0x55, 0x60, 0x25, 0x98, 0x63,
+ 0xf7, 0x1f, 0x99, 0x2b, 0xbd, 0x38, 0x7b, 0x46, 0xe6, 0xfa, 0x53, 0x0b, 0x06, 0xd6, 0xfd, 0xc6,
+ 0x49, 0x1c, 0x01, 0xaf, 0x19, 0x47, 0xc0, 0x13, 0x79, 0xc1, 0xdc, 0x73, 0x77, 0xff, 0x6a, 0x62,
+ 0xf7, 0x9f, 0xcf, 0xa5, 0xd0, 0x7d, 0xe3, 0xb7, 0x60, 0x94, 0x85, 0x88, 0x17, 0x7e, 0x44, 0x2f,
+ 0x18, 0x1b, 0x7e, 0x3e, 0xb1, 0xe1, 0x27, 0x35, 0x54, 0x6d, 0xa7, 0x3f, 0x0d, 0xc3, 0xc2, 0x31,
+ 0x25, 0xe9, 0xa4, 0x29, 0x70, 0xb1, 0x84, 0xdb, 0x3f, 0x59, 0x04, 0x23, 0x24, 0x3d, 0xfa, 0x55,
+ 0x0b, 0x16, 0x02, 0x6e, 0xb0, 0xda, 0x28, 0x77, 0x02, 0xd7, 0xdb, 0xae, 0xd5, 0x77, 0x48, 0xa3,
+ 0xd3, 0x74, 0xbd, 0xed, 0xca, 0xb6, 0xe7, 0xab, 0xe2, 0x95, 0xfb, 0xa4, 0xde, 0x61, 0x4a, 0xb0,
+ 0x1e, 0xf1, 0xef, 0x95, 0xe1, 0xf7, 0xf3, 0x0f, 0x0e, 0xe6, 0x17, 0xf0, 0x91, 0x68, 0xe3, 0x23,
+ 0xf6, 0x05, 0xfd, 0xa6, 0x05, 0x57, 0x78, 0xa4, 0xf6, 0xfe, 0xfb, 0xdf, 0xe5, 0xb5, 0x5c, 0x95,
+ 0xa4, 0x62, 0x22, 0x1b, 0x24, 0x68, 0x2d, 0x7d, 0x44, 0x0c, 0xe8, 0x95, 0xea, 0xd1, 0xda, 0xc2,
+ 0x47, 0xed, 0x9c, 0xfd, 0x4f, 0x8a, 0x30, 0x2e, 0x22, 0x38, 0x89, 0x3b, 0xe0, 0x25, 0x63, 0x49,
+ 0x3c, 0x99, 0x58, 0x12, 0xd3, 0x06, 0xf2, 0xf1, 0x1c, 0xff, 0x21, 0x4c, 0xd3, 0xc3, 0xf9, 0x3a,
+ 0x71, 0x82, 0x68, 0x93, 0x38, 0xdc, 0xfc, 0xaa, 0x78, 0xe4, 0xd3, 0x5f, 0x89, 0xe7, 0x6e, 0x26,
+ 0x89, 0xe1, 0x34, 0xfd, 0x6f, 0xa5, 0x3b, 0xc7, 0x83, 0xa9, 0x54, 0x10, 0xae, 0x4f, 0x41, 0x49,
+ 0x79, 0x55, 0x88, 0x43, 0xa7, 0x7b, 0x2c, 0xbb, 0x24, 0x05, 0x2e, 0x42, 0x8b, 0x3d, 0x7a, 0x62,
+ 0x72, 0xf6, 0xdf, 0x2b, 0x18, 0x0d, 0xf2, 0x49, 0x5c, 0x87, 0x11, 0x27, 0x0c, 0xdd, 0x6d, 0x8f,
+ 0x34, 0xc4, 0x8e, 0x7d, 0x7f, 0xde, 0x8e, 0x35, 0x9a, 0x61, 0x9e, 0x2d, 0x8b, 0xa2, 0x26, 0x56,
+ 0x34, 0xd0, 0x75, 0x6e, 0xe4, 0xb6, 0x27, 0xdf, 0x7b, 0xfd, 0x51, 0x03, 0x69, 0x06, 0xb7, 0x47,
+ 0xb0, 0xa8, 0x8f, 0x3e, 0xcd, 0xad, 0x10, 0x6f, 0x78, 0xfe, 0x3d, 0xef, 0x9a, 0xef, 0xcb, 0x28,
+ 0x09, 0xfd, 0x11, 0x9c, 0x96, 0xb6, 0x87, 0xaa, 0x3a, 0x36, 0xa9, 0xf5, 0x17, 0xd5, 0xf2, 0xf3,
+ 0x30, 0x43, 0x49, 0x9b, 0x4e, 0xcc, 0x21, 0x22, 0x30, 0x29, 0xc2, 0x83, 0xc9, 0x32, 0x31, 0x76,
+ 0x99, 0x4f, 0x39, 0xb3, 0x76, 0x2c, 0x47, 0xbe, 0x61, 0x92, 0xc0, 0x49, 0x9a, 0xf6, 0xcf, 0x58,
+ 0xc0, 0x1c, 0x3a, 0x4f, 0x80, 0x1f, 0xf9, 0x98, 0xc9, 0x8f, 0xcc, 0xe6, 0x0d, 0x72, 0x0e, 0x2b,
+ 0xf2, 0x22, 0x5f, 0x59, 0xd5, 0xc0, 0xbf, 0xbf, 0x2f, 0x4c, 0x47, 0x7a, 0xbf, 0x3f, 0xec, 0xff,
+ 0x65, 0xf1, 0x43, 0x4c, 0xf9, 0x3c, 0xa0, 0xef, 0x84, 0x91, 0xba, 0xd3, 0x76, 0xea, 0x3c, 0x7f,
+ 0x4a, 0xae, 0x44, 0xcf, 0xa8, 0xb4, 0xb0, 0x2c, 0x6a, 0x70, 0x09, 0x95, 0x0c, 0x33, 0x37, 0x22,
+ 0x8b, 0x7b, 0x4a, 0xa5, 0x54, 0x93, 0x73, 0xbb, 0x30, 0x6e, 0x10, 0x7b, 0xa4, 0xe2, 0x8c, 0xef,
+ 0xe4, 0x57, 0xac, 0x0a, 0x8b, 0xd8, 0x82, 0x69, 0x4f, 0xfb, 0x4f, 0x2f, 0x14, 0xf9, 0xb8, 0x7c,
+ 0x7f, 0xaf, 0x4b, 0x94, 0xdd, 0x3e, 0x9a, 0xaf, 0x68, 0x82, 0x0c, 0x4e, 0x53, 0xb6, 0x7f, 0xca,
+ 0x82, 0xc7, 0x74, 0x44, 0xcd, 0x1d, 0xa5, 0x97, 0x8e, 0xa0, 0x0c, 0x23, 0x7e, 0x9b, 0x04, 0x4e,
+ 0xe4, 0x07, 0xe2, 0xd6, 0xb8, 0x2c, 0x07, 0xfd, 0x96, 0x28, 0x3f, 0x14, 0xd1, 0xc7, 0x25, 0x75,
+ 0x59, 0x8e, 0x55, 0x4d, 0xfa, 0xfa, 0x64, 0x83, 0x11, 0x0a, 0xc7, 0x23, 0x76, 0x06, 0x30, 0x75,
+ 0x79, 0x88, 0x05, 0xc4, 0xfe, 0xba, 0xc5, 0x17, 0x96, 0xde, 0x75, 0xf4, 0x16, 0x4c, 0xb5, 0x9c,
+ 0xa8, 0xbe, 0xb3, 0x72, 0xbf, 0x1d, 0x70, 0x8d, 0x8b, 0x1c, 0xa7, 0x67, 0x7a, 0x8d, 0x93, 0xf6,
+ 0x91, 0xb1, 0x61, 0xe5, 0x5a, 0x82, 0x18, 0x4e, 0x91, 0x47, 0x9b, 0x30, 0xca, 0xca, 0x98, 0x4f,
+ 0x5d, 0xd8, 0x8d, 0x35, 0xc8, 0x6b, 0x4d, 0x59, 0x1c, 0xac, 0xc5, 0x74, 0xb0, 0x4e, 0xd4, 0xfe,
+ 0x4a, 0x91, 0xef, 0x76, 0xc6, 0xca, 0x3f, 0x0d, 0xc3, 0x6d, 0xbf, 0xb1, 0x5c, 0x29, 0x63, 0x31,
+ 0x0b, 0xea, 0x1a, 0xa9, 0xf2, 0x62, 0x2c, 0xe1, 0xe8, 0x32, 0x8c, 0x88, 0x9f, 0x52, 0x43, 0xc6,
+ 0xce, 0x66, 0x81, 0x17, 0x62, 0x05, 0x45, 0xcf, 0x03, 0xb4, 0x03, 0x7f, 0xcf, 0x6d, 0xb0, 0x58,
+ 0x0f, 0x45, 0xd3, 0x58, 0xa8, 0xaa, 0x20, 0x58, 0xc3, 0x42, 0xaf, 0xc2, 0x78, 0xc7, 0x0b, 0x39,
+ 0x3b, 0xa2, 0x45, 0x76, 0x55, 0x66, 0x2c, 0xb7, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x22, 0x0c, 0x45,
+ 0x0e, 0x33, 0x7e, 0x19, 0xcc, 0x37, 0xbe, 0xdd, 0xa0, 0x18, 0x7a, 0xaa, 0x0e, 0x5a, 0x01, 0x8b,
+ 0x8a, 0xe8, 0x53, 0xd2, 0xbd, 0x95, 0x1f, 0xec, 0xc2, 0xea, 0xbd, 0xbf, 0x4b, 0x40, 0x73, 0x6e,
+ 0x15, 0xd6, 0xf4, 0x06, 0x2d, 0xf4, 0x0a, 0x00, 0xb9, 0x1f, 0x91, 0xc0, 0x73, 0x9a, 0xca, 0xb6,
+ 0x4c, 0xf1, 0x05, 0x65, 0x7f, 0xdd, 0x8f, 0x6e, 0x87, 0x64, 0x45, 0x61, 0x60, 0x0d, 0xdb, 0xfe,
+ 0xcd, 0x12, 0x40, 0xcc, 0xb7, 0xa3, 0xb7, 0x53, 0x07, 0xd7, 0xb3, 0xdd, 0x39, 0xfd, 0xe3, 0x3b,
+ 0xb5, 0xd0, 0xf7, 0x59, 0x30, 0xea, 0x34, 0x9b, 0x7e, 0xdd, 0xe1, 0xb1, 0x77, 0x0b, 0xdd, 0x0f,
+ 0x4e, 0xd1, 0xfe, 0x62, 0x5c, 0x83, 0x77, 0xe1, 0x05, 0xb9, 0x42, 0x35, 0x48, 0xcf, 0x5e, 0xe8,
+ 0x0d, 0xa3, 0x0f, 0xc9, 0xa7, 0x62, 0xd1, 0x18, 0x4a, 0xf5, 0x54, 0x2c, 0xb1, 0x3b, 0x42, 0x7f,
+ 0x25, 0xde, 0x36, 0x5e, 0x89, 0x03, 0xf9, 0xfe, 0x7b, 0x06, 0xfb, 0xda, 0xeb, 0x81, 0x88, 0xaa,
+ 0xba, 0x2f, 0xff, 0x60, 0xbe, 0xb3, 0x9c, 0xf6, 0x4e, 0xea, 0xe1, 0xc7, 0xff, 0x39, 0x98, 0x6c,
+ 0x98, 0x4c, 0x80, 0x58, 0x89, 0x4f, 0xe5, 0xd1, 0x4d, 0xf0, 0x0c, 0xf1, 0xb5, 0x9f, 0x00, 0xe0,
+ 0x24, 0x61, 0x54, 0xe5, 0xa1, 0x1d, 0x2a, 0xde, 0x96, 0x2f, 0x3c, 0x2f, 0xec, 0xdc, 0xb9, 0xdc,
+ 0x0f, 0x23, 0xd2, 0xa2, 0x98, 0xf1, 0xed, 0xbe, 0x2e, 0xea, 0x62, 0x45, 0x05, 0xbd, 0x0e, 0x43,
+ 0xcc, 0x5b, 0x2a, 0x9c, 0x1d, 0xc9, 0x97, 0x38, 0x9b, 0xb1, 0xca, 0xe2, 0x0d, 0xc9, 0xfe, 0x86,
+ 0x58, 0x50, 0x40, 0xd7, 0xa5, 0x2f, 0x62, 0x58, 0xf1, 0x6e, 0x87, 0x84, 0xf9, 0x22, 0x96, 0x96,
+ 0xde, 0x1f, 0xbb, 0x19, 0xf2, 0xf2, 0xcc, 0x84, 0x5e, 0x46, 0x4d, 0xca, 0x45, 0x89, 0xff, 0x32,
+ 0x4f, 0xd8, 0x2c, 0xe4, 0x77, 0xcf, 0xcc, 0x25, 0x16, 0x0f, 0xe7, 0x1d, 0x93, 0x04, 0x4e, 0xd2,
+ 0xa4, 0x1c, 0x29, 0xdf, 0xf5, 0xc2, 0x77, 0xa3, 0xd7, 0xd9, 0xc1, 0x1f, 0xe2, 0xec, 0x36, 0xe2,
+ 0x25, 0x58, 0xd4, 0x3f, 0x51, 0xf6, 0x60, 0xce, 0x83, 0xa9, 0xe4, 0x16, 0x7d, 0xa4, 0xec, 0xc8,
+ 0x1f, 0x0e, 0xc0, 0x84, 0xb9, 0xa4, 0xd0, 0x15, 0x28, 0x09, 0x22, 0x2a, 0xb6, 0xbf, 0xda, 0x25,
+ 0x6b, 0x12, 0x80, 0x63, 0x1c, 0x96, 0xd2, 0x81, 0x55, 0xd7, 0x8c, 0x75, 0xe3, 0x94, 0x0e, 0x0a,
+ 0x82, 0x35, 0x2c, 0xfa, 0xb0, 0xda, 0xf4, 0xfd, 0x48, 0x5d, 0x48, 0x6a, 0xdd, 0x2d, 0xb1, 0x52,
+ 0x2c, 0xa0, 0xf4, 0x22, 0xda, 0x25, 0x81, 0x47, 0x9a, 0x66, 0x14, 0x60, 0x75, 0x11, 0xdd, 0xd0,
+ 0x81, 0xd8, 0xc4, 0xa5, 0xd7, 0xa9, 0x1f, 0xb2, 0x85, 0x2c, 0x9e, 0x6f, 0xb1, 0xf1, 0x73, 0x8d,
+ 0xbb, 0x43, 0x4b, 0x38, 0xfa, 0x24, 0x3c, 0xa6, 0x22, 0x1d, 0x61, 0xae, 0xcd, 0x90, 0x2d, 0x0e,
+ 0x19, 0xd2, 0x96, 0xc7, 0x96, 0xb3, 0xd1, 0x70, 0x5e, 0x7d, 0xf4, 0x1a, 0x4c, 0x08, 0x16, 0x5f,
+ 0x52, 0x1c, 0x36, 0x0d, 0x6c, 0x6e, 0x18, 0x50, 0x9c, 0xc0, 0x96, 0x71, 0x8c, 0x19, 0x97, 0x2d,
+ 0x29, 0x8c, 0xa4, 0xe3, 0x18, 0xeb, 0x70, 0x9c, 0xaa, 0x81, 0x16, 0x61, 0x92, 0xf3, 0x60, 0xae,
+ 0xb7, 0xcd, 0xe7, 0x44, 0xb8, 0x56, 0xa9, 0x2d, 0x75, 0xcb, 0x04, 0xe3, 0x24, 0x3e, 0x7a, 0x19,
+ 0xc6, 0x9c, 0xa0, 0xbe, 0xe3, 0x46, 0xa4, 0x1e, 0x75, 0x02, 0xee, 0x73, 0xa5, 0x59, 0x28, 0x2d,
+ 0x6a, 0x30, 0x6c, 0x60, 0xda, 0x6f, 0xc3, 0x4c, 0x46, 0x9c, 0x04, 0xba, 0x70, 0x9c, 0xb6, 0x2b,
+ 0xbf, 0x29, 0x61, 0xc6, 0xbc, 0x58, 0xad, 0xc8, 0xaf, 0xd1, 0xb0, 0xe8, 0xea, 0x64, 0xf1, 0x14,
+ 0xb4, 0xb4, 0x80, 0x6a, 0x75, 0xae, 0x4a, 0x00, 0x8e, 0x71, 0xec, 0xff, 0x56, 0x80, 0xc9, 0x0c,
+ 0xdd, 0x0a, 0x4b, 0x4d, 0x97, 0x78, 0xa4, 0xc4, 0x99, 0xe8, 0xcc, 0xb0, 0xd8, 0x85, 0x23, 0x84,
+ 0xc5, 0x2e, 0xf6, 0x0a, 0x8b, 0x3d, 0xf0, 0x4e, 0xc2, 0x62, 0x9b, 0x23, 0x36, 0xd8, 0xd7, 0x88,
+ 0x65, 0x84, 0xd2, 0x1e, 0x3a, 0x62, 0x28, 0x6d, 0x63, 0xd0, 0x87, 0xfb, 0x18, 0xf4, 0x1f, 0x2d,
+ 0xc0, 0x54, 0xd2, 0x92, 0xf2, 0x04, 0xe4, 0xb6, 0xaf, 0x1b, 0x72, 0xdb, 0xcb, 0xfd, 0xb8, 0xc2,
+ 0xe6, 0xca, 0x70, 0x71, 0x42, 0x86, 0xfb, 0xc1, 0xbe, 0xa8, 0x75, 0x97, 0xe7, 0xfe, 0xf5, 0x02,
+ 0x9c, 0xce, 0xf4, 0xc5, 0x3d, 0x81, 0xb1, 0xb9, 0x65, 0x8c, 0xcd, 0x73, 0x7d, 0xbb, 0x09, 0xe7,
+ 0x0e, 0xd0, 0xdd, 0xc4, 0x00, 0x5d, 0xe9, 0x9f, 0x64, 0xf7, 0x51, 0xfa, 0x5a, 0x11, 0xce, 0x67,
+ 0xd6, 0x8b, 0xc5, 0x9e, 0xab, 0x86, 0xd8, 0xf3, 0xf9, 0x84, 0xd8, 0xd3, 0xee, 0x5e, 0xfb, 0x78,
+ 0xe4, 0xa0, 0xc2, 0x5d, 0x96, 0x39, 0xfd, 0x3f, 0xa4, 0x0c, 0xd4, 0x70, 0x97, 0x55, 0x84, 0xb0,
+ 0x49, 0xf7, 0x5b, 0x49, 0xf6, 0xf9, 0xaf, 0x2d, 0x38, 0x9b, 0x39, 0x37, 0x27, 0x20, 0xeb, 0x5a,
+ 0x37, 0x65, 0x5d, 0x4f, 0xf7, 0xbd, 0x5a, 0x73, 0x84, 0x5f, 0x5f, 0x19, 0xcc, 0xf9, 0x16, 0xf6,
+ 0x92, 0xbf, 0x05, 0xa3, 0x4e, 0xbd, 0x4e, 0xc2, 0x70, 0xcd, 0x6f, 0xa8, 0xc8, 0xbf, 0xcf, 0xb1,
+ 0x77, 0x56, 0x5c, 0x7c, 0x78, 0x30, 0x3f, 0x97, 0x24, 0x11, 0x83, 0xb1, 0x4e, 0x01, 0x7d, 0x1a,
+ 0x46, 0x42, 0x71, 0x6f, 0x8a, 0xb9, 0x7f, 0xa1, 0xcf, 0xc1, 0x71, 0x36, 0x49, 0xd3, 0x0c, 0x4d,
+ 0xa4, 0x24, 0x15, 0x8a, 0xa4, 0x19, 0xc6, 0xa4, 0x70, 0xac, 0x61, 0x4c, 0x9e, 0x07, 0xd8, 0x53,
+ 0x8f, 0x81, 0xa4, 0xfc, 0x41, 0x7b, 0x26, 0x68, 0x58, 0xe8, 0xe3, 0x30, 0x15, 0xf2, 0xd8, 0x7d,
+ 0xcb, 0x4d, 0x27, 0x64, 0xce, 0x32, 0x62, 0x15, 0xb2, 0xf0, 0x47, 0xb5, 0x04, 0x0c, 0xa7, 0xb0,
+ 0xd1, 0xaa, 0x6c, 0x95, 0x05, 0x1a, 0xe4, 0x0b, 0xf3, 0x52, 0xdc, 0xa2, 0x48, 0x8c, 0x7b, 0x2a,
+ 0x39, 0xfc, 0x6c, 0xe0, 0xb5, 0x9a, 0xe8, 0xd3, 0x00, 0x74, 0xf9, 0x08, 0x39, 0xc4, 0x70, 0xfe,
+ 0xe1, 0x49, 0x4f, 0x95, 0x46, 0xa6, 0x6d, 0x2f, 0xf3, 0x70, 0x2d, 0x2b, 0x22, 0x58, 0x23, 0x88,
+ 0xb6, 0x60, 0x3c, 0xfe, 0x17, 0xe7, 0x8d, 0x3c, 0x62, 0x0b, 0x4c, 0xee, 0x5d, 0xd6, 0xe9, 0x60,
+ 0x93, 0xac, 0xfd, 0xa3, 0x03, 0xf0, 0x78, 0x97, 0xb3, 0x18, 0x2d, 0x9a, 0xfa, 0xde, 0x67, 0x92,
+ 0x8f, 0xf8, 0xb9, 0xcc, 0xca, 0xc6, 0xab, 0x3e, 0xb1, 0xe4, 0x0b, 0xef, 0x78, 0xc9, 0xff, 0x90,
+ 0xa5, 0x89, 0x57, 0xb8, 0x65, 0xe9, 0xc7, 0x8e, 0x78, 0xc7, 0x1c, 0xa3, 0xbc, 0x65, 0x2b, 0x43,
+ 0x68, 0xf1, 0x7c, 0xdf, 0xdd, 0xe9, 0x5b, 0x8a, 0x71, 0xb2, 0xd2, 0xe8, 0xdf, 0xb6, 0xe0, 0x5c,
+ 0xd7, 0xe0, 0x20, 0xdf, 0x84, 0x8c, 0x89, 0xfd, 0x05, 0x0b, 0x9e, 0xcc, 0xac, 0x61, 0x98, 0x33,
+ 0x5d, 0x81, 0x52, 0x9d, 0x16, 0x6a, 0xde, 0xa0, 0xb1, 0x9b, 0xbc, 0x04, 0xe0, 0x18, 0xc7, 0xb0,
+ 0x5a, 0x2a, 0xf4, 0xb4, 0x5a, 0xfa, 0x35, 0x0b, 0x52, 0x87, 0xcb, 0x09, 0xdc, 0x72, 0x15, 0xf3,
+ 0x96, 0x7b, 0x7f, 0x3f, 0xa3, 0x99, 0x73, 0xc1, 0xfd, 0xc9, 0x24, 0x9c, 0xc9, 0xf1, 0x86, 0xda,
+ 0x83, 0xe9, 0xed, 0x3a, 0x31, 0xfd, 0x6c, 0xbb, 0xc5, 0x9f, 0xe9, 0xea, 0x94, 0xcb, 0x52, 0x84,
+ 0x4e, 0xa7, 0x50, 0x70, 0xba, 0x09, 0xf4, 0x05, 0x0b, 0x4e, 0x39, 0xf7, 0xc2, 0x15, 0xca, 0xad,
+ 0xb8, 0xf5, 0xa5, 0xa6, 0x5f, 0xdf, 0xa5, 0x57, 0x81, 0xdc, 0x08, 0x2f, 0x66, 0x4a, 0x90, 0xee,
+ 0xd6, 0x52, 0xf8, 0x46, 0xf3, 0x2c, 0x67, 0x6a, 0x16, 0x16, 0xce, 0x6c, 0x0b, 0x61, 0x11, 0x48,
+ 0x9f, 0xbe, 0x85, 0xba, 0x78, 0x82, 0x67, 0xb9, 0xad, 0xf1, 0xeb, 0x57, 0x42, 0xb0, 0xa2, 0x83,
+ 0x3e, 0x0b, 0xa5, 0x6d, 0xe9, 0x4b, 0x9a, 0x71, 0xbd, 0xc7, 0x03, 0xd9, 0xdd, 0xc3, 0x96, 0xab,
+ 0x81, 0x15, 0x12, 0x8e, 0x89, 0xa2, 0xd7, 0xa0, 0xe8, 0x6d, 0x85, 0xdd, 0xd2, 0x8e, 0x26, 0xec,
+ 0xfd, 0x78, 0xbc, 0x85, 0xf5, 0xd5, 0x1a, 0xa6, 0x15, 0xd1, 0x75, 0x28, 0x06, 0x9b, 0x0d, 0x21,
+ 0xfe, 0xcc, 0xdc, 0xa4, 0x78, 0xa9, 0x9c, 0xd3, 0x2b, 0x46, 0x09, 0x2f, 0x95, 0x31, 0x25, 0x81,
+ 0xaa, 0x30, 0xc8, 0x5c, 0x88, 0xc4, 0x65, 0x9a, 0xf9, 0x6c, 0xe8, 0xe2, 0x8a, 0xc7, 0x83, 0x32,
+ 0x30, 0x04, 0xcc, 0x09, 0xa1, 0x0d, 0x18, 0xaa, 0xb3, 0x14, 0x95, 0xe2, 0xf6, 0xfc, 0x50, 0xa6,
+ 0xa0, 0xb3, 0x4b, 0xee, 0x4e, 0x21, 0xf7, 0x63, 0x18, 0x58, 0xd0, 0x62, 0x54, 0x49, 0x7b, 0x67,
+ 0x2b, 0x14, 0x29, 0x95, 0xb3, 0xa9, 0x76, 0x49, 0x49, 0x2b, 0xa8, 0x32, 0x0c, 0x2c, 0x68, 0xa1,
+ 0x57, 0xa0, 0xb0, 0x55, 0x17, 0xee, 0x41, 0x99, 0x12, 0x4f, 0x33, 0x64, 0xc6, 0xd2, 0xd0, 0x83,
+ 0x83, 0xf9, 0xc2, 0xea, 0x32, 0x2e, 0x6c, 0xd5, 0xd1, 0x3a, 0x0c, 0x6f, 0x71, 0x27, 0x7b, 0x21,
+ 0xd4, 0x7c, 0x2a, 0xdb, 0xff, 0x3f, 0xe5, 0x87, 0xcf, 0x3d, 0x63, 0x04, 0x00, 0x4b, 0x22, 0x2c,
+ 0x2e, 0xbd, 0x0a, 0x16, 0x20, 0x62, 0x95, 0x2d, 0x1c, 0x2d, 0xc0, 0x03, 0x67, 0x6e, 0xe2, 0x90,
+ 0x03, 0x58, 0xa3, 0x48, 0x57, 0xb5, 0x23, 0xf3, 0xda, 0x8b, 0xa0, 0x36, 0x99, 0xab, 0xba, 0x47,
+ 0xca, 0x7f, 0xbe, 0xaa, 0x15, 0x12, 0x8e, 0x89, 0xa2, 0x5d, 0x18, 0xdf, 0x0b, 0xdb, 0x3b, 0x44,
+ 0x6e, 0x69, 0x16, 0xe3, 0x26, 0xe7, 0x5e, 0xbe, 0x23, 0x10, 0xdd, 0x20, 0xea, 0x38, 0xcd, 0xd4,
+ 0x29, 0xc4, 0x78, 0xa8, 0x3b, 0x3a, 0x31, 0x6c, 0xd2, 0xa6, 0xc3, 0xff, 0x56, 0xc7, 0xdf, 0xdc,
+ 0x8f, 0x88, 0x08, 0x31, 0x96, 0x39, 0xfc, 0x6f, 0x70, 0x94, 0xf4, 0xf0, 0x0b, 0x00, 0x96, 0x44,
+ 0xd0, 0x1d, 0x31, 0x3c, 0xec, 0xf4, 0x9c, 0xca, 0x8f, 0x03, 0xba, 0x28, 0x91, 0x72, 0x06, 0x85,
+ 0x9d, 0x96, 0x31, 0x29, 0x76, 0x4a, 0xb6, 0x77, 0xfc, 0xc8, 0xf7, 0x12, 0x27, 0xf4, 0x74, 0xfe,
+ 0x29, 0x59, 0xcd, 0xc0, 0x4f, 0x9f, 0x92, 0x59, 0x58, 0x38, 0xb3, 0x2d, 0xd4, 0x80, 0x89, 0xb6,
+ 0x1f, 0x44, 0xf7, 0xfc, 0x40, 0xae, 0x2f, 0xd4, 0x45, 0x28, 0x63, 0x60, 0x8a, 0x16, 0x59, 0xf4,
+ 0x3e, 0x13, 0x82, 0x13, 0x34, 0xd1, 0x27, 0x60, 0x38, 0xac, 0x3b, 0x4d, 0x52, 0xb9, 0x35, 0x3b,
+ 0x93, 0x7f, 0xfd, 0xd4, 0x38, 0x4a, 0xce, 0xea, 0xe2, 0x41, 0xec, 0x39, 0x0a, 0x96, 0xe4, 0xd0,
+ 0x2a, 0x0c, 0xb2, 0xbc, 0x63, 0x2c, 0x1e, 0x5e, 0x4e, 0x38, 0xd3, 0x94, 0xf5, 0x35, 0x3f, 0x9b,
+ 0x58, 0x31, 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0x6f, 0x13, 0x3f, 0x9c, 0x3d, 0x9d, 0xbf, 0x07, 0xc4,
+ 0x93, 0xe6, 0x56, 0xad, 0xdb, 0x1e, 0x50, 0x48, 0x38, 0x26, 0x4a, 0x4f, 0x66, 0x7a, 0x9a, 0x9e,
+ 0xe9, 0x62, 0x36, 0x94, 0x7b, 0x96, 0xb2, 0x93, 0x99, 0x9e, 0xa4, 0x94, 0x84, 0xfd, 0xfb, 0xc3,
+ 0x69, 0x9e, 0x85, 0xbd, 0x66, 0xbf, 0xc7, 0x4a, 0x29, 0x3a, 0x3f, 0xdc, 0xaf, 0x70, 0xed, 0x18,
+ 0x59, 0xf0, 0x2f, 0x58, 0x70, 0xa6, 0x9d, 0xf9, 0x21, 0x82, 0x01, 0xe8, 0x4f, 0x46, 0xc7, 0x3f,
+ 0x5d, 0xc5, 0x4e, 0xcc, 0x86, 0xe3, 0x9c, 0x96, 0x92, 0xcf, 0x9c, 0xe2, 0x3b, 0x7e, 0xe6, 0xac,
+ 0xc1, 0x08, 0x63, 0x32, 0x7b, 0xa4, 0x6c, 0x4e, 0xbe, 0xf9, 0x18, 0x2b, 0xb1, 0x2c, 0x2a, 0x62,
+ 0x45, 0x02, 0xfd, 0xb0, 0x05, 0xe7, 0x92, 0x5d, 0xc7, 0x84, 0x81, 0x45, 0xc0, 0x45, 0xfe, 0x90,
+ 0x5e, 0x15, 0xdf, 0x9f, 0xe2, 0xff, 0x0d, 0xe4, 0xc3, 0x5e, 0x08, 0xb8, 0x7b, 0x63, 0xa8, 0x9c,
+ 0xf1, 0x92, 0x1f, 0x32, 0xb5, 0x17, 0x7d, 0xbc, 0xe6, 0x5f, 0x84, 0xb1, 0x96, 0xdf, 0xf1, 0x22,
+ 0x61, 0x65, 0x24, 0x2c, 0x1e, 0x98, 0xa6, 0x7f, 0x4d, 0x2b, 0xc7, 0x06, 0x56, 0x42, 0x06, 0x30,
+ 0xf2, 0xd0, 0x32, 0x80, 0x37, 0x61, 0xcc, 0xd3, 0xcc, 0x62, 0x05, 0x3f, 0x70, 0x29, 0x3f, 0x58,
+ 0xaa, 0x6e, 0x44, 0xcb, 0x7b, 0xa9, 0x97, 0x60, 0x83, 0xda, 0xc9, 0x3e, 0xf8, 0xbe, 0x6c, 0x65,
+ 0x30, 0xf5, 0x5c, 0x04, 0xf0, 0x51, 0x53, 0x04, 0x70, 0x29, 0x29, 0x02, 0x48, 0x49, 0xae, 0x8d,
+ 0xd7, 0x7f, 0xff, 0xb9, 0x60, 0xfa, 0x0d, 0xb8, 0x68, 0x37, 0xe1, 0x42, 0xaf, 0x6b, 0x89, 0x99,
+ 0x9b, 0x35, 0x94, 0x9e, 0x32, 0x36, 0x37, 0x6b, 0x54, 0xca, 0x98, 0x41, 0xfa, 0x0d, 0xe5, 0x63,
+ 0xff, 0x17, 0x0b, 0x8a, 0x55, 0xbf, 0x71, 0x02, 0x0f, 0xde, 0x8f, 0x19, 0x0f, 0xde, 0xc7, 0xb3,
+ 0x2f, 0xc4, 0x46, 0xae, 0xdc, 0x7d, 0x25, 0x21, 0x77, 0x3f, 0x97, 0x47, 0xa0, 0xbb, 0x94, 0xfd,
+ 0xa7, 0x8b, 0x30, 0x5a, 0xf5, 0x1b, 0xca, 0xd6, 0xfb, 0x9f, 0x3d, 0x8c, 0xad, 0x77, 0x6e, 0x46,
+ 0x03, 0x8d, 0x32, 0xb3, 0x52, 0x93, 0x6e, 0xae, 0xdf, 0x64, 0x26, 0xdf, 0x77, 0x89, 0xbb, 0xbd,
+ 0x13, 0x91, 0x46, 0xf2, 0x73, 0x4e, 0xce, 0xe4, 0xfb, 0xf7, 0x0b, 0x30, 0x99, 0x68, 0x1d, 0x35,
+ 0x61, 0xbc, 0xa9, 0x4b, 0x75, 0xc5, 0x3a, 0x7d, 0x28, 0x81, 0xb0, 0x30, 0x99, 0xd5, 0x8a, 0xb0,
+ 0x49, 0x1c, 0x2d, 0x00, 0x28, 0x35, 0xa7, 0x14, 0xeb, 0x31, 0xae, 0x5f, 0xe9, 0x41, 0x43, 0xac,
+ 0x61, 0xa0, 0x97, 0x60, 0x34, 0xf2, 0xdb, 0x7e, 0xd3, 0xdf, 0xde, 0xbf, 0x41, 0x64, 0xf0, 0x28,
+ 0x65, 0x08, 0xb7, 0x11, 0x83, 0xb0, 0x8e, 0x87, 0xee, 0xc3, 0xb4, 0x22, 0x52, 0x3b, 0x06, 0x49,
+ 0x37, 0x93, 0x2a, 0xac, 0x27, 0x29, 0xe2, 0x74, 0x23, 0xf6, 0xcf, 0x16, 0xf9, 0x10, 0x7b, 0x91,
+ 0xfb, 0xde, 0x6e, 0x78, 0x77, 0xef, 0x86, 0xaf, 0x59, 0x30, 0x45, 0x5b, 0x67, 0x56, 0x3e, 0xf2,
+ 0x9a, 0x57, 0xe1, 0x99, 0xad, 0x2e, 0xe1, 0x99, 0x2f, 0xd1, 0x53, 0xb3, 0xe1, 0x77, 0x22, 0x21,
+ 0xbb, 0xd3, 0x8e, 0x45, 0x5a, 0x8a, 0x05, 0x54, 0xe0, 0x91, 0x20, 0x10, 0x9e, 0x89, 0x3a, 0x1e,
+ 0x09, 0x02, 0x2c, 0xa0, 0x32, 0x7a, 0xf3, 0x40, 0x76, 0xf4, 0x66, 0x1e, 0x84, 0x53, 0xd8, 0x83,
+ 0x08, 0x86, 0x4b, 0x0b, 0xc2, 0x29, 0x0d, 0x45, 0x62, 0x1c, 0xfb, 0xe7, 0x8b, 0x30, 0x56, 0xf5,
+ 0x1b, 0xb1, 0x8a, 0xf3, 0x45, 0x43, 0xc5, 0x79, 0x21, 0xa1, 0xe2, 0x9c, 0xd2, 0x71, 0xdf, 0x53,
+ 0x68, 0x7e, 0xa3, 0x14, 0x9a, 0xff, 0xd8, 0x62, 0xb3, 0x56, 0x5e, 0xaf, 0x71, 0xa3, 0x31, 0x74,
+ 0x15, 0x46, 0xd9, 0x01, 0xc3, 0x5c, 0x61, 0xa5, 0xde, 0x8f, 0x65, 0x25, 0x5a, 0x8f, 0x8b, 0xb1,
+ 0x8e, 0x83, 0x2e, 0xc3, 0x48, 0x48, 0x9c, 0xa0, 0xbe, 0xa3, 0x4e, 0x57, 0xa1, 0xa4, 0xe3, 0x65,
+ 0x58, 0x41, 0xd1, 0x1b, 0x71, 0xfc, 0xc7, 0x62, 0xbe, 0x6b, 0x9d, 0xde, 0x1f, 0xbe, 0x45, 0xf2,
+ 0x83, 0x3e, 0xda, 0x77, 0x01, 0xa5, 0xf1, 0xfb, 0x08, 0x7c, 0x36, 0x6f, 0x06, 0x3e, 0x2b, 0xa5,
+ 0x82, 0x9e, 0xfd, 0xb9, 0x05, 0x13, 0x55, 0xbf, 0x41, 0xb7, 0xee, 0xb7, 0xd2, 0x3e, 0xd5, 0x83,
+ 0xdf, 0x0e, 0x75, 0x09, 0x7e, 0x7b, 0x11, 0x06, 0xab, 0x7e, 0xa3, 0x52, 0xed, 0xe6, 0xd7, 0x6e,
+ 0xff, 0x0d, 0x0b, 0x86, 0xab, 0x7e, 0xe3, 0x04, 0xd4, 0x02, 0x1f, 0x35, 0xd5, 0x02, 0x8f, 0xe5,
+ 0xac, 0x9b, 0x1c, 0x4d, 0xc0, 0x5f, 0x1b, 0x80, 0x71, 0xda, 0x4f, 0x7f, 0x5b, 0x4e, 0xa5, 0x31,
+ 0x6c, 0x56, 0x1f, 0xc3, 0x46, 0xb9, 0x70, 0xbf, 0xd9, 0xf4, 0xef, 0x25, 0xa7, 0x75, 0x95, 0x95,
+ 0x62, 0x01, 0x45, 0xcf, 0xc2, 0x48, 0x3b, 0x20, 0x7b, 0xae, 0x2f, 0xd8, 0x5b, 0x4d, 0xc9, 0x52,
+ 0x15, 0xe5, 0x58, 0x61, 0xd0, 0x67, 0x61, 0xe8, 0x7a, 0xf4, 0x2a, 0xaf, 0xfb, 0x5e, 0x83, 0x4b,
+ 0xce, 0x8b, 0x22, 0x43, 0x83, 0x56, 0x8e, 0x0d, 0x2c, 0x74, 0x17, 0x4a, 0xec, 0x3f, 0x3b, 0x76,
+ 0x8e, 0x9e, 0xeb, 0x53, 0xe4, 0x7e, 0x13, 0x04, 0x70, 0x4c, 0x0b, 0x3d, 0x0f, 0x10, 0xc9, 0x28,
+ 0xe7, 0xa1, 0x08, 0x72, 0xa5, 0x9e, 0x02, 0x2a, 0xfe, 0x79, 0x88, 0x35, 0x2c, 0xf4, 0x0c, 0x94,
+ 0x22, 0xc7, 0x6d, 0xde, 0x74, 0x3d, 0x12, 0x32, 0x89, 0x78, 0x51, 0xa6, 0x60, 0x13, 0x85, 0x38,
+ 0x86, 0x53, 0x56, 0x8c, 0x45, 0x80, 0xe0, 0x99, 0x82, 0x47, 0x18, 0x36, 0x63, 0xc5, 0x6e, 0xaa,
+ 0x52, 0xac, 0x61, 0xa0, 0x1d, 0x78, 0xc2, 0xf5, 0x58, 0x36, 0x03, 0x52, 0xdb, 0x75, 0xdb, 0x1b,
+ 0x37, 0x6b, 0x77, 0x48, 0xe0, 0x6e, 0xed, 0x2f, 0x39, 0xf5, 0x5d, 0xe2, 0xc9, 0x2c, 0x8e, 0xef,
+ 0x17, 0x5d, 0x7c, 0xa2, 0xd2, 0x05, 0x17, 0x77, 0xa5, 0x64, 0xbf, 0x0c, 0xa7, 0xab, 0x7e, 0xa3,
+ 0xea, 0x07, 0xd1, 0xaa, 0x1f, 0xdc, 0x73, 0x82, 0x86, 0x5c, 0x29, 0xf3, 0x32, 0x1a, 0x03, 0x3d,
+ 0x0a, 0x07, 0xf9, 0x41, 0x61, 0x44, 0x5a, 0x78, 0x81, 0x31, 0x5f, 0x47, 0xf4, 0x21, 0xaa, 0x33,
+ 0x36, 0x40, 0xa5, 0xf6, 0xb8, 0xe6, 0x44, 0x04, 0xdd, 0x62, 0x29, 0x8b, 0xe3, 0x1b, 0x51, 0x54,
+ 0x7f, 0x5a, 0x4b, 0x59, 0x1c, 0x03, 0x33, 0xaf, 0x50, 0xb3, 0xbe, 0xfd, 0x5f, 0x07, 0xd9, 0xe1,
+ 0x98, 0x48, 0x0f, 0x81, 0x3e, 0x03, 0x13, 0x21, 0xb9, 0xe9, 0x7a, 0x9d, 0xfb, 0x52, 0x1a, 0xd1,
+ 0xc5, 0x0b, 0xac, 0xb6, 0xa2, 0x63, 0x72, 0x99, 0xa6, 0x59, 0x86, 0x13, 0xd4, 0x50, 0x0b, 0x26,
+ 0xee, 0xb9, 0x5e, 0xc3, 0xbf, 0x17, 0x4a, 0xfa, 0x23, 0xf9, 0xa2, 0xcd, 0xbb, 0x1c, 0x33, 0xd1,
+ 0x47, 0xa3, 0xb9, 0xbb, 0x06, 0x31, 0x9c, 0x20, 0x4e, 0x17, 0x60, 0xd0, 0xf1, 0x16, 0xc3, 0xdb,
+ 0x21, 0x09, 0x44, 0xf2, 0x69, 0xb6, 0x00, 0xb1, 0x2c, 0xc4, 0x31, 0x9c, 0x2e, 0x40, 0xf6, 0xe7,
+ 0x5a, 0xe0, 0x77, 0x78, 0x2e, 0x02, 0xb1, 0x00, 0xb1, 0x2a, 0xc5, 0x1a, 0x06, 0xdd, 0xa0, 0xec,
+ 0xdf, 0xba, 0xef, 0x61, 0xdf, 0x8f, 0xe4, 0x96, 0x66, 0xe9, 0x4e, 0xb5, 0x72, 0x6c, 0x60, 0xa1,
+ 0x55, 0x40, 0x61, 0xa7, 0xdd, 0x6e, 0x32, 0xf3, 0x12, 0xa7, 0xc9, 0x48, 0x71, 0x95, 0x7b, 0x91,
+ 0x87, 0x68, 0xad, 0xa5, 0xa0, 0x38, 0xa3, 0x06, 0x3d, 0xab, 0xb7, 0x44, 0x57, 0x07, 0x59, 0x57,
+ 0xb9, 0x1a, 0xa4, 0xc6, 0xfb, 0x29, 0x61, 0x68, 0x05, 0x86, 0xc3, 0xfd, 0xb0, 0x1e, 0x89, 0x58,
+ 0x73, 0x39, 0x19, 0x80, 0x6a, 0x0c, 0x45, 0x4b, 0x40, 0xc7, 0xab, 0x60, 0x59, 0x17, 0xd5, 0x61,
+ 0x46, 0x50, 0x5c, 0xde, 0x71, 0x3c, 0x95, 0x4f, 0x85, 0x5b, 0xd9, 0x5e, 0x7d, 0x70, 0x30, 0x3f,
+ 0x23, 0x5a, 0xd6, 0xc1, 0x87, 0x07, 0xf3, 0x67, 0xaa, 0x7e, 0x23, 0x03, 0x82, 0xb3, 0xa8, 0xf1,
+ 0xc5, 0x57, 0xaf, 0xfb, 0xad, 0x76, 0x35, 0xf0, 0xb7, 0xdc, 0x26, 0xe9, 0xa6, 0x4a, 0xaa, 0x19,
+ 0x98, 0x62, 0xf1, 0x19, 0x65, 0x38, 0x41, 0xcd, 0xfe, 0x4e, 0xc6, 0xcf, 0xb0, 0x7c, 0xcb, 0x51,
+ 0x27, 0x20, 0xa8, 0x05, 0xe3, 0x6d, 0xb6, 0x4d, 0x44, 0x86, 0x00, 0xb1, 0xd6, 0x5f, 0xec, 0x53,
+ 0x24, 0x72, 0x8f, 0x5e, 0x03, 0xa6, 0x99, 0x4a, 0x55, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0x4f, 0x3c,
+ 0xc6, 0x6e, 0xc4, 0x1a, 0x97, 0x73, 0x0c, 0x0b, 0xa3, 0x7e, 0xf1, 0xb4, 0x9a, 0xcb, 0x17, 0xb8,
+ 0xc5, 0xd3, 0x22, 0x1c, 0x03, 0xb0, 0xac, 0x8b, 0x3e, 0x0d, 0x13, 0xf4, 0xa5, 0xa2, 0x6e, 0xa5,
+ 0x70, 0xf6, 0x54, 0x7e, 0xf0, 0x05, 0x85, 0xa5, 0x67, 0x0f, 0xd1, 0x2b, 0xe3, 0x04, 0x31, 0xf4,
+ 0x06, 0x33, 0x0b, 0x91, 0xa4, 0x0b, 0xfd, 0x90, 0xd6, 0x2d, 0x40, 0x24, 0x59, 0x8d, 0x08, 0xea,
+ 0xc0, 0x4c, 0x3a, 0xd7, 0x58, 0x38, 0x6b, 0xe7, 0xb3, 0x7c, 0xe9, 0x74, 0x61, 0x71, 0x9a, 0x87,
+ 0x34, 0x2c, 0xc4, 0x59, 0xf4, 0xd1, 0x4d, 0x18, 0x17, 0x49, 0x87, 0xc5, 0xca, 0x2d, 0x1a, 0x72,
+ 0xc0, 0x71, 0xac, 0x03, 0x0f, 0x93, 0x05, 0xd8, 0xac, 0x8c, 0xb6, 0xe1, 0x9c, 0x96, 0x04, 0xe8,
+ 0x5a, 0xe0, 0x30, 0x65, 0xbe, 0xcb, 0x8e, 0x53, 0xed, 0xae, 0x7e, 0xf2, 0xc1, 0xc1, 0xfc, 0xb9,
+ 0x8d, 0x6e, 0x88, 0xb8, 0x3b, 0x1d, 0x74, 0x0b, 0x4e, 0x73, 0xd7, 0xe1, 0x32, 0x71, 0x1a, 0x4d,
+ 0xd7, 0x53, 0xcc, 0x00, 0xdf, 0xf2, 0x67, 0x1f, 0x1c, 0xcc, 0x9f, 0x5e, 0xcc, 0x42, 0xc0, 0xd9,
+ 0xf5, 0xd0, 0x47, 0xa1, 0xd4, 0xf0, 0x42, 0x31, 0x06, 0x43, 0x46, 0x9e, 0xa5, 0x52, 0x79, 0xbd,
+ 0xa6, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a, 0x68, 0x9b, 0xcb, 0x8a, 0x95, 0x04, 0x63, 0x38, 0x15,
+ 0x3a, 0x29, 0x29, 0xe4, 0x33, 0x9c, 0x07, 0xb9, 0x92, 0x44, 0xd9, 0xd4, 0x1b, 0x7e, 0x85, 0x06,
+ 0x61, 0xf4, 0x3a, 0x20, 0xfa, 0x82, 0x70, 0xeb, 0x64, 0xb1, 0xce, 0xd2, 0x4f, 0x30, 0xd1, 0xfa,
+ 0x88, 0xe9, 0xce, 0x56, 0x4b, 0x61, 0xe0, 0x8c, 0x5a, 0xe8, 0x3a, 0x3d, 0x55, 0xf4, 0x52, 0x71,
+ 0x6a, 0xa9, 0xac, 0x78, 0x65, 0xd2, 0x0e, 0x48, 0xdd, 0x89, 0x48, 0xc3, 0xa4, 0x88, 0x13, 0xf5,
+ 0x50, 0x03, 0x9e, 0x70, 0x3a, 0x91, 0xcf, 0xc4, 0xf0, 0x26, 0xea, 0x86, 0xbf, 0x4b, 0x3c, 0xa6,
+ 0x01, 0x1b, 0x59, 0xba, 0x40, 0xb9, 0x8d, 0xc5, 0x2e, 0x78, 0xb8, 0x2b, 0x15, 0xca, 0x25, 0xaa,
+ 0x34, 0xb8, 0x60, 0x06, 0x84, 0xca, 0x48, 0x85, 0xfb, 0x12, 0x8c, 0xee, 0xf8, 0x61, 0xb4, 0x4e,
+ 0xa2, 0x7b, 0x7e, 0xb0, 0x2b, 0xe2, 0x7a, 0xc6, 0xb1, 0xa0, 0x63, 0x10, 0xd6, 0xf1, 0xe8, 0x33,
+ 0x90, 0xd9, 0x67, 0x54, 0xca, 0x4c, 0x35, 0x3e, 0x12, 0x9f, 0x31, 0xd7, 0x79, 0x31, 0x96, 0x70,
+ 0x89, 0x5a, 0xa9, 0x2e, 0x33, 0x35, 0x77, 0x02, 0xb5, 0x52, 0x5d, 0xc6, 0x12, 0x4e, 0x97, 0x6b,
+ 0xb8, 0xe3, 0x04, 0xa4, 0x1a, 0xf8, 0x75, 0x12, 0x6a, 0x11, 0xc8, 0x1f, 0xe7, 0x51, 0x4b, 0xe9,
+ 0x72, 0xad, 0x65, 0x21, 0xe0, 0xec, 0x7a, 0x88, 0xa4, 0x13, 0x60, 0x4d, 0xe4, 0xeb, 0x27, 0xd2,
+ 0xfc, 0x4c, 0x9f, 0x39, 0xb0, 0x3c, 0x98, 0x52, 0xa9, 0xb7, 0x78, 0x9c, 0xd2, 0x70, 0x76, 0x92,
+ 0xad, 0xed, 0xfe, 0x83, 0x9c, 0x2a, 0x8d, 0x4f, 0x25, 0x41, 0x09, 0xa7, 0x68, 0x1b, 0x31, 0xbf,
+ 0xa6, 0x7a, 0xc6, 0xfc, 0xba, 0x02, 0xa5, 0xb0, 0xb3, 0xd9, 0xf0, 0x5b, 0x8e, 0xeb, 0x31, 0x35,
+ 0xb7, 0xf6, 0x1e, 0xa9, 0x49, 0x00, 0x8e, 0x71, 0xd0, 0x2a, 0x8c, 0x38, 0x52, 0x9d, 0x83, 0xf2,
+ 0xa3, 0xbc, 0x28, 0x25, 0x0e, 0x0f, 0x7c, 0x20, 0x15, 0x38, 0xaa, 0x2e, 0x7a, 0x15, 0xc6, 0x85,
+ 0xeb, 0xab, 0xc8, 0xfa, 0x38, 0x63, 0xfa, 0x27, 0xd5, 0x74, 0x20, 0x36, 0x71, 0xd1, 0x6d, 0x18,
+ 0x8d, 0xfc, 0x26, 0x73, 0xb2, 0xa1, 0x6c, 0xde, 0x99, 0xfc, 0x78, 0x65, 0x1b, 0x0a, 0x4d, 0x97,
+ 0xa4, 0xaa, 0xaa, 0x58, 0xa7, 0x83, 0x36, 0xf8, 0x7a, 0x67, 0x91, 0xb8, 0x49, 0x38, 0xfb, 0x58,
+ 0xfe, 0x9d, 0xa4, 0x02, 0x76, 0x9b, 0xdb, 0x41, 0xd4, 0xc4, 0x3a, 0x19, 0x74, 0x0d, 0xa6, 0xdb,
+ 0x81, 0xeb, 0xb3, 0x35, 0xa1, 0x34, 0x79, 0xb3, 0x66, 0x1a, 0xa0, 0x6a, 0x12, 0x01, 0xa7, 0xeb,
+ 0x30, 0xcf, 0x65, 0x51, 0x38, 0x7b, 0x96, 0x27, 0x86, 0xe6, 0xcf, 0x3b, 0x5e, 0x86, 0x15, 0x14,
+ 0xad, 0xb1, 0x93, 0x98, 0x4b, 0x26, 0x66, 0xe7, 0xf2, 0x03, 0xcb, 0xe8, 0x12, 0x0c, 0xce, 0xbc,
+ 0xaa, 0xbf, 0x38, 0xa6, 0x80, 0x1a, 0x5a, 0x06, 0x41, 0xfa, 0x62, 0x08, 0x67, 0x9f, 0xe8, 0x62,
+ 0x24, 0x97, 0x78, 0x5e, 0xc4, 0x0c, 0x81, 0x51, 0x1c, 0xe2, 0x04, 0x4d, 0xf4, 0x71, 0x98, 0x12,
+ 0xe1, 0xf0, 0xe2, 0x61, 0x3a, 0x17, 0x9b, 0x2e, 0xe3, 0x04, 0x0c, 0xa7, 0xb0, 0x79, 0x86, 0x02,
+ 0x67, 0xb3, 0x49, 0xc4, 0xd1, 0x77, 0xd3, 0xf5, 0x76, 0xc3, 0xd9, 0xf3, 0xec, 0x7c, 0x10, 0x19,
+ 0x0a, 0x92, 0x50, 0x9c, 0x51, 0x03, 0x6d, 0xc0, 0x54, 0x3b, 0x20, 0xa4, 0xc5, 0x18, 0x7d, 0x71,
+ 0x9f, 0xcd, 0x73, 0xc7, 0x7d, 0xda, 0x93, 0x6a, 0x02, 0x76, 0x98, 0x51, 0x86, 0x53, 0x14, 0xd0,
+ 0x3d, 0x18, 0xf1, 0xf7, 0x48, 0xb0, 0x43, 0x9c, 0xc6, 0xec, 0x85, 0x2e, 0xa6, 0xf4, 0xe2, 0x72,
+ 0xbb, 0x25, 0x70, 0x13, 0xda, 0x7f, 0x59, 0xdc, 0x5b, 0xfb, 0x2f, 0x1b, 0x43, 0x3f, 0x62, 0xc1,
+ 0x59, 0xa9, 0x30, 0xa8, 0xb5, 0xe9, 0xa8, 0x2f, 0xfb, 0x5e, 0x18, 0x05, 0xdc, 0xd5, 0xfc, 0xc9,
+ 0x7c, 0xf7, 0xeb, 0x8d, 0x9c, 0x4a, 0x4a, 0x38, 0x7a, 0x36, 0x0f, 0x23, 0xc4, 0xf9, 0x2d, 0xa2,
+ 0x65, 0x98, 0x0e, 0x49, 0x24, 0x0f, 0xa3, 0xc5, 0x70, 0xf5, 0x8d, 0xf2, 0xfa, 0xec, 0x45, 0xee,
+ 0x27, 0x4f, 0x37, 0x43, 0x2d, 0x09, 0xc4, 0x69, 0xfc, 0xb9, 0x6f, 0x87, 0xe9, 0xd4, 0xf5, 0x7f,
+ 0x94, 0xcc, 0x2b, 0x73, 0xbb, 0x30, 0x6e, 0x0c, 0xf1, 0x23, 0xd5, 0x1e, 0xff, 0xcb, 0x61, 0x28,
+ 0x29, 0xcd, 0x22, 0xba, 0x62, 0x2a, 0x8c, 0xcf, 0x26, 0x15, 0xc6, 0x23, 0xf4, 0x5d, 0xaf, 0xeb,
+ 0x88, 0x37, 0x32, 0xa2, 0x83, 0xe5, 0x6d, 0xe8, 0xfe, 0xdd, 0xbe, 0x35, 0x71, 0x6d, 0xb1, 0x6f,
+ 0xcd, 0xf3, 0x40, 0x57, 0x09, 0xf0, 0x35, 0x98, 0xf6, 0x7c, 0xc6, 0x73, 0x92, 0x86, 0x64, 0x28,
+ 0x18, 0xdf, 0x50, 0xd2, 0xc3, 0x6d, 0x24, 0x10, 0x70, 0xba, 0x0e, 0x6d, 0x90, 0x5f, 0xfc, 0x49,
+ 0x91, 0x33, 0xe7, 0x0b, 0xb0, 0x80, 0xa2, 0x8b, 0x30, 0xd8, 0xf6, 0x1b, 0x95, 0xaa, 0xe0, 0x37,
+ 0xb5, 0x98, 0x94, 0x8d, 0x4a, 0x15, 0x73, 0x18, 0x5a, 0x84, 0x21, 0xf6, 0x23, 0x9c, 0x1d, 0xcb,
+ 0x8f, 0xab, 0xc0, 0x6a, 0x68, 0x79, 0x6d, 0x58, 0x05, 0x2c, 0x2a, 0x32, 0xd1, 0x17, 0x65, 0xd2,
+ 0x99, 0xe8, 0x6b, 0xf8, 0x21, 0x45, 0x5f, 0x92, 0x00, 0x8e, 0x69, 0xa1, 0xfb, 0x70, 0xda, 0x78,
+ 0x18, 0xf1, 0x25, 0x42, 0x42, 0xe1, 0xdb, 0x7d, 0xb1, 0xeb, 0x8b, 0x48, 0x68, 0xaa, 0xcf, 0x89,
+ 0x4e, 0x9f, 0xae, 0x64, 0x51, 0xc2, 0xd9, 0x0d, 0xa0, 0x26, 0x4c, 0xd7, 0x53, 0xad, 0x8e, 0xf4,
+ 0xdf, 0xaa, 0x9a, 0xd0, 0x74, 0x8b, 0x69, 0xc2, 0xe8, 0x55, 0x18, 0x79, 0xcb, 0x0f, 0xd9, 0x59,
+ 0x2d, 0x78, 0x64, 0xe9, 0x18, 0x3c, 0xf2, 0xc6, 0xad, 0x1a, 0x2b, 0x3f, 0x3c, 0x98, 0x1f, 0xad,
+ 0xfa, 0x0d, 0xf9, 0x17, 0xab, 0x0a, 0xe8, 0xfb, 0x2d, 0x98, 0x4b, 0xbf, 0xbc, 0x54, 0xa7, 0xc7,
+ 0xfb, 0xef, 0xb4, 0x2d, 0x1a, 0x9d, 0x5b, 0xc9, 0x25, 0x87, 0xbb, 0x34, 0x65, 0xff, 0x8a, 0xc5,
+ 0xa4, 0x6e, 0x42, 0x03, 0x44, 0xc2, 0x4e, 0xf3, 0x24, 0xd2, 0x79, 0xae, 0x18, 0xca, 0xa9, 0x87,
+ 0xb6, 0x5c, 0xf8, 0xa7, 0x16, 0xb3, 0x5c, 0x38, 0x41, 0x17, 0x85, 0x37, 0x60, 0x24, 0x92, 0x69,
+ 0x56, 0xbb, 0x64, 0x20, 0xd5, 0x3a, 0xc5, 0xac, 0x37, 0x14, 0xc7, 0xaa, 0x32, 0xaa, 0x2a, 0x32,
+ 0xf6, 0x3f, 0xe0, 0x33, 0x20, 0x21, 0x27, 0xa0, 0x03, 0x28, 0x9b, 0x3a, 0x80, 0xf9, 0x1e, 0x5f,
+ 0x90, 0xa3, 0x0b, 0xf8, 0xfb, 0x66, 0xbf, 0x99, 0xa4, 0xe6, 0xdd, 0x6e, 0x32, 0x63, 0x7f, 0xd1,
+ 0x02, 0x88, 0x43, 0xfe, 0x32, 0xf9, 0xb2, 0x1f, 0xc8, 0x5c, 0x8e, 0x59, 0x59, 0x8b, 0x5e, 0xa6,
+ 0x3c, 0xaa, 0x1f, 0xf9, 0x75, 0xbf, 0x29, 0x34, 0x5c, 0x4f, 0xc4, 0x6a, 0x08, 0x5e, 0x7e, 0xa8,
+ 0xfd, 0xc6, 0x0a, 0x1b, 0xcd, 0xcb, 0x00, 0x63, 0xc5, 0x58, 0x31, 0x66, 0x04, 0x17, 0xfb, 0x31,
+ 0x0b, 0x4e, 0x65, 0xd9, 0xbb, 0xd2, 0x17, 0x0f, 0x97, 0x59, 0x29, 0x73, 0x26, 0x35, 0x9b, 0x77,
+ 0x44, 0x39, 0x56, 0x18, 0x7d, 0x67, 0x28, 0x3b, 0x5a, 0xac, 0xdd, 0x5b, 0x30, 0x5e, 0x0d, 0x88,
+ 0x76, 0xb9, 0xbe, 0xc6, 0x9d, 0xd6, 0x79, 0x7f, 0x9e, 0x3d, 0xb2, 0xc3, 0xba, 0xfd, 0x95, 0x02,
+ 0x9c, 0xe2, 0x56, 0x01, 0x8b, 0x7b, 0xbe, 0xdb, 0xa8, 0xfa, 0x0d, 0x91, 0x5d, 0xee, 0x53, 0x30,
+ 0xd6, 0xd6, 0x04, 0x8d, 0xdd, 0xe2, 0x46, 0xea, 0x02, 0xc9, 0x58, 0x34, 0xa2, 0x97, 0x62, 0x83,
+ 0x16, 0x6a, 0xc0, 0x18, 0xd9, 0x73, 0xeb, 0x4a, 0xb5, 0x5c, 0x38, 0xf2, 0x45, 0xa7, 0x5a, 0x59,
+ 0xd1, 0xe8, 0x60, 0x83, 0xea, 0x23, 0xc8, 0x1b, 0x6c, 0xff, 0xb8, 0x05, 0x8f, 0xe5, 0x44, 0x99,
+ 0xa4, 0xcd, 0xdd, 0x63, 0xf6, 0x17, 0x62, 0xd9, 0xaa, 0xe6, 0xb8, 0x55, 0x06, 0x16, 0x50, 0xf4,
+ 0x09, 0x00, 0x6e, 0x55, 0x41, 0x9f, 0xdc, 0xbd, 0xc2, 0xf1, 0x19, 0x91, 0xc4, 0xb4, 0xa0, 0x50,
+ 0xb2, 0x3e, 0xd6, 0x68, 0xd9, 0x5f, 0x1a, 0x80, 0x41, 0x9e, 0xe3, 0x7c, 0x15, 0x86, 0x77, 0x78,
+ 0xce, 0x8d, 0x7e, 0xd2, 0x7b, 0xc4, 0xc2, 0x10, 0x5e, 0x80, 0x65, 0x65, 0xb4, 0x06, 0x33, 0x3c,
+ 0x67, 0x49, 0xb3, 0x4c, 0x9a, 0xce, 0xbe, 0x94, 0xdc, 0xf1, 0x7c, 0x9f, 0x4a, 0x82, 0x59, 0x49,
+ 0xa3, 0xe0, 0xac, 0x7a, 0xe8, 0x35, 0x98, 0xa0, 0x2f, 0x29, 0xbf, 0x13, 0x49, 0x4a, 0x3c, 0x5b,
+ 0x89, 0x7a, 0xba, 0x6d, 0x18, 0x50, 0x9c, 0xc0, 0xa6, 0x8f, 0xf9, 0x76, 0x4a, 0x46, 0x39, 0x18,
+ 0x3f, 0xe6, 0x4d, 0xb9, 0xa4, 0x89, 0xcb, 0x0c, 0x5d, 0x3b, 0xcc, 0xac, 0x77, 0x63, 0x27, 0x20,
+ 0xe1, 0x8e, 0xdf, 0x6c, 0x30, 0xa6, 0x6f, 0x50, 0x33, 0x74, 0x4d, 0xc0, 0x71, 0xaa, 0x06, 0xa5,
+ 0xb2, 0xe5, 0xb8, 0xcd, 0x4e, 0x40, 0x62, 0x2a, 0x43, 0x26, 0x95, 0xd5, 0x04, 0x1c, 0xa7, 0x6a,
+ 0xf4, 0x16, 0xbe, 0x0e, 0x1f, 0x8f, 0xf0, 0x95, 0x2e, 0xd8, 0xd3, 0xd5, 0xc0, 0xa7, 0x27, 0xb6,
+ 0x8c, 0xd1, 0xa3, 0xcc, 0xa4, 0x87, 0xa5, 0x3b, 0x71, 0x97, 0x68, 0x76, 0xc2, 0x90, 0x94, 0x53,
+ 0x30, 0x2c, 0x15, 0x6a, 0xc2, 0x91, 0x58, 0x52, 0x41, 0x57, 0x61, 0x54, 0xa4, 0xbc, 0x60, 0xd6,
+ 0xbc, 0x7c, 0x8d, 0x30, 0xcb, 0x8a, 0x72, 0x5c, 0x8c, 0x75, 0x1c, 0xfb, 0x07, 0x0a, 0x30, 0x93,
+ 0xe1, 0x8e, 0xc1, 0xcf, 0xc4, 0x6d, 0x37, 0x8c, 0x54, 0xf2, 0x44, 0xed, 0x4c, 0xe4, 0xe5, 0x58,
+ 0x61, 0xd0, 0x8d, 0xc7, 0x4f, 0xdd, 0xe4, 0x49, 0x2b, 0xcc, 0x9d, 0x05, 0xf4, 0x88, 0x69, 0x08,
+ 0x2f, 0xc0, 0x40, 0x27, 0x24, 0x32, 0x0e, 0xa5, 0xba, 0x83, 0x98, 0xc2, 0x8d, 0x41, 0xe8, 0x9b,
+ 0x60, 0x5b, 0xe9, 0xae, 0xb4, 0x37, 0x01, 0xd7, 0x5e, 0x71, 0x18, 0xed, 0x5c, 0x44, 0x3c, 0xc7,
+ 0x8b, 0xc4, 0xcb, 0x21, 0x0e, 0xa8, 0xc6, 0x4a, 0xb1, 0x80, 0xda, 0x5f, 0x2a, 0xc2, 0xd9, 0x5c,
+ 0x07, 0x2d, 0xda, 0xf5, 0x96, 0xef, 0xb9, 0x91, 0xaf, 0x4c, 0x56, 0x78, 0x10, 0x35, 0xd2, 0xde,
+ 0x59, 0x13, 0xe5, 0x58, 0x61, 0xa0, 0x4b, 0x30, 0xc8, 0xc4, 0x75, 0xa9, 0x34, 0x92, 0x4b, 0x65,
+ 0x1e, 0x55, 0x87, 0x83, 0xfb, 0x4e, 0xd1, 0x7b, 0x91, 0x5e, 0xc7, 0x7e, 0x33, 0x79, 0x3a, 0xd2,
+ 0xee, 0xfa, 0x7e, 0x13, 0x33, 0x20, 0xfa, 0x80, 0x18, 0xaf, 0x84, 0x8d, 0x06, 0x76, 0x1a, 0x7e,
+ 0xa8, 0x0d, 0xda, 0xd3, 0x30, 0xbc, 0x4b, 0xf6, 0x03, 0xd7, 0xdb, 0x4e, 0xda, 0xee, 0xdc, 0xe0,
+ 0xc5, 0x58, 0xc2, 0xcd, 0x8c, 0x60, 0xc3, 0xc7, 0x9d, 0x5b, 0x77, 0xa4, 0xe7, 0x5d, 0xfb, 0x43,
+ 0x45, 0x98, 0xc4, 0x4b, 0xe5, 0xf7, 0x26, 0xe2, 0x76, 0x7a, 0x22, 0x8e, 0x3b, 0xb7, 0x6e, 0xef,
+ 0xd9, 0xf8, 0x45, 0x0b, 0x26, 0x59, 0xe2, 0x0d, 0x11, 0x7e, 0xcb, 0xf5, 0xbd, 0x13, 0xe0, 0x6b,
+ 0x2f, 0xc2, 0x60, 0x40, 0x1b, 0x4d, 0xe6, 0x8f, 0x64, 0x3d, 0xc1, 0x1c, 0x86, 0x9e, 0x80, 0x01,
+ 0xd6, 0x05, 0x3a, 0x79, 0x63, 0x3c, 0xf5, 0x56, 0xd9, 0x89, 0x1c, 0xcc, 0x4a, 0x59, 0x4c, 0x19,
+ 0x4c, 0xda, 0x4d, 0x97, 0x77, 0x3a, 0x56, 0xa6, 0xbe, 0x3b, 0x5c, 0xb7, 0x33, 0xbb, 0xf6, 0xce,
+ 0x62, 0xca, 0x64, 0x93, 0xec, 0xfe, 0x66, 0xfc, 0xe3, 0x02, 0x9c, 0xcf, 0xac, 0xd7, 0x77, 0x4c,
+ 0x99, 0xee, 0xb5, 0x1f, 0x65, 0x6a, 0x85, 0xe2, 0x09, 0x5a, 0x46, 0x0e, 0xf4, 0xcb, 0xca, 0x0e,
+ 0xf6, 0x11, 0xea, 0x25, 0x73, 0xc8, 0xde, 0x25, 0xa1, 0x5e, 0x32, 0xfb, 0x96, 0xf3, 0xe6, 0xfd,
+ 0x8b, 0x42, 0xce, 0xb7, 0xb0, 0xd7, 0xef, 0x65, 0x7a, 0xce, 0x30, 0x60, 0x28, 0x5f, 0x94, 0xfc,
+ 0x8c, 0xe1, 0x65, 0x58, 0x41, 0xd1, 0x22, 0x4c, 0xb6, 0x5c, 0x8f, 0x1e, 0x3e, 0xfb, 0x26, 0x87,
+ 0xa9, 0x22, 0x71, 0xad, 0x99, 0x60, 0x9c, 0xc4, 0x47, 0xae, 0x16, 0x06, 0xa6, 0x90, 0x9f, 0x91,
+ 0x3d, 0xb7, 0xb7, 0x0b, 0xa6, 0xa2, 0x59, 0x8d, 0x62, 0x46, 0x48, 0x98, 0x35, 0x4d, 0xe8, 0x51,
+ 0xec, 0x5f, 0xe8, 0x31, 0x96, 0x2d, 0xf0, 0x98, 0x7b, 0x15, 0xc6, 0x1f, 0x5a, 0xca, 0x6d, 0x7f,
+ 0xad, 0x08, 0x8f, 0x77, 0xd9, 0xf6, 0xfc, 0xac, 0x37, 0xe6, 0x40, 0x3b, 0xeb, 0x53, 0xf3, 0x50,
+ 0x85, 0x53, 0x5b, 0x9d, 0x66, 0x73, 0x9f, 0x39, 0x0c, 0x90, 0x86, 0xc4, 0x10, 0x3c, 0xa5, 0x7c,
+ 0xe9, 0x9f, 0x5a, 0xcd, 0xc0, 0xc1, 0x99, 0x35, 0xe9, 0xcb, 0x81, 0xde, 0x24, 0xfb, 0x8a, 0x54,
+ 0xe2, 0xe5, 0x80, 0x75, 0x20, 0x36, 0x71, 0xd1, 0x35, 0x98, 0x76, 0xf6, 0x1c, 0x97, 0xc7, 0xd2,
+ 0x95, 0x04, 0xf8, 0xd3, 0x41, 0x09, 0x27, 0x17, 0x93, 0x08, 0x38, 0x5d, 0x07, 0xbd, 0x0e, 0xc8,
+ 0xdf, 0x64, 0x66, 0xc5, 0x8d, 0x6b, 0xc4, 0x13, 0xfa, 0x40, 0x36, 0x77, 0xc5, 0xf8, 0x48, 0xb8,
+ 0x95, 0xc2, 0xc0, 0x19, 0xb5, 0x12, 0xe1, 0x4e, 0x86, 0xf2, 0xc3, 0x9d, 0x74, 0x3f, 0x17, 0x7b,
+ 0x66, 0xf5, 0xf8, 0x8f, 0x16, 0xbd, 0xbe, 0x38, 0x93, 0x6f, 0x46, 0x07, 0x7c, 0x95, 0xd9, 0xf3,
+ 0x71, 0xc1, 0xa5, 0x16, 0xa4, 0xe3, 0xb4, 0x66, 0xcf, 0x17, 0x03, 0xb1, 0x89, 0xcb, 0x17, 0x44,
+ 0x18, 0xfb, 0x86, 0x1a, 0x2c, 0xbe, 0x08, 0x61, 0xa4, 0x30, 0xd0, 0x27, 0x61, 0xb8, 0xe1, 0xee,
+ 0xb9, 0xa1, 0x10, 0xdb, 0x1c, 0x59, 0x47, 0x12, 0x9f, 0x83, 0x65, 0x4e, 0x06, 0x4b, 0x7a, 0xf6,
+ 0x0f, 0x15, 0x60, 0x5c, 0xb6, 0xf8, 0x46, 0xc7, 0x8f, 0x9c, 0x13, 0xb8, 0x96, 0xaf, 0x19, 0xd7,
+ 0xf2, 0x07, 0xba, 0xc5, 0x71, 0x62, 0x5d, 0xca, 0xbd, 0x8e, 0x6f, 0x25, 0xae, 0xe3, 0xa7, 0x7a,
+ 0x93, 0xea, 0x7e, 0x0d, 0xff, 0x43, 0x0b, 0xa6, 0x0d, 0xfc, 0x13, 0xb8, 0x0d, 0x56, 0xcd, 0xdb,
+ 0xe0, 0xc9, 0x9e, 0xdf, 0x90, 0x73, 0x0b, 0x7c, 0x6f, 0x31, 0xd1, 0x77, 0x76, 0xfa, 0xbf, 0x05,
+ 0x03, 0x3b, 0x4e, 0xd0, 0xe8, 0x16, 0xb7, 0x3e, 0x55, 0x69, 0xe1, 0xba, 0x13, 0x08, 0x85, 0xe8,
+ 0xb3, 0x2a, 0x21, 0xba, 0x13, 0xf4, 0x56, 0x86, 0xb2, 0xa6, 0xd0, 0xcb, 0x30, 0x14, 0xd6, 0xfd,
+ 0xb6, 0x72, 0x17, 0xb8, 0xc0, 0x93, 0xa5, 0xd3, 0x92, 0xc3, 0x83, 0x79, 0x64, 0x36, 0x47, 0x8b,
+ 0xb1, 0xc0, 0x47, 0x9f, 0x82, 0x71, 0xf6, 0x4b, 0x59, 0x27, 0x15, 0xf3, 0x73, 0x5c, 0xd5, 0x74,
+ 0x44, 0x6e, 0xba, 0x67, 0x14, 0x61, 0x93, 0xd4, 0xdc, 0x36, 0x94, 0xd4, 0x67, 0x3d, 0x52, 0x25,
+ 0xe4, 0xbf, 0x2b, 0xc2, 0x4c, 0xc6, 0x9a, 0x43, 0xa1, 0x31, 0x13, 0x57, 0xfb, 0x5c, 0xaa, 0xef,
+ 0x70, 0x2e, 0x42, 0xf6, 0x1a, 0x6a, 0x88, 0xb5, 0xd5, 0x77, 0xa3, 0xb7, 0x43, 0x92, 0x6c, 0x94,
+ 0x16, 0xf5, 0x6e, 0x94, 0x36, 0x76, 0x62, 0x43, 0x4d, 0x1b, 0x52, 0x3d, 0x7d, 0xa4, 0x73, 0xfa,
+ 0x67, 0x45, 0x38, 0x95, 0x15, 0x5a, 0x0e, 0x7d, 0x3e, 0x91, 0x35, 0xf1, 0xc5, 0x7e, 0x83, 0xd2,
+ 0xf1, 0x54, 0x8a, 0x5c, 0xd8, 0xbc, 0xb4, 0x60, 0xe6, 0x51, 0xec, 0x39, 0xcc, 0xa2, 0x4d, 0x16,
+ 0xf7, 0x20, 0xe0, 0xd9, 0x2e, 0xe5, 0xf1, 0xf1, 0xe1, 0xbe, 0x3b, 0x20, 0xd2, 0x64, 0x86, 0x09,
+ 0xcb, 0x07, 0x59, 0xdc, 0xdb, 0xf2, 0x41, 0xb6, 0x3c, 0xe7, 0xc2, 0xa8, 0xf6, 0x35, 0x8f, 0x74,
+ 0xc6, 0x77, 0xe9, 0x6d, 0xa5, 0xf5, 0xfb, 0x91, 0xce, 0xfa, 0x8f, 0x5b, 0x90, 0x30, 0x86, 0x57,
+ 0x62, 0x31, 0x2b, 0x57, 0x2c, 0x76, 0x01, 0x06, 0x02, 0xbf, 0x49, 0x92, 0xe9, 0x05, 0xb1, 0xdf,
+ 0x24, 0x98, 0x41, 0x28, 0x46, 0x14, 0x0b, 0x3b, 0xc6, 0xf4, 0x87, 0x9c, 0x78, 0xa2, 0x5d, 0x84,
+ 0xc1, 0x26, 0xd9, 0x23, 0xcd, 0x64, 0x16, 0x98, 0x9b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x8b, 0x03,
+ 0x70, 0xae, 0x6b, 0xe4, 0x10, 0xfa, 0x1c, 0xda, 0x76, 0x22, 0x72, 0xcf, 0xd9, 0x4f, 0xa6, 0x6b,
+ 0xb8, 0xc6, 0x8b, 0xb1, 0x84, 0x33, 0x77, 0x25, 0x1e, 0x75, 0x39, 0x21, 0x44, 0x14, 0xc1, 0x96,
+ 0x05, 0xd4, 0x14, 0x4a, 0x15, 0x8f, 0x43, 0x28, 0xf5, 0x3c, 0x40, 0x18, 0x36, 0xb9, 0xc9, 0x50,
+ 0x43, 0xf8, 0x41, 0xc5, 0xd1, 0xb9, 0x6b, 0x37, 0x05, 0x04, 0x6b, 0x58, 0xa8, 0x0c, 0x53, 0xed,
+ 0xc0, 0x8f, 0xb8, 0x4c, 0xb6, 0xcc, 0xad, 0xea, 0x06, 0xcd, 0xa0, 0x0d, 0xd5, 0x04, 0x1c, 0xa7,
+ 0x6a, 0xa0, 0x97, 0x60, 0x54, 0x04, 0x72, 0xa8, 0xfa, 0x7e, 0x53, 0x88, 0x81, 0x94, 0xa1, 0x59,
+ 0x2d, 0x06, 0x61, 0x1d, 0x4f, 0xab, 0xc6, 0x04, 0xbd, 0xc3, 0x99, 0xd5, 0xb8, 0xb0, 0x57, 0xc3,
+ 0x4b, 0x84, 0x99, 0x1c, 0xe9, 0x2b, 0xcc, 0x64, 0x2c, 0x18, 0x2b, 0xf5, 0xad, 0x44, 0x83, 0x9e,
+ 0xa2, 0xa4, 0x9f, 0x1b, 0x80, 0x19, 0xb1, 0x70, 0x1e, 0xf5, 0x72, 0xb9, 0x9d, 0x5e, 0x2e, 0xc7,
+ 0x21, 0x3a, 0x7b, 0x6f, 0xcd, 0x9c, 0xf4, 0x9a, 0xf9, 0x61, 0x0b, 0x4c, 0xf6, 0x0a, 0xfd, 0x5f,
+ 0xb9, 0xf9, 0x6e, 0x5e, 0xca, 0x65, 0xd7, 0x1a, 0xf2, 0x02, 0x79, 0x87, 0x99, 0x6f, 0xec, 0xff,
+ 0x60, 0xc1, 0x93, 0x3d, 0x29, 0xa2, 0x15, 0x28, 0x31, 0x1e, 0x50, 0x7b, 0x9d, 0x3d, 0xa5, 0xac,
+ 0x6e, 0x25, 0x20, 0x87, 0x25, 0x8d, 0x6b, 0xa2, 0x95, 0x54, 0x62, 0xa1, 0xa7, 0x33, 0x12, 0x0b,
+ 0x9d, 0x36, 0x86, 0xe7, 0x21, 0x33, 0x0b, 0xfd, 0x20, 0xbd, 0x71, 0x0c, 0x8f, 0x17, 0xf4, 0x61,
+ 0x43, 0xec, 0x67, 0x27, 0xc4, 0x7e, 0xc8, 0xc4, 0xd6, 0xee, 0x90, 0x8f, 0xc3, 0x14, 0x8b, 0xf0,
+ 0xc4, 0x6c, 0xc0, 0x85, 0x2f, 0x4e, 0x21, 0xb6, 0xf3, 0xbc, 0x99, 0x80, 0xe1, 0x14, 0xb6, 0xfd,
+ 0x47, 0x45, 0x18, 0xe2, 0xdb, 0xef, 0x04, 0xde, 0x84, 0xcf, 0x40, 0xc9, 0x6d, 0xb5, 0x3a, 0x3c,
+ 0x57, 0xcc, 0x20, 0x77, 0xc0, 0xa5, 0xf3, 0x54, 0x91, 0x85, 0x38, 0x86, 0xa3, 0x55, 0x21, 0x71,
+ 0xee, 0x12, 0x44, 0x92, 0x77, 0x7c, 0xa1, 0xec, 0x44, 0x0e, 0x67, 0x70, 0xd4, 0x3d, 0x1b, 0xcb,
+ 0xa6, 0xd1, 0x67, 0x00, 0xc2, 0x28, 0x70, 0xbd, 0x6d, 0x5a, 0x26, 0x62, 0xa6, 0x7e, 0xb0, 0x0b,
+ 0xb5, 0x9a, 0x42, 0xe6, 0x34, 0xe3, 0x33, 0x47, 0x01, 0xb0, 0x46, 0x11, 0x2d, 0x18, 0x37, 0xfd,
+ 0x5c, 0x62, 0xee, 0x80, 0x53, 0x8d, 0xe7, 0x6c, 0xee, 0x23, 0x50, 0x52, 0xc4, 0x7b, 0xc9, 0x9f,
+ 0xc6, 0x74, 0xb6, 0xe8, 0x63, 0x30, 0x99, 0xe8, 0xdb, 0x91, 0xc4, 0x57, 0xbf, 0x64, 0xc1, 0x24,
+ 0xef, 0xcc, 0x8a, 0xb7, 0x27, 0x6e, 0x83, 0xb7, 0xe1, 0x54, 0x33, 0xe3, 0x54, 0x16, 0xd3, 0xdf,
+ 0xff, 0x29, 0xae, 0xc4, 0x55, 0x59, 0x50, 0x9c, 0xd9, 0x06, 0xba, 0x4c, 0x77, 0x1c, 0x3d, 0x75,
+ 0x9d, 0xa6, 0xf0, 0xc7, 0x1d, 0xe3, 0xbb, 0x8d, 0x97, 0x61, 0x05, 0xb5, 0x7f, 0xd7, 0x82, 0x69,
+ 0xde, 0xf3, 0x1b, 0x64, 0x5f, 0x9d, 0x4d, 0xdf, 0xc8, 0xbe, 0x8b, 0x2c, 0x65, 0x85, 0x9c, 0x2c,
+ 0x65, 0xfa, 0xa7, 0x15, 0xbb, 0x7e, 0xda, 0x57, 0x2c, 0x10, 0x2b, 0xe4, 0x04, 0x84, 0x10, 0xdf,
+ 0x6e, 0x0a, 0x21, 0xe6, 0xf2, 0x37, 0x41, 0x8e, 0xf4, 0xe1, 0xcf, 0x2d, 0x98, 0xe2, 0x08, 0xb1,
+ 0xb6, 0xfc, 0x1b, 0x3a, 0x0f, 0xfd, 0xe4, 0x32, 0xbe, 0x41, 0xf6, 0x37, 0xfc, 0xaa, 0x13, 0xed,
+ 0x64, 0x7f, 0x94, 0x31, 0x59, 0x03, 0x5d, 0x27, 0xab, 0x21, 0x37, 0xd0, 0x11, 0x12, 0xa4, 0x1f,
+ 0x39, 0x89, 0x87, 0xfd, 0x75, 0x0b, 0x10, 0x6f, 0xc6, 0x60, 0xdc, 0x28, 0x3b, 0xc4, 0x4a, 0xb5,
+ 0x8b, 0x2e, 0x3e, 0x9a, 0x14, 0x04, 0x6b, 0x58, 0xc7, 0x32, 0x3c, 0x09, 0x93, 0x87, 0x62, 0x6f,
+ 0x93, 0x87, 0x23, 0x8c, 0xe8, 0xbf, 0x1a, 0x82, 0xa4, 0xd7, 0x0f, 0xba, 0x03, 0x63, 0x75, 0xa7,
+ 0xed, 0x6c, 0xba, 0x4d, 0x37, 0x72, 0x49, 0xd8, 0xcd, 0x28, 0x6b, 0x59, 0xc3, 0x13, 0x4a, 0x6a,
+ 0xad, 0x04, 0x1b, 0x74, 0xd0, 0x02, 0x40, 0x3b, 0x70, 0xf7, 0xdc, 0x26, 0xd9, 0x66, 0xb2, 0x12,
+ 0x16, 0x01, 0x80, 0x5b, 0x1a, 0xc9, 0x52, 0xac, 0x61, 0x64, 0xb8, 0x58, 0x17, 0x1f, 0xb1, 0x8b,
+ 0x35, 0x9c, 0x98, 0x8b, 0xf5, 0xc0, 0x91, 0x5c, 0xac, 0x47, 0x8e, 0xec, 0x62, 0x3d, 0xd8, 0x97,
+ 0x8b, 0x35, 0x86, 0x33, 0x92, 0xf7, 0xa4, 0xff, 0x57, 0xdd, 0x26, 0x11, 0x0f, 0x0e, 0x1e, 0xb6,
+ 0x60, 0xee, 0xc1, 0xc1, 0xfc, 0x19, 0x9c, 0x89, 0x81, 0x73, 0x6a, 0xa2, 0x4f, 0xc0, 0xac, 0xd3,
+ 0x6c, 0xfa, 0xf7, 0xd4, 0xa4, 0xae, 0x84, 0x75, 0xa7, 0xc9, 0x95, 0x10, 0xc3, 0x8c, 0xea, 0x13,
+ 0x0f, 0x0e, 0xe6, 0x67, 0x17, 0x73, 0x70, 0x70, 0x6e, 0x6d, 0xf4, 0x51, 0x28, 0xb5, 0x03, 0xbf,
+ 0xbe, 0xa6, 0xb9, 0x26, 0x9e, 0xa7, 0x03, 0x58, 0x95, 0x85, 0x87, 0x07, 0xf3, 0xe3, 0xea, 0x0f,
+ 0xbb, 0xf0, 0xe3, 0x0a, 0x19, 0x3e, 0xd3, 0xa3, 0xc7, 0xea, 0x33, 0xbd, 0x0b, 0x33, 0x35, 0x12,
+ 0xb8, 0x2c, 0x9d, 0x7a, 0x23, 0x3e, 0x9f, 0x36, 0xa0, 0x14, 0x24, 0x4e, 0xe4, 0xbe, 0x02, 0x3b,
+ 0x6a, 0xd9, 0x14, 0xe4, 0x09, 0x1c, 0x13, 0xb2, 0xff, 0xa7, 0x05, 0xc3, 0xc2, 0xcb, 0xe7, 0x04,
+ 0xb8, 0xc6, 0x45, 0x43, 0x93, 0x30, 0x9f, 0x3d, 0x60, 0xac, 0x33, 0xb9, 0x3a, 0x84, 0x4a, 0x42,
+ 0x87, 0xf0, 0x64, 0x37, 0x22, 0xdd, 0xb5, 0x07, 0x7f, 0xa5, 0x48, 0xb9, 0x77, 0xc3, 0xdf, 0xf4,
+ 0xd1, 0x0f, 0xc1, 0x3a, 0x0c, 0x87, 0xc2, 0xdf, 0xb1, 0x90, 0x6f, 0xa0, 0x9f, 0x9c, 0xc4, 0xd8,
+ 0x8e, 0x4d, 0x78, 0x38, 0x4a, 0x22, 0x99, 0x8e, 0x94, 0xc5, 0x47, 0xe8, 0x48, 0xd9, 0xcb, 0x23,
+ 0x77, 0xe0, 0x38, 0x3c, 0x72, 0xed, 0xaf, 0xb2, 0x9b, 0x53, 0x2f, 0x3f, 0x01, 0xa6, 0xea, 0x9a,
+ 0x79, 0xc7, 0xda, 0x5d, 0x56, 0x96, 0xe8, 0x54, 0x0e, 0x73, 0xf5, 0x0b, 0x16, 0x9c, 0xcb, 0xf8,
+ 0x2a, 0x8d, 0xd3, 0x7a, 0x16, 0x46, 0x9c, 0x4e, 0xc3, 0x55, 0x7b, 0x59, 0xd3, 0x27, 0x2e, 0x8a,
+ 0x72, 0xac, 0x30, 0xd0, 0x32, 0x4c, 0x93, 0xfb, 0x6d, 0x97, 0xab, 0x52, 0x75, 0xab, 0xd6, 0x22,
+ 0x77, 0x0d, 0x5b, 0x49, 0x02, 0x71, 0x1a, 0x5f, 0x45, 0x41, 0x29, 0xe6, 0x46, 0x41, 0xf9, 0xdb,
+ 0x16, 0x8c, 0x2a, 0x8f, 0xbf, 0x47, 0x3e, 0xda, 0x1f, 0x37, 0x47, 0xfb, 0xf1, 0x2e, 0xa3, 0x9d,
+ 0x33, 0xcc, 0xbf, 0x5d, 0x50, 0xfd, 0xad, 0xfa, 0x41, 0xd4, 0x07, 0x07, 0xf7, 0xf0, 0x76, 0xf8,
+ 0x57, 0x61, 0xd4, 0x69, 0xb7, 0x25, 0x40, 0xda, 0xa0, 0xb1, 0x30, 0xbd, 0x71, 0x31, 0xd6, 0x71,
+ 0x94, 0x5b, 0x40, 0x31, 0xd7, 0x2d, 0xa0, 0x01, 0x10, 0x39, 0xc1, 0x36, 0x89, 0x68, 0x99, 0x88,
+ 0x58, 0x96, 0x7f, 0xde, 0x74, 0x22, 0xb7, 0xb9, 0xe0, 0x7a, 0x51, 0x18, 0x05, 0x0b, 0x15, 0x2f,
+ 0xba, 0x15, 0xf0, 0x27, 0xa4, 0x16, 0x12, 0x48, 0xd1, 0xc2, 0x1a, 0x5d, 0xe9, 0xdd, 0xce, 0xda,
+ 0x18, 0x34, 0x8d, 0x19, 0xd6, 0x45, 0x39, 0x56, 0x18, 0xf6, 0x47, 0xd8, 0xed, 0xc3, 0xc6, 0xf4,
+ 0x68, 0x31, 0x74, 0xfe, 0xee, 0x98, 0x9a, 0x0d, 0xa6, 0xc9, 0x2c, 0xeb, 0x91, 0x7a, 0xba, 0x1f,
+ 0xf6, 0xb4, 0x61, 0xdd, 0x49, 0x2d, 0x0e, 0xe7, 0x83, 0xbe, 0x23, 0x65, 0xa0, 0xf2, 0x5c, 0x8f,
+ 0x5b, 0xe3, 0x08, 0x26, 0x29, 0x2c, 0x67, 0x07, 0xcb, 0x68, 0x50, 0xa9, 0x8a, 0x7d, 0xa1, 0xe5,
+ 0xec, 0x10, 0x00, 0x1c, 0xe3, 0x50, 0x66, 0x4a, 0xfd, 0x09, 0x67, 0x51, 0x1c, 0xbb, 0x52, 0x61,
+ 0x87, 0x58, 0xc3, 0x40, 0x57, 0x84, 0x40, 0x81, 0xeb, 0x05, 0x1e, 0x4f, 0x08, 0x14, 0xe4, 0x70,
+ 0x69, 0x52, 0xa0, 0xab, 0x30, 0xaa, 0xd2, 0x03, 0x57, 0x79, 0xd6, 0x59, 0xb1, 0xcc, 0x56, 0xe2,
+ 0x62, 0xac, 0xe3, 0xa0, 0x0d, 0x98, 0x0c, 0xb9, 0x9c, 0x4d, 0x05, 0x14, 0xe6, 0xf2, 0xca, 0x0f,
+ 0x4a, 0x2b, 0xa0, 0x9a, 0x09, 0x3e, 0x64, 0x45, 0xfc, 0x74, 0x92, 0x1e, 0xe8, 0x49, 0x12, 0xe8,
+ 0x35, 0x98, 0x68, 0xfa, 0x4e, 0x63, 0xc9, 0x69, 0x3a, 0x5e, 0x9d, 0x8d, 0xcf, 0x88, 0x99, 0x65,
+ 0xf2, 0xa6, 0x01, 0xc5, 0x09, 0x6c, 0xca, 0xbc, 0xe9, 0x25, 0x22, 0x08, 0xb6, 0xe3, 0x6d, 0x93,
+ 0x50, 0x24, 0x7b, 0x65, 0xcc, 0xdb, 0xcd, 0x1c, 0x1c, 0x9c, 0x5b, 0x1b, 0xbd, 0x0c, 0x63, 0xf2,
+ 0xf3, 0xb5, 0x80, 0x0d, 0xb1, 0x87, 0x85, 0x06, 0xc3, 0x06, 0x26, 0xba, 0x07, 0xa7, 0xe5, 0xff,
+ 0x8d, 0xc0, 0xd9, 0xda, 0x72, 0xeb, 0xc2, 0x8b, 0x99, 0xbb, 0x62, 0x2e, 0x4a, 0x7f, 0xc1, 0x95,
+ 0x2c, 0xa4, 0xc3, 0x83, 0xf9, 0x0b, 0x62, 0xd4, 0x32, 0xe1, 0x6c, 0x12, 0xb3, 0xe9, 0xa3, 0x35,
+ 0x98, 0xd9, 0x21, 0x4e, 0x33, 0xda, 0x59, 0xde, 0x21, 0xf5, 0x5d, 0xb9, 0xe9, 0x58, 0x18, 0x08,
+ 0xcd, 0x2f, 0xe1, 0x7a, 0x1a, 0x05, 0x67, 0xd5, 0x43, 0x6f, 0xc2, 0x6c, 0xbb, 0xb3, 0xd9, 0x74,
+ 0xc3, 0x9d, 0x75, 0x3f, 0x62, 0xa6, 0x40, 0x2a, 0xdb, 0xb0, 0x88, 0x17, 0xa1, 0x02, 0x6d, 0x54,
+ 0x73, 0xf0, 0x70, 0x2e, 0x05, 0xf4, 0x36, 0x9c, 0x4e, 0x2c, 0x06, 0xe1, 0x31, 0x3f, 0x91, 0x9f,
+ 0x52, 0xa0, 0x96, 0x55, 0x41, 0x04, 0x9f, 0xc8, 0x02, 0xe1, 0xec, 0x26, 0xd0, 0x2b, 0x00, 0x6e,
+ 0x7b, 0xd5, 0x69, 0xb9, 0x4d, 0xfa, 0x5c, 0x9c, 0x61, 0xeb, 0x84, 0x3e, 0x1d, 0xa0, 0x52, 0x95,
+ 0xa5, 0xf4, 0x7c, 0x16, 0xff, 0xf6, 0xb1, 0x86, 0x8d, 0xaa, 0x30, 0x21, 0xfe, 0xed, 0x8b, 0x69,
+ 0x9d, 0x56, 0xce, 0xe9, 0x13, 0xb2, 0x86, 0x9a, 0x4b, 0x64, 0x96, 0xb0, 0xd9, 0x4b, 0xd4, 0x47,
+ 0xdb, 0x70, 0x4e, 0x24, 0xa6, 0x26, 0xfa, 0x3a, 0x95, 0xf3, 0x10, 0xb2, 0x58, 0xfe, 0x23, 0xdc,
+ 0xed, 0x61, 0xb1, 0x1b, 0x22, 0xee, 0x4e, 0x87, 0xde, 0xef, 0xfa, 0x72, 0xe7, 0xee, 0xa0, 0xa7,
+ 0xb9, 0x79, 0x12, 0xbd, 0xdf, 0x6f, 0x26, 0x81, 0x38, 0x8d, 0x8f, 0x42, 0x38, 0xed, 0x7a, 0x59,
+ 0xab, 0xfb, 0x0c, 0x23, 0xf4, 0x31, 0xee, 0x09, 0xdb, 0x7d, 0x65, 0x67, 0xc2, 0xf9, 0xca, 0xce,
+ 0xa4, 0xfd, 0xce, 0xac, 0xf0, 0x7e, 0xc7, 0xa2, 0xb5, 0x35, 0x4e, 0x1d, 0x7d, 0x16, 0xc6, 0xf4,
+ 0x0f, 0x13, 0x5c, 0xc7, 0xa5, 0x6c, 0x46, 0x56, 0x3b, 0x1f, 0x38, 0x9f, 0xaf, 0xce, 0x00, 0x1d,
+ 0x86, 0x0d, 0x8a, 0xa8, 0x9e, 0xe1, 0x33, 0x7e, 0xa5, 0x3f, 0xae, 0xa6, 0x7f, 0x23, 0x34, 0x02,
+ 0xd9, 0xcb, 0x1e, 0xdd, 0x84, 0x91, 0x7a, 0xd3, 0x25, 0x5e, 0x54, 0xa9, 0x76, 0x8b, 0xf2, 0xb6,
+ 0x2c, 0x70, 0xc4, 0x3e, 0x12, 0xa1, 0xf9, 0x79, 0x19, 0x56, 0x14, 0xec, 0x5f, 0x2f, 0xc0, 0x7c,
+ 0x8f, 0x3c, 0x0f, 0x09, 0x95, 0x94, 0xd5, 0x97, 0x4a, 0x6a, 0x51, 0xa6, 0xd4, 0x5e, 0x4f, 0x48,
+ 0xbb, 0x12, 0xe9, 0xb2, 0x63, 0x99, 0x57, 0x12, 0xbf, 0x6f, 0x17, 0x01, 0x5d, 0xab, 0x35, 0xd0,
+ 0xd3, 0xc9, 0xc5, 0xd0, 0x66, 0x0f, 0xf6, 0xff, 0x04, 0xce, 0xd5, 0x4c, 0xda, 0x5f, 0x2d, 0xc0,
+ 0x69, 0x35, 0x84, 0xdf, 0xba, 0x03, 0x77, 0x3b, 0x3d, 0x70, 0xc7, 0xa0, 0xd7, 0xb5, 0x6f, 0xc1,
+ 0x10, 0x0f, 0x5b, 0xd7, 0x07, 0xeb, 0x7d, 0xd1, 0x8c, 0xf0, 0xaa, 0xb8, 0x3d, 0x23, 0xca, 0xeb,
+ 0xf7, 0x5b, 0x30, 0xb9, 0xb1, 0x5c, 0xad, 0xf9, 0xf5, 0x5d, 0x12, 0x2d, 0xf2, 0xa7, 0x12, 0xd6,
+ 0xbc, 0x6b, 0x1f, 0x86, 0x3d, 0xce, 0x62, 0xbc, 0x2f, 0xc0, 0xc0, 0x8e, 0x1f, 0x46, 0x49, 0xa3,
+ 0x8f, 0xeb, 0x7e, 0x18, 0x61, 0x06, 0xb1, 0x7f, 0xcf, 0x82, 0xc1, 0x0d, 0xc7, 0xf5, 0x22, 0xa9,
+ 0x20, 0xb0, 0x72, 0x14, 0x04, 0xfd, 0x7c, 0x17, 0x7a, 0x09, 0x86, 0xc8, 0xd6, 0x16, 0xa9, 0x47,
+ 0x62, 0x56, 0x65, 0x68, 0x82, 0xa1, 0x15, 0x56, 0x4a, 0x79, 0x41, 0xd6, 0x18, 0xff, 0x8b, 0x05,
+ 0x32, 0xba, 0x0b, 0xa5, 0xc8, 0x6d, 0x91, 0xc5, 0x46, 0x43, 0xa8, 0xcd, 0x1f, 0x22, 0xbc, 0xc2,
+ 0x86, 0x24, 0x80, 0x63, 0x5a, 0xf6, 0x97, 0x0a, 0x00, 0x71, 0xbc, 0x9f, 0x5e, 0x9f, 0xb8, 0x94,
+ 0x52, 0xa8, 0x5e, 0xca, 0x50, 0xa8, 0xa2, 0x98, 0x60, 0x86, 0x36, 0x55, 0x0d, 0x53, 0xb1, 0xaf,
+ 0x61, 0x1a, 0x38, 0xca, 0x30, 0x2d, 0xc3, 0x74, 0x1c, 0xaf, 0xc8, 0x0c, 0xd7, 0xc6, 0xae, 0xcf,
+ 0x8d, 0x24, 0x10, 0xa7, 0xf1, 0x6d, 0x02, 0x17, 0x54, 0xd8, 0x16, 0x71, 0xa3, 0x31, 0xab, 0x6c,
+ 0x5d, 0x41, 0xdd, 0x63, 0x9c, 0x62, 0x8d, 0x71, 0x21, 0x57, 0x63, 0xfc, 0x53, 0x16, 0x9c, 0x4a,
+ 0xb6, 0xc3, 0xfc, 0x71, 0xbf, 0x68, 0xc1, 0x69, 0xa6, 0x37, 0x67, 0xad, 0xa6, 0xb5, 0xf4, 0x2f,
+ 0x76, 0x0d, 0x45, 0x93, 0xd3, 0xe3, 0x38, 0x06, 0xc6, 0x5a, 0x16, 0x69, 0x9c, 0xdd, 0xa2, 0xfd,
+ 0xef, 0x0b, 0x30, 0x9b, 0x17, 0xc3, 0x86, 0x39, 0x6d, 0x38, 0xf7, 0x6b, 0xbb, 0xe4, 0x9e, 0x30,
+ 0x8d, 0x8f, 0x9d, 0x36, 0x78, 0x31, 0x96, 0xf0, 0x64, 0xe8, 0xfe, 0x42, 0x9f, 0xa1, 0xfb, 0x77,
+ 0x60, 0xfa, 0xde, 0x0e, 0xf1, 0x6e, 0x7b, 0xa1, 0x13, 0xb9, 0xe1, 0x96, 0xcb, 0x74, 0xcc, 0x7c,
+ 0xdd, 0xbc, 0x22, 0x0d, 0xd8, 0xef, 0x26, 0x11, 0x0e, 0x0f, 0xe6, 0xcf, 0x19, 0x05, 0x71, 0x97,
+ 0xf9, 0x41, 0x82, 0xd3, 0x44, 0xd3, 0x99, 0x0f, 0x06, 0x1e, 0x61, 0xe6, 0x03, 0xfb, 0x8b, 0x16,
+ 0x9c, 0xcd, 0x4d, 0xba, 0x8a, 0x2e, 0xc3, 0x88, 0xd3, 0x76, 0xb9, 0x98, 0x5e, 0x1c, 0xa3, 0x4c,
+ 0x1c, 0x54, 0xad, 0x70, 0x21, 0xbd, 0x82, 0xaa, 0x74, 0xf3, 0x85, 0xdc, 0x74, 0xf3, 0x3d, 0xb3,
+ 0xc7, 0xdb, 0xdf, 0x67, 0x81, 0x70, 0x38, 0xed, 0xe3, 0xec, 0xfe, 0x14, 0x8c, 0xed, 0xa5, 0xb3,
+ 0x23, 0x5d, 0xc8, 0xf7, 0xc0, 0x15, 0x39, 0x91, 0x14, 0x43, 0x66, 0x64, 0x42, 0x32, 0x68, 0xd9,
+ 0x0d, 0x10, 0xd0, 0x32, 0x61, 0x42, 0xe8, 0xde, 0xbd, 0x79, 0x1e, 0xa0, 0xc1, 0x70, 0xb5, 0x9c,
+ 0xfd, 0xea, 0x66, 0x2e, 0x2b, 0x08, 0xd6, 0xb0, 0xec, 0x7f, 0x53, 0x80, 0x51, 0x99, 0x8d, 0xa7,
+ 0xe3, 0xf5, 0x23, 0x2a, 0x3a, 0x52, 0x7a, 0x4e, 0x74, 0x05, 0x4a, 0x4c, 0x96, 0x59, 0x8d, 0x25,
+ 0x6c, 0x4a, 0x92, 0xb0, 0x26, 0x01, 0x38, 0xc6, 0xa1, 0xbb, 0x28, 0xec, 0x6c, 0x32, 0xf4, 0x84,
+ 0x7b, 0x64, 0x8d, 0x17, 0x63, 0x09, 0x47, 0x9f, 0x80, 0x29, 0x5e, 0x2f, 0xf0, 0xdb, 0xce, 0x36,
+ 0xd7, 0x7f, 0x0c, 0xaa, 0x00, 0x0a, 0x53, 0x6b, 0x09, 0xd8, 0xe1, 0xc1, 0xfc, 0xa9, 0x64, 0x19,
+ 0x53, 0xec, 0xa5, 0xa8, 0x30, 0x33, 0x27, 0xde, 0x08, 0xdd, 0xfd, 0x29, 0xeb, 0xa8, 0x18, 0x84,
+ 0x75, 0x3c, 0xfb, 0xb3, 0x80, 0xd2, 0x79, 0x89, 0xd0, 0xeb, 0xdc, 0xb6, 0xd5, 0x0d, 0x48, 0xa3,
+ 0x9b, 0xa2, 0x4f, 0x0f, 0x13, 0x20, 0x3d, 0x9b, 0x78, 0x2d, 0xac, 0xea, 0xdb, 0xff, 0x6f, 0x11,
+ 0xa6, 0x92, 0xbe, 0xdc, 0xe8, 0x3a, 0x0c, 0x71, 0xd6, 0x43, 0x90, 0xef, 0x62, 0x47, 0xa2, 0x79,
+ 0x80, 0xb3, 0x43, 0x58, 0x70, 0x2f, 0xa2, 0x3e, 0x7a, 0x13, 0x46, 0x1b, 0xfe, 0x3d, 0xef, 0x9e,
+ 0x13, 0x34, 0x16, 0xab, 0x15, 0xb1, 0x9c, 0x33, 0x1f, 0xb6, 0xe5, 0x18, 0x4d, 0xf7, 0x2a, 0x67,
+ 0x3a, 0xd3, 0x18, 0x84, 0x75, 0x72, 0x68, 0x83, 0x05, 0x33, 0xdf, 0x72, 0xb7, 0xd7, 0x9c, 0x76,
+ 0x37, 0x47, 0x87, 0x65, 0x89, 0xa4, 0x51, 0x1e, 0x17, 0x11, 0xcf, 0x39, 0x00, 0xc7, 0x84, 0xd0,
+ 0xe7, 0x61, 0x26, 0xcc, 0x11, 0xb7, 0xe7, 0xa5, 0xa9, 0xeb, 0x26, 0x81, 0x5e, 0x7a, 0xec, 0xc1,
+ 0xc1, 0xfc, 0x4c, 0x96, 0x60, 0x3e, 0xab, 0x19, 0xfb, 0xc7, 0x4e, 0x81, 0xb1, 0x89, 0x8d, 0xac,
+ 0xa5, 0xd6, 0x31, 0x65, 0x2d, 0xc5, 0x30, 0x42, 0x5a, 0xed, 0x68, 0xbf, 0xec, 0x06, 0xdd, 0x72,
+ 0x86, 0xaf, 0x08, 0x9c, 0x34, 0x4d, 0x09, 0xc1, 0x8a, 0x4e, 0x76, 0x6a, 0xd9, 0xe2, 0x37, 0x30,
+ 0xb5, 0xec, 0xc0, 0x09, 0xa6, 0x96, 0x5d, 0x87, 0xe1, 0x6d, 0x37, 0xc2, 0xa4, 0xed, 0x0b, 0xa6,
+ 0x3f, 0x73, 0x1d, 0x5e, 0xe3, 0x28, 0xe9, 0x24, 0x86, 0x02, 0x80, 0x25, 0x11, 0xf4, 0xba, 0xda,
+ 0x81, 0x43, 0xf9, 0x0f, 0xf3, 0xb4, 0xc1, 0x43, 0xe6, 0x1e, 0x14, 0x09, 0x64, 0x87, 0x1f, 0x36,
+ 0x81, 0xec, 0xaa, 0x4c, 0xfb, 0x3a, 0x92, 0xef, 0x95, 0xc4, 0xb2, 0xba, 0xf6, 0x48, 0xf6, 0x7a,
+ 0x47, 0x4f, 0x95, 0x5b, 0xca, 0x3f, 0x09, 0x54, 0x16, 0xdc, 0x3e, 0x13, 0xe4, 0x7e, 0x9f, 0x05,
+ 0xa7, 0xdb, 0x59, 0x59, 0xa3, 0x85, 0x6d, 0xc0, 0x4b, 0x7d, 0x27, 0xa6, 0x36, 0x1a, 0x64, 0x32,
+ 0xb5, 0x4c, 0x34, 0x9c, 0xdd, 0x1c, 0x1d, 0xe8, 0x60, 0xb3, 0x21, 0x74, 0xd4, 0x17, 0x73, 0x32,
+ 0xed, 0x76, 0xc9, 0xaf, 0xbb, 0x91, 0x91, 0xd5, 0xf5, 0xfd, 0x79, 0x59, 0x5d, 0xfb, 0xce, 0xe5,
+ 0xfa, 0xba, 0xca, 0xb1, 0x3b, 0x9e, 0xbf, 0x94, 0x78, 0x06, 0xdd, 0x9e, 0x99, 0x75, 0x5f, 0x57,
+ 0x99, 0x75, 0xbb, 0x44, 0xaa, 0xe5, 0x79, 0x73, 0x7b, 0xe6, 0xd3, 0xd5, 0x72, 0xe2, 0x4e, 0x1e,
+ 0x4f, 0x4e, 0x5c, 0xe3, 0xaa, 0xe1, 0x69, 0x59, 0x9f, 0xe9, 0x71, 0xd5, 0x18, 0x74, 0xbb, 0x5f,
+ 0x36, 0x3c, 0xff, 0xef, 0xf4, 0x43, 0xe5, 0xff, 0xbd, 0xa3, 0xe7, 0xd3, 0x45, 0x3d, 0x12, 0xc6,
+ 0x52, 0xa4, 0x3e, 0xb3, 0xe8, 0xde, 0xd1, 0x2f, 0xc0, 0x99, 0x7c, 0xba, 0xea, 0x9e, 0x4b, 0xd3,
+ 0xcd, 0xbc, 0x02, 0x53, 0xd9, 0x79, 0x4f, 0x9d, 0x4c, 0x76, 0xde, 0xd3, 0xc7, 0x9e, 0x9d, 0xf7,
+ 0xcc, 0x09, 0x64, 0xe7, 0x7d, 0xec, 0x04, 0xb3, 0xf3, 0xde, 0x61, 0x06, 0x35, 0x3c, 0x6c, 0x8f,
+ 0x88, 0xac, 0x9b, 0x1d, 0xc5, 0x35, 0x2b, 0xb6, 0x0f, 0xff, 0x38, 0x05, 0xc2, 0x31, 0xa9, 0x8c,
+ 0xac, 0xbf, 0xb3, 0x8f, 0x20, 0xeb, 0xef, 0x7a, 0x9c, 0xf5, 0xf7, 0x6c, 0xfe, 0x54, 0x67, 0xb8,
+ 0x60, 0xe4, 0xe4, 0xfa, 0xbd, 0xa3, 0xe7, 0xe8, 0x7d, 0xbc, 0x8b, 0xd6, 0x24, 0x4b, 0xf0, 0xd8,
+ 0x25, 0x33, 0xef, 0x6b, 0x3c, 0x33, 0xef, 0x13, 0xf9, 0x27, 0x79, 0xf2, 0xba, 0x33, 0xf2, 0xf1,
+ 0xd2, 0x7e, 0xa9, 0x18, 0x8e, 0x2c, 0x86, 0x70, 0x4e, 0xbf, 0x54, 0x10, 0xc8, 0x74, 0xbf, 0x14,
+ 0x08, 0xc7, 0xa4, 0xec, 0x1f, 0x28, 0xc0, 0xf9, 0xee, 0xfb, 0x2d, 0x96, 0xa6, 0x56, 0x63, 0x25,
+ 0x72, 0x42, 0x9a, 0xca, 0xdf, 0x6c, 0x31, 0x56, 0xdf, 0x21, 0xe9, 0xae, 0xc1, 0xb4, 0xf2, 0xdd,
+ 0x68, 0xba, 0xf5, 0xfd, 0xf5, 0xf8, 0xe5, 0xab, 0xfc, 0xdd, 0x6b, 0x49, 0x04, 0x9c, 0xae, 0x83,
+ 0x16, 0x61, 0xd2, 0x28, 0xac, 0x94, 0xc5, 0xdb, 0x4c, 0x89, 0x6f, 0x6b, 0x26, 0x18, 0x27, 0xf1,
+ 0xed, 0x2f, 0x5b, 0xf0, 0x58, 0x4e, 0x5a, 0xbb, 0xbe, 0x23, 0xae, 0x6d, 0xc1, 0x64, 0xdb, 0xac,
+ 0xda, 0x23, 0x48, 0xa4, 0x91, 0x3c, 0x4f, 0xf5, 0x35, 0x01, 0xc0, 0x49, 0xa2, 0xf6, 0xcf, 0x14,
+ 0xe0, 0x5c, 0x57, 0x63, 0x44, 0x84, 0xe1, 0xcc, 0x76, 0x2b, 0x74, 0x96, 0x03, 0xd2, 0x20, 0x5e,
+ 0xe4, 0x3a, 0xcd, 0x5a, 0x9b, 0xd4, 0x35, 0x79, 0x38, 0xb3, 0xea, 0xbb, 0xb6, 0x56, 0x5b, 0x4c,
+ 0x63, 0xe0, 0x9c, 0x9a, 0x68, 0x15, 0x50, 0x1a, 0x22, 0x66, 0x98, 0x45, 0xa3, 0x4e, 0xd3, 0xc3,
+ 0x19, 0x35, 0xd0, 0x47, 0x60, 0x5c, 0x19, 0x39, 0x6a, 0x33, 0xce, 0x0e, 0x76, 0xac, 0x03, 0xb0,
+ 0x89, 0x87, 0xae, 0xf2, 0x70, 0xe6, 0x22, 0xf0, 0xbd, 0x10, 0x9e, 0x4f, 0xca, 0x58, 0xe5, 0xa2,
+ 0x18, 0xeb, 0x38, 0x4b, 0x97, 0x7f, 0xe3, 0x0f, 0xce, 0xbf, 0xef, 0xb7, 0xfe, 0xe0, 0xfc, 0xfb,
+ 0x7e, 0xf7, 0x0f, 0xce, 0xbf, 0xef, 0xbb, 0x1e, 0x9c, 0xb7, 0x7e, 0xe3, 0xc1, 0x79, 0xeb, 0xb7,
+ 0x1e, 0x9c, 0xb7, 0x7e, 0xf7, 0xc1, 0x79, 0xeb, 0xf7, 0x1f, 0x9c, 0xb7, 0xbe, 0xf4, 0x87, 0xe7,
+ 0xdf, 0xf7, 0xa9, 0xc2, 0xde, 0xd5, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x11, 0xbd, 0x2b, 0x2b,
+ 0x30, 0x04, 0x01, 0x00,
}
func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) {
@@ -9923,53 +9894,6 @@ func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error
return len(dAtA) - i, nil
}
-func (m *EphemeralContainers) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *EphemeralContainers) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *EphemeralContainers) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.EphemeralContainers) > 0 {
- for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- {
- size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintGenerated(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
func (m *EphemeralVolumeSource) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@@ -13230,6 +13154,18 @@ func (m *PersistentVolumeClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, erro
_ = i
var l int
_ = l
+ if m.DataSourceRef != nil {
+ {
+ size, err := m.DataSourceRef.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
if m.DataSource != nil {
{
size, err := m.DataSource.MarshalToSizedBuffer(dAtA[:i])
@@ -18111,17 +18047,6 @@ func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i--
dAtA[i] = 0x8a
}
- if len(m.TopologyKeys) > 0 {
- for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.TopologyKeys[iNdEx])
- copy(dAtA[i:], m.TopologyKeys[iNdEx])
- i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx])))
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x82
- }
- }
if m.SessionAffinityConfig != nil {
{
size, err := m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i])
@@ -19496,6 +19421,16 @@ func (m *WindowsSecurityContextOptions) MarshalToSizedBuffer(dAtA []byte) (int,
_ = i
var l int
_ = l
+ if m.HostProcess != nil {
+ i--
+ if *m.HostProcess {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
if m.RunAsUserName != nil {
i -= len(*m.RunAsUserName)
copy(dAtA[i:], *m.RunAsUserName)
@@ -20608,23 +20543,6 @@ func (m *EphemeralContainerCommon) Size() (n int) {
return n
}
-func (m *EphemeralContainers) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ObjectMeta.Size()
- n += 1 + l + sovGenerated(uint64(l))
- if len(m.EphemeralContainers) > 0 {
- for _, e := range m.EphemeralContainers {
- l = e.Size()
- n += 1 + l + sovGenerated(uint64(l))
- }
- }
- return n
-}
-
func (m *EphemeralVolumeSource) Size() (n int) {
if m == nil {
return 0
@@ -21838,6 +21756,10 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) {
l = m.DataSource.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.DataSourceRef != nil {
+ l = m.DataSourceRef.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
return n
}
@@ -23602,12 +23524,6 @@ func (m *ServiceSpec) Size() (n int) {
l = m.SessionAffinityConfig.Size()
n += 1 + l + sovGenerated(uint64(l))
}
- if len(m.TopologyKeys) > 0 {
- for _, s := range m.TopologyKeys {
- l = len(s)
- n += 2 + l + sovGenerated(uint64(l))
- }
- }
if m.IPFamilyPolicy != nil {
l = len(*m.IPFamilyPolicy)
n += 2 + l + sovGenerated(uint64(l))
@@ -24098,6 +24014,9 @@ func (m *WindowsSecurityContextOptions) Size() (n int) {
l = len(*m.RunAsUserName)
n += 1 + l + sovGenerated(uint64(l))
}
+ if m.HostProcess != nil {
+ n += 2
+ }
return n
}
@@ -24906,22 +24825,6 @@ func (this *EphemeralContainerCommon) String() string {
}, "")
return s
}
-func (this *EphemeralContainers) String() string {
- if this == nil {
- return "nil"
- }
- repeatedStringForEphemeralContainers := "[]EphemeralContainer{"
- for _, f := range this.EphemeralContainers {
- repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + ","
- }
- repeatedStringForEphemeralContainers += "}"
- s := strings.Join([]string{`&EphemeralContainers{`,
- `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
- `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`,
- `}`,
- }, "")
- return s
-}
func (this *EphemeralVolumeSource) String() string {
if this == nil {
return "nil"
@@ -25885,6 +25788,7 @@ func (this *PersistentVolumeClaimSpec) String() string {
`StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`,
`VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`,
`DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`,
+ `DataSourceRef:` + strings.Replace(this.DataSourceRef.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`,
`}`,
}, "")
return s
@@ -27224,7 +27128,6 @@ func (this *ServiceSpec) String() string {
`HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`,
`PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`,
`SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`,
- `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`,
`IPFamilyPolicy:` + valueToStringGenerated(this.IPFamilyPolicy) + `,`,
`ClusterIPs:` + fmt.Sprintf("%v", this.ClusterIPs) + `,`,
`IPFamilies:` + fmt.Sprintf("%v", this.IPFamilies) + `,`,
@@ -27519,6 +27422,7 @@ func (this *WindowsSecurityContextOptions) String() string {
`GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`,
`GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`,
`RunAsUserName:` + valueToStringGenerated(this.RunAsUserName) + `,`,
+ `HostProcess:` + valueToStringGenerated(this.HostProcess) + `,`,
`}`,
}, "")
return s
@@ -36642,123 +36546,6 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error {
}
return nil
}
-func (m *EphemeralContainers) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: EphemeralContainers: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: EphemeralContainers: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{})
- if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipGenerated(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if (skippy < 0) || (iNdEx+skippy) < 0 {
- return ErrInvalidLengthGenerated
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
func (m *EphemeralVolumeSource) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@@ -47641,6 +47428,42 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DataSourceRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DataSourceRef == nil {
+ m.DataSourceRef = &TypedLocalObjectReference{}
+ }
+ if err := m.DataSourceRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -63111,38 +62934,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
- case 16:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowGenerated
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthGenerated
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthGenerated
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
case 17:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IPFamilyPolicy", wireType)
@@ -67320,6 +67111,27 @@ func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error {
s := string(dAtA[iNdEx:postIndex])
m.RunAsUserName = &s
iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HostProcess", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.HostProcess = &b
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
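The generated marshal/unmarshal code above hard-codes the protobuf key bytes for the two added fields: 0x42 for PersistentVolumeClaimSpec.dataSourceRef and 0x20 for WindowsSecurityContextOptions.hostProcess. Those constants follow from the usual protobuf key formula (field number shifted left three bits, OR'd with the wire type). A minimal standalone Go sketch, separate from the vendored code:

package main

import "fmt"

// protoTag computes the single-byte protobuf key for small field numbers.
func protoTag(fieldNum, wireType int) byte {
	return byte(fieldNum<<3 | wireType)
}

func main() {
	// dataSourceRef: field 8, wire type 2 (length-delimited message) -> 0x42,
	// matching the dAtA[i] = 0x42 written by MarshalToSizedBuffer above.
	fmt.Printf("dataSourceRef tag: %#x\n", protoTag(8, 2))
	// hostProcess: field 4, wire type 0 (varint-encoded bool) -> 0x20,
	// matching the dAtA[i] = 0x20 written above.
	fmt.Printf("hostProcess tag:   %#x\n", protoTag(4, 0))
}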
diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto
index 152ea29fd..ff63fd29f 100644
--- a/vendor/k8s.io/api/core/v1/generated.proto
+++ b/vendor/k8s.io/api/core/v1/generated.proto
@@ -498,6 +498,7 @@ message ConfigMapEnvSource {
}
// Selects a key from a ConfigMap.
+// +structType=atomic
message ConfigMapKeySelector {
// The ConfigMap to select from.
optional LocalObjectReference localObjectReference = 1;
@@ -521,6 +522,7 @@ message ConfigMapList {
}
// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+// This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
message ConfigMapNodeConfigSource {
// Namespace is the metadata.namespace of the referenced ConfigMap.
// This field is required in all cases.
@@ -621,10 +623,10 @@ message Container {
// Entrypoint array. Not executed within a shell.
// The docker image's ENTRYPOINT is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
- // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
- // regardless of whether the variable exists or not.
- // Cannot be updated.
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
repeated string command = 3;
@@ -632,10 +634,10 @@ message Container {
// Arguments to the entrypoint.
// The docker image's CMD is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
- // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
- // regardless of whether the variable exists or not.
- // Cannot be updated.
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
repeated string args = 4;
@@ -754,8 +756,8 @@ message Container {
// +optional
optional string imagePullPolicy = 14;
- // Security options the pod should run with.
- // More info: https://kubernetes.io/docs/concepts/policy/security-context/
+ // SecurityContext defines the security options the container should be run with.
+ // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional
optional SecurityContext securityContext = 15;
@@ -786,6 +788,7 @@ message Container {
message ContainerImage {
// Names by which this image is known.
// e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
+ // +optional
repeated string names = 1;
// The size of the image in bytes.
@@ -1010,6 +1013,7 @@ message EmptyDirVolumeSource {
}
// EndpointAddress is a tuple that describes single IP address.
+// +structType=atomic
message EndpointAddress {
// The IP of this endpoint.
// May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
@@ -1033,6 +1037,7 @@ message EndpointAddress {
}
// EndpointPort is a tuple that describes a single port.
+// +structType=atomic
message EndpointPort {
// The name of this port. This must match the 'name' field in the
// corresponding ServicePort.
@@ -1056,8 +1061,6 @@ message EndpointPort {
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
- // This is a beta field that is guarded by the ServiceAppProtocol feature
- // gate and enabled by default.
// +optional
optional string appProtocol = 4;
}
@@ -1150,11 +1153,12 @@ message EnvVar {
optional string name = 1;
// Variable references $(VAR_NAME) are expanded
- // using the previous defined environment variables in the container and
+ // using the previously defined environment variables in the container and
// any service environment variables. If a variable cannot be resolved,
- // the reference in the input string will be unchanged. The $(VAR_NAME)
- // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
- // references will never be expanded, regardless of whether the variable
+ // the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ // "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ // Escaped references will never be expanded, regardless of whether the variable
// exists or not.
// Defaults to "".
// +optional
@@ -1226,10 +1230,10 @@ message EphemeralContainerCommon {
// Entrypoint array. Not executed within a shell.
// The docker image's ENTRYPOINT is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
- // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
- // regardless of whether the variable exists or not.
- // Cannot be updated.
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
repeated string command = 3;
@@ -1237,10 +1241,10 @@ message EphemeralContainerCommon {
// Arguments to the entrypoint.
// The docker image's CMD is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
- // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
- // regardless of whether the variable exists or not.
- // Cannot be updated.
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
repeated string args = 4;
@@ -1333,7 +1337,8 @@ message EphemeralContainerCommon {
// +optional
optional string imagePullPolicy = 14;
- // SecurityContext is not allowed for ephemeral containers.
+ // Optional: SecurityContext defines the security options the ephemeral container should be run with.
+ // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
// +optional
optional SecurityContext securityContext = 15;
@@ -1359,19 +1364,6 @@ message EphemeralContainerCommon {
optional bool tty = 18;
}
-// A list of ephemeral containers used with the Pod ephemeralcontainers subresource.
-message EphemeralContainers {
- // +optional
- optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
- // A list of ephemeral containers associated with this pod. New ephemeral containers
- // may be appended to this list, but existing ephemeral containers may not be removed
- // or modified.
- // +patchMergeKey=name
- // +patchStrategy=merge
- repeated EphemeralContainer ephemeralContainers = 2;
-}
-
// Represents an ephemeral volume that is handled by a normal storage driver.
message EphemeralVolumeSource {
// Will be used to create a stand-alone PVC to provision the volume.
@@ -2049,6 +2041,7 @@ message LoadBalancerStatus {
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
+// +structType=atomic
message LocalObjectReference {
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
@@ -2240,6 +2233,7 @@ message NodeCondition {
}
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
+// This API is deprecated since 1.22
message NodeConfigSource {
// ConfigMap is a reference to a Node's ConfigMap
optional ConfigMapNodeConfigSource configMap = 2;
@@ -2330,6 +2324,7 @@ message NodeResources {
// A node selector represents the union of the results of one or more label queries
// over a set of nodes; that is, it represents the OR of the selectors represented
// by the node selector terms.
+// +structType=atomic
message NodeSelector {
// Required. A list of node selector terms. The terms are ORed.
repeated NodeSelectorTerm nodeSelectorTerms = 1;
@@ -2357,6 +2352,7 @@ message NodeSelectorRequirement {
// A null or empty node selector term matches no objects. The requirements of
// them are ANDed.
// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+// +structType=atomic
message NodeSelectorTerm {
// A list of node selector requirements by node's labels.
// +optional
@@ -2393,8 +2389,9 @@ message NodeSpec {
// +optional
repeated Taint taints = 5;
- // If specified, the source to get node configuration from
- // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
+ // Deprecated. If specified, the source of the node's configuration.
+ // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field.
+ // This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
// +optional
optional NodeConfigSource configSource = 6;
@@ -2504,6 +2501,7 @@ message NodeSystemInfo {
}
// ObjectFieldSelector selects an APIVersioned field of an object.
+// +structType=atomic
message ObjectFieldSelector {
// Version of the schema the FieldPath is written in terms of, defaults to "v1".
// +optional
@@ -2529,6 +2527,7 @@ message ObjectFieldSelector {
// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +structType=atomic
message ObjectReference {
// Kind of the referent.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
@@ -2684,13 +2683,32 @@ message PersistentVolumeClaimSpec {
// This field can be used to specify either:
// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
// * An existing PVC (PersistentVolumeClaim)
- // * An existing custom resource that implements data population (Alpha)
- // In order to use custom resource types that implement data population,
- // the AnyVolumeDataSource feature gate must be enabled.
// If the provisioner or an external controller can support the specified data source,
// it will create a new volume based on the contents of the specified data source.
+ // If the AnyVolumeDataSource feature gate is enabled, this field will always have
+ // the same contents as the DataSourceRef field.
// +optional
optional TypedLocalObjectReference dataSource = 7;
+
+ // Specifies the object from which to populate the volume with data, if a non-empty
+ // volume is desired. This may be any local object from a non-empty API group (non
+ // core object) or a PersistentVolumeClaim object.
+ // When this field is specified, volume binding will only succeed if the type of
+ // the specified object matches some installed volume populator or dynamic
+ // provisioner.
+ // This field will replace the functionality of the DataSource field and as such
+ // if both fields are non-empty, they must have the same value. For backwards
+ // compatibility, both fields (DataSource and DataSourceRef) will be set to the same
+ // value automatically if one of them is empty and the other is non-empty.
+ // There are two important differences between DataSource and DataSourceRef:
+ // * While DataSource only allows two specific types of objects, DataSourceRef
+ // allows any non-core object, as well as PersistentVolumeClaim objects.
+ // * While DataSource ignores disallowed values (dropping them), DataSourceRef
+ // preserves all values, and generates an error if a disallowed value is
+ // specified.
+ // (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ // +optional
+ optional TypedLocalObjectReference dataSourceRef = 8;
}
// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
@@ -3024,7 +3042,7 @@ message PodAffinityTerm {
// and the ones listed in the namespaces field.
// null selector and null or empty namespaces list means "this pod's namespace".
// An empty selector ({}) matches all namespaces.
- // This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ // This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 4;
}
@@ -3450,6 +3468,7 @@ message PodSpec {
// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
// +optional
+ // +mapType=atomic
map<string, string> nodeSelector = 7;
// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
@@ -3571,7 +3590,7 @@ message PodSpec {
// If specified, all readiness gates will be evaluated for pod readiness.
// A pod is ready when all its containers are ready AND
// all conditions specified in the readiness gates have status equal to "True"
- // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md
+ // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
// +optional
repeated PodReadinessGate readinessGates = 28;
@@ -3579,7 +3598,7 @@ message PodSpec {
// to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
// empty definition that uses the default runtime handler.
- // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
// This is a beta feature as of Kubernetes v1.14.
// +optional
optional string runtimeClassName = 29;
@@ -3603,8 +3622,8 @@ message PodSpec {
// The RuntimeClass admission controller will reject Pod create requests which have the overhead already
// set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
// defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
- // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
- // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.
// +optional
map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> overhead = 32;
@@ -3893,7 +3912,8 @@ message Probe {
// value overrides the value provided by the pod spec.
// Value must be non-negative integer. The value zero indicates stop immediately via
// the kill signal (no opportunity to shut down).
- // This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.
+ // This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ // Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
// +optional
optional int64 terminationGracePeriodSeconds = 7;
}
@@ -4138,6 +4158,7 @@ message ReplicationControllerSpec {
// controller, if empty defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
+ // +mapType=atomic
map<string, string> selector = 2;
// Template is the object that describes the pod that will be created if
@@ -4178,6 +4199,7 @@ message ReplicationControllerStatus {
}
// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+// +structType=atomic
message ResourceFieldSelector {
// Container name: required for volumes, optional for env vars
// +optional
@@ -4380,6 +4402,7 @@ message ScaleIOVolumeSource {
// A scope selector represents the AND of the selectors represented
// by the scoped-resource selector requirements.
+// +structType=atomic
message ScopeSelector {
// A list of scope selector requirements by scope of the resources.
// +optional
@@ -4475,6 +4498,7 @@ message SecretEnvSource {
}
// SecretKeySelector selects a key of a Secret.
+// +structType=atomic
message SecretKeySelector {
// The name of the secret in the pod's namespace to select from.
optional LocalObjectReference localObjectReference = 1;
@@ -4525,6 +4549,7 @@ message SecretProjection {
// SecretReference represents a Secret Reference. It has enough information to retrieve secret
// in any namespace
+// +structType=atomic
message SecretReference {
// Name is unique within a namespace to reference a secret resource.
// +optional
@@ -4780,8 +4805,6 @@ message ServicePort {
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
- // This is a beta field that is guarded by the ServiceAppProtocol feature
- // gate and enabled by default.
// +optional
optional string appProtocol = 6;
@@ -4841,6 +4864,7 @@ message ServiceSpec {
// Ignored if type is ExternalName.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/
// +optional
+ // +mapType=atomic
map<string, string> selector = 2;
// clusterIP is the IP address of the service and is usually assigned
@@ -4937,7 +4961,7 @@ message ServiceSpec {
// If specified and supported by the platform, this will restrict traffic through the cloud-provider
// load-balancer will be restricted to the specified client IPs. This field will be ignored if the
// cloud-provider does not support the feature."
- // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
+ // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
// +optional
repeated string loadBalancerSourceRanges = 9;
@@ -4984,23 +5008,6 @@ message ServiceSpec {
// +optional
optional SessionAffinityConfig sessionAffinityConfig = 14;
- // topologyKeys is a preference-order list of topology keys which
- // implementations of services should use to preferentially sort endpoints
- // when accessing this Service, it can not be used at the same time as
- // externalTrafficPolicy=Local.
- // Topology keys must be valid label keys and at most 16 keys may be specified.
- // Endpoints are chosen based on the first topology key with available backends.
- // If this field is specified and all entries have no backends that match
- // the topology of the client, the service has no backends for that client
- // and connections should fail.
- // The special value "*" may be used to mean "any topology". This catch-all
- // value, if used, only makes sense as the last value in the list.
- // If this is not specified or empty, no topology constraints will be applied.
- // This field is alpha-level and is only honored by servers that enable the ServiceTopology feature.
- // This field is deprecated and will be removed in a future version.
- // +optional
- repeated string topologyKeys = 16;
-
// IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this
// service, and is gated by the "IPv6DualStack" feature gate. This field
// is usually assigned automatically based on cluster configuration and the
@@ -5035,11 +5042,14 @@ message ServiceSpec {
optional string ipFamilyPolicy = 17;
// allocateLoadBalancerNodePorts defines if NodePorts will be automatically
- // allocated for services with type LoadBalancer. Default is "true". It may be
- // set to "false" if the cluster load-balancer does not rely on NodePorts.
- // allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer
- // and will be cleared if the type is changed to any other type.
- // This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature.
+ // allocated for services with type LoadBalancer. Default is "true". It
+ // may be set to "false" if the cluster load-balancer does not rely on
+ // NodePorts. If the caller requests specific NodePorts (by specifying a
+ // value), those requests will be respected, regardless of this field.
+ // This field may only be set for services with type LoadBalancer and will
+ // be cleared if the type is changed to any other type.
+ // This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature.
+ // +featureGate=ServiceLBNodePortControl
// +optional
optional bool allocateLoadBalancerNodePorts = 20;
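A client-side sketch of opting out of NodePort allocation as described above (illustrative only; the selector and ports are made up, and it assumes a load balancer implementation that does not need NodePorts):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	allocate := false
	svc := corev1.ServiceSpec{
		Type: corev1.ServiceTypeLoadBalancer,
		// Only meaningful for type LoadBalancer; cleared if the type changes.
		AllocateLoadBalancerNodePorts: &allocate,
		Selector:                      map[string]string{"app": "web"},
		Ports: []corev1.ServicePort{{
			Port:       80,
			TargetPort: intstr.FromInt(8080),
		}},
	}
	fmt.Println(*svc.AllocateLoadBalancerNodePorts) // false
}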
@@ -5246,6 +5256,7 @@ message TopologySelectorLabelRequirement {
// The requirements of them are ANDed.
// It provides a subset of functionality as NodeSelectorTerm.
// This is an alpha feature and may change in the future.
+// +structType=atomic
message TopologySelectorTerm {
// A list of topology selector requirements by labels.
// +optional
@@ -5312,6 +5323,7 @@ message TopologySpreadConstraint {
// TypedLocalObjectReference contains enough information to let you locate the
// typed referenced object inside the same namespace.
+// +structType=atomic
message TypedLocalObjectReference {
// APIGroup is the group for the resource being referenced.
// If APIGroup is not specified, the specified Kind must be in the core API group.
@@ -5625,5 +5637,15 @@ message WindowsSecurityContextOptions {
// PodSecurityContext, the value specified in SecurityContext takes precedence.
// +optional
optional string runAsUserName = 3;
+
+ // HostProcess determines if a container should be run as a 'Host Process' container.
+ // This field is alpha-level and will only be honored by components that enable the
+ // WindowsHostProcessContainers feature flag. Setting this field without the feature
+ // flag will result in errors when validating the Pod. All of a Pod's containers must
+ // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess
+ // containers and non-HostProcess containers). In addition, if HostProcess is true
+ // then HostNetwork must also be set to true.
+ // +optional
+ optional bool hostProcess = 4;
}
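A short sketch of the hostProcess constraints spelled out above, using the Go types from this vendored package (the image name is a placeholder):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	hostProcess := true
	runAsUser := "NT AUTHORITY\\SYSTEM"
	spec := corev1.PodSpec{
		// Per the comment above, HostProcess requires HostNetwork to be true,
		// and all containers in the pod must share the same effective value.
		HostNetwork: true,
		SecurityContext: &corev1.PodSecurityContext{
			WindowsOptions: &corev1.WindowsSecurityContextOptions{
				HostProcess:   &hostProcess,
				RunAsUserName: &runAsUser,
			},
		},
		Containers: []corev1.Container{{
			Name:  "hp",
			Image: "example.com/windows-hostprocess:latest", // placeholder image
		}},
	}
	fmt.Println(*spec.SecurityContext.WindowsOptions.HostProcess) // true
}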
diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go
index 8da1ddead..1aac0cb41 100644
--- a/vendor/k8s.io/api/core/v1/register.go
+++ b/vendor/k8s.io/api/core/v1/register.go
@@ -88,7 +88,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&RangeAllocation{},
&ConfigMap{},
&ConfigMapList{},
- &EphemeralContainers{},
)
// Add common types
diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
index 3eadb4567..027d73edd 100644
--- a/vendor/k8s.io/api/core/v1/types.go
+++ b/vendor/k8s.io/api/core/v1/types.go
@@ -30,8 +30,6 @@ const (
NamespaceAll string = ""
// NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats)
NamespaceNodeLease string = "kube-node-lease"
- // TopologyKeyAny is the service topology key that matches any node
- TopologyKeyAny string = "*"
)
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
@@ -494,13 +492,31 @@ type PersistentVolumeClaimSpec struct {
// This field can be used to specify either:
// * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
// * An existing PVC (PersistentVolumeClaim)
- // * An existing custom resource that implements data population (Alpha)
- // In order to use custom resource types that implement data population,
- // the AnyVolumeDataSource feature gate must be enabled.
// If the provisioner or an external controller can support the specified data source,
// it will create a new volume based on the contents of the specified data source.
+ // If the AnyVolumeDataSource feature gate is enabled, this field will always have
+ // the same contents as the DataSourceRef field.
// +optional
DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"`
+ // Specifies the object from which to populate the volume with data, if a non-empty
+ // volume is desired. This may be any local object from a non-empty API group (non
+ // core object) or a PersistentVolumeClaim object.
+ // When this field is specified, volume binding will only succeed if the type of
+ // the specified object matches some installed volume populator or dynamic
+ // provisioner.
+ // This field will replace the functionality of the DataSource field and as such
+ // if both fields are non-empty, they must have the same value. For backwards
+ // compatibility, both fields (DataSource and DataSourceRef) will be set to the same
+ // value automatically if one of them is empty and the other is non-empty.
+ // There are two important differences between DataSource and DataSourceRef:
+ // * While DataSource only allows two specific types of objects, DataSourceRef
+ // allows any non-core object, as well as PersistentVolumeClaim objects.
+ // * While DataSource ignores disallowed values (dropping them), DataSourceRef
+ // preserves all values, and generates an error if a disallowed value is
+ // specified.
+ // (Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+ // +optional
+ DataSourceRef *TypedLocalObjectReference `json:"dataSourceRef,omitempty" protobuf:"bytes,8,opt,name=dataSourceRef"`
}
// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
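For illustration, a sketch of the DataSource/DataSourceRef mirroring rule described above, using the Go types added in this diff (the snapshot name is made up):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	snapshotGroup := "snapshot.storage.k8s.io"
	src := corev1.TypedLocalObjectReference{
		APIGroup: &snapshotGroup,
		Kind:     "VolumeSnapshot",
		Name:     "my-snapshot",
	}
	spec := corev1.PersistentVolumeClaimSpec{
		AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
		// Per the doc comment above: if only one of DataSource/DataSourceRef is set,
		// the other is populated with the same value; if both are set they must match.
		DataSource:    &src,
		DataSourceRef: &src,
	}
	fmt.Println(spec.DataSourceRef.Kind) // VolumeSnapshot
}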
@@ -562,6 +578,9 @@ const (
ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
// can be mounted in read/write mode to many hosts
ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
+ // can be mounted in read/write mode to exactly 1 pod
+ // cannot be used in combination with other access modes
+ ReadWriteOncePod PersistentVolumeAccessMode = "ReadWriteOncePod"
)
type PersistentVolumePhase string
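The new ReadWriteOncePod mode above must appear on its own in a claim's accessModes; a sketch with made-up name and size:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pvc := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "single-pod-claim"},
		Spec: corev1.PersistentVolumeClaimSpec{
			// ReadWriteOncePod cannot be combined with other access modes.
			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOncePod},
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("1Gi"),
				},
			},
		},
	}
	fmt.Println(pvc.Spec.AccessModes[0]) // ReadWriteOncePod
}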
@@ -861,6 +880,7 @@ type CephFSVolumeSource struct {
// SecretReference represents a Secret Reference. It has enough information to retrieve secret
// in any namespace
+// +structType=atomic
type SecretReference struct {
// Name is unique within a namespace to reference a secret resource.
// +optional
@@ -1915,11 +1935,12 @@ type EnvVar struct {
// Optional: no more than one of the following may be specified.
// Variable references $(VAR_NAME) are expanded
- // using the previous defined environment variables in the container and
+ // using the previously defined environment variables in the container and
// any service environment variables. If a variable cannot be resolved,
- // the reference in the input string will be unchanged. The $(VAR_NAME)
- // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
- // references will never be expanded, regardless of whether the variable
+ // the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e.
+ // "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)".
+ // Escaped references will never be expanded, regardless of whether the variable
// exists or not.
// Defaults to "".
// +optional
@@ -1948,6 +1969,7 @@ type EnvVarSource struct {
}
// ObjectFieldSelector selects an APIVersioned field of an object.
+// +structType=atomic
type ObjectFieldSelector struct {
// Version of the schema the FieldPath is written in terms of, defaults to "v1".
// +optional
@@ -1957,6 +1979,7 @@ type ObjectFieldSelector struct {
}
// ResourceFieldSelector represents container resources (cpu, memory) and their output format
+// +structType=atomic
type ResourceFieldSelector struct {
// Container name: required for volumes, optional for env vars
// +optional
@@ -1969,6 +1992,7 @@ type ResourceFieldSelector struct {
}
// Selects a key from a ConfigMap.
+// +structType=atomic
type ConfigMapKeySelector struct {
// The ConfigMap to select from.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
@@ -1980,6 +2004,7 @@ type ConfigMapKeySelector struct {
}
// SecretKeySelector selects a key of a Secret.
+// +structType=atomic
type SecretKeySelector struct {
// The name of the secret in the pod's namespace to select from.
LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"`
@@ -2125,7 +2150,8 @@ type Probe struct {
// value overrides the value provided by the pod spec.
// Value must be non-negative integer. The value zero indicates stop immediately via
// the kill signal (no opportunity to shut down).
- // This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.
+ // This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate.
+ // Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.
// +optional
TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,7,opt,name=terminationGracePeriodSeconds"`
}
@@ -2212,20 +2238,20 @@ type Container struct {
// Entrypoint array. Not executed within a shell.
// The docker image's ENTRYPOINT is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
- // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
- // regardless of whether the variable exists or not.
- // Cannot be updated.
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
// Arguments to the entrypoint.
// The docker image's CMD is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
- // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
- // regardless of whether the variable exists or not.
- // Cannot be updated.
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
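The double-dollar escaping rule restated above is easiest to see with a concrete container (a sketch; PORT is an assumed environment variable):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	c := corev1.Container{
		Name:    "demo",
		Image:   "busybox",
		Command: []string{"sh", "-c"},
		// $(PORT) is expanded from the container's environment by the kubelet;
		// $$(PORT) is reduced to the literal string "$(PORT)" and never expanded.
		Args: []string{"echo listening on $(PORT), literal $$(PORT)"},
		Env: []corev1.EnvVar{
			{Name: "PORT", Value: "8080"},
		},
	}
	fmt.Println(c.Args[0])
	// After expansion the argument becomes:
	//   listening on 8080, literal $(PORT)
}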
@@ -2329,8 +2355,8 @@ type Container struct {
// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
// +optional
ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
- // Security options the pod should run with.
- // More info: https://kubernetes.io/docs/concepts/policy/security-context/
+ // SecurityContext defines the security options the container should be run with.
+ // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
// More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
// +optional
SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
@@ -2522,6 +2548,7 @@ const (
PodFailed PodPhase = "Failed"
// PodUnknown means that for some reason the state of the pod could not be obtained, typically due
// to an error in communicating with the host of the pod.
+ // Deprecated: It isn't being set since 2015 (74da3b14b0c0f658b3bb8d2def5094686d0e9095)
PodUnknown PodPhase = "Unknown"
)
@@ -2616,6 +2643,7 @@ const (
// A node selector represents the union of the results of one or more label queries
// over a set of nodes; that is, it represents the OR of the selectors represented
// by the node selector terms.
+// +structType=atomic
type NodeSelector struct {
//Required. A list of node selector terms. The terms are ORed.
NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
@@ -2624,6 +2652,7 @@ type NodeSelector struct {
// A null or empty node selector term matches no objects. The requirements of
// them are ANDed.
// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+// +structType=atomic
type NodeSelectorTerm struct {
// A list of node selector requirements by node's labels.
// +optional
@@ -2668,7 +2697,10 @@ const (
// The requirements of them are ANDed.
// It provides a subset of functionality as NodeSelectorTerm.
// This is an alpha feature and may change in the future.
+// +structType=atomic
type TopologySelectorTerm struct {
+ // Usage: Fields of type []TopologySelectorTerm must be listType=atomic.
+
// A list of topology selector requirements by labels.
// +optional
MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"`
@@ -2803,7 +2835,7 @@ type PodAffinityTerm struct {
// and the ones listed in the namespaces field.
// null selector and null or empty namespaces list means "this pod's namespace".
// An empty selector ({}) matches all namespaces.
- // This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
+ // This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.
// +optional
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,4,opt,name=namespaceSelector"`
}
@@ -3005,6 +3037,7 @@ type PodSpec struct {
// Selector which must match a node's labels for the pod to be scheduled on that node.
// More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
// +optional
+ // +mapType=atomic
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
// ServiceAccountName is the name of the ServiceAccount to use to run this pod.
@@ -3108,14 +3141,14 @@ type PodSpec struct {
// If specified, all readiness gates will be evaluated for pod readiness.
// A pod is ready when all its containers are ready AND
// all conditions specified in the readiness gates have status equal to "True"
- // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md
+ // More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates
// +optional
ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"`
// RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
// to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
// If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
// empty definition that uses the default runtime handler.
- // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
// This is a beta feature as of Kubernetes v1.14.
// +optional
RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"`
@@ -3136,8 +3169,8 @@ type PodSpec struct {
// The RuntimeClass admission controller will reject Pod create requests which have the overhead already
// set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
// defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
- // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
- // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.
// +optional
Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"`
// TopologySpreadConstraints describes how a group of pods ought to spread across topology
@@ -3423,20 +3456,20 @@ type EphemeralContainerCommon struct {
// Entrypoint array. Not executed within a shell.
// The docker image's ENTRYPOINT is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
- // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
- // regardless of whether the variable exists or not.
- // Cannot be updated.
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"`
// Arguments to the entrypoint.
// The docker image's CMD is used if this is not provided.
// Variable references $(VAR_NAME) are expanded using the container's environment. If a variable
- // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax
- // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
- // regardless of whether the variable exists or not.
- // Cannot be updated.
+ // cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced
+ // to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will
+ // produce the string literal "$(VAR_NAME)". Escaped references will never be expanded, regardless
+ // of whether the variable exists or not. Cannot be updated.
// More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
// +optional
Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
@@ -3514,7 +3547,8 @@ type EphemeralContainerCommon struct {
// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
// +optional
ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"`
- // SecurityContext is not allowed for ephemeral containers.
+ // Optional: SecurityContext defines the security options the ephemeral container should be run with.
+ // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.
// +optional
SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"`
@@ -3678,8 +3712,7 @@ type PodStatusResult struct {
}
// +genclient
-// +genclient:method=GetEphemeralContainers,verb=get,subresource=ephemeralcontainers,result=EphemeralContainers
-// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers,input=EphemeralContainers,result=EphemeralContainers
+// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Pod is a collection of containers that can run on a host. This resource is created
@@ -3785,6 +3818,7 @@ type ReplicationControllerSpec struct {
// controller, if empty defaulted to labels on Pod template.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
// +optional
+ // +mapType=atomic
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// TemplateRef is a reference to an object that describes the pod that will be created if
@@ -4026,11 +4060,6 @@ type LoadBalancerIngress struct {
Ports []PortStatus `json:"ports,omitempty" protobuf:"bytes,4,rep,name=ports"`
}
-const (
- // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service
- MaxServiceTopologyKeys = 16
-)
-
// IPFamily represents the IP Family (IPv4 or IPv6). This type is used
// to express the family of an IP expressed by a type (e.g. service.spec.ipFamilies).
type IPFamily string
@@ -4083,6 +4112,7 @@ type ServiceSpec struct {
// Ignored if type is ExternalName.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/
// +optional
+ // +mapType=atomic
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// clusterIP is the IP address of the service and is usually assigned
@@ -4179,7 +4209,7 @@ type ServiceSpec struct {
// If specified and supported by the platform, traffic through the cloud-provider
// load-balancer will be restricted to the specified client IPs. This field will be ignored if the
// cloud-provider does not support the feature.
- // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
+ // More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/
// +optional
LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"`
@@ -4226,22 +4256,8 @@ type ServiceSpec struct {
// +optional
SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"`
- // topologyKeys is a preference-order list of topology keys which
- // implementations of services should use to preferentially sort endpoints
- // when accessing this Service, it can not be used at the same time as
- // externalTrafficPolicy=Local.
- // Topology keys must be valid label keys and at most 16 keys may be specified.
- // Endpoints are chosen based on the first topology key with available backends.
- // If this field is specified and all entries have no backends that match
- // the topology of the client, the service has no backends for that client
- // and connections should fail.
- // The special value "*" may be used to mean "any topology". This catch-all
- // value, if used, only makes sense as the last value in the list.
- // If this is not specified or empty, no topology constraints will be applied.
- // This field is alpha-level and is only honored by servers that enable the ServiceTopology feature.
- // This field is deprecated and will be removed in a future version.
- // +optional
- TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"`
+ // TopologyKeys is tombstoned to show why 16 is a reserved protobuf tag.
+ // TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"`
// IPFamily is tombstoned to show why 15 is a reserved protobuf tag.
// IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"`
@@ -4280,11 +4296,14 @@ type ServiceSpec struct {
IPFamilyPolicy *IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty" protobuf:"bytes,17,opt,name=ipFamilyPolicy,casttype=IPFamilyPolicyType"`
// allocateLoadBalancerNodePorts defines if NodePorts will be automatically
- // allocated for services with type LoadBalancer. Default is "true". It may be
- // set to "false" if the cluster load-balancer does not rely on NodePorts.
- // allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer
- // and will be cleared if the type is changed to any other type.
- // This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature.
+ // allocated for services with type LoadBalancer. Default is "true". It
+ // may be set to "false" if the cluster load-balancer does not rely on
+ // NodePorts. If the caller requests specific NodePorts (by specifying a
+ // value), those requests will be respected, regardless of this field.
+ // This field may only be set for services with type LoadBalancer and will
+ // be cleared if the type is changed to any other type.
+ // This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature.
+ // +featureGate=ServiceLBNodePortControl
// +optional
AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty" protobuf:"bytes,20,opt,name=allocateLoadBalancerNodePorts"`
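
For illustration, a minimal sketch of opting out of automatic NodePort allocation as described above, assuming the corev1 alias for k8s.io/api/core/v1 and the intstr utility package; names and ports are placeholders:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        allocate := false // honored only where the ServiceLBNodePortControl gate is enabled
        svc := corev1.ServiceSpec{
            Type:                          corev1.ServiceTypeLoadBalancer,
            AllocateLoadBalancerNodePorts: &allocate,
            Selector:                      map[string]string{"app": "demo"},
            Ports: []corev1.ServicePort{{
                Port:       80,
                TargetPort: intstr.FromInt(8080),
            }},
        }
        fmt.Println(*svc.AllocateLoadBalancerNodePorts)
    }
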
@@ -4335,8 +4354,6 @@ type ServicePort struct {
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
- // This is a beta field that is guarded by the ServiceAppProtocol feature
- // gate and enabled by default.
// +optional
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"`
@@ -4523,6 +4540,7 @@ type EndpointSubset struct {
}
// EndpointAddress is a tuple that describes single IP address.
+// +structType=atomic
type EndpointAddress struct {
// The IP of this endpoint.
// May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16),
@@ -4543,6 +4561,7 @@ type EndpointAddress struct {
}
// EndpointPort is a tuple that describes a single port.
+// +structType=atomic
type EndpointPort struct {
// The name of this port. This must match the 'name' field in the
// corresponding ServicePort.
@@ -4566,8 +4585,6 @@ type EndpointPort struct {
// RFC-6335 and http://www.iana.org/assignments/service-names).
// Non-standard protocols should use prefixed names such as
// mycompany.com/my-custom-protocol.
- // This is a beta field that is guarded by the ServiceAppProtocol feature
- // gate and enabled by default.
// +optional
AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"`
}
@@ -4609,8 +4626,10 @@ type NodeSpec struct {
// If specified, the node's taints.
// +optional
Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
- // If specified, the source to get node configuration from
- // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
+
+ // Deprecated. If specified, the source of the node's configuration.
+ // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field.
+ // This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
// +optional
ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
@@ -4621,6 +4640,7 @@ type NodeSpec struct {
}
// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.
+// This API is deprecated since 1.22
type NodeConfigSource struct {
// For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags:
// 1. kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags)
@@ -4638,6 +4658,7 @@ type NodeConfigSource struct {
}
// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+// This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
type ConfigMapNodeConfigSource struct {
// Namespace is the metadata.namespace of the referenced ConfigMap.
// This field is required in all cases.
@@ -4856,6 +4877,7 @@ type PodSignature struct {
type ContainerImage struct {
// Names by which this image is known.
// e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]
+ // +optional
Names []string `json:"names" protobuf:"bytes,1,rep,name=names"`
// The size of the image in bytes.
// +optional
@@ -4916,11 +4938,44 @@ type NodeAddressType string
// These are valid address type of node.
const (
- NodeHostName NodeAddressType = "Hostname"
- NodeExternalIP NodeAddressType = "ExternalIP"
- NodeInternalIP NodeAddressType = "InternalIP"
- NodeExternalDNS NodeAddressType = "ExternalDNS"
+ // NodeHostName identifies a name of the node. Although every node can be assumed
+ // to have a NodeAddress of this type, its exact syntax and semantics are not
+ // defined, and are not consistent between different clusters.
+ NodeHostName NodeAddressType = "Hostname"
+
+ // NodeInternalIP identifies an IP address which is assigned to one of the node's
+ // network interfaces. Every node should have at least one address of this type.
+ //
+ // An internal IP is normally expected to be reachable from every other node, but
+ // may not be visible to hosts outside the cluster. By default it is assumed that
+ // kube-apiserver can reach node internal IPs, though it is possible to configure
+ // clusters where this is not the case.
+ //
+ // NodeInternalIP is the default type of node IP, and does not necessarily imply
+ // that the IP is ONLY reachable internally. If a node has multiple internal IPs,
+ // no specific semantics are assigned to the additional IPs.
+ NodeInternalIP NodeAddressType = "InternalIP"
+
+ // NodeExternalIP identifies an IP address which is, in some way, intended to be
+ // more usable from outside the cluster than an internal IP, though no specific
+ // semantics are defined. It may be a globally routable IP, though it is not
+ // required to be.
+ //
+ // External IPs may be assigned directly to an interface on the node, like a
+ // NodeInternalIP, or alternatively, packets sent to the external IP may be NAT'ed
+ // to an internal node IP rather than being delivered directly (making the IP less
+ // efficient for node-to-node traffic than a NodeInternalIP).
+ NodeExternalIP NodeAddressType = "ExternalIP"
+
+ // NodeInternalDNS identifies a DNS name which resolves to an IP address which has
+ // the characteristics of a NodeInternalIP. The IP it resolves to may or may not
+ // be a listed NodeInternalIP address.
NodeInternalDNS NodeAddressType = "InternalDNS"
+
+ // NodeExternalDNS identifies a DNS name which resolves to an IP address which has
+ // the characteristics of a NodeExternalIP. The IP it resolves to may or may not
+ // be a listed NodeExternalIP address.
+ NodeExternalDNS NodeAddressType = "ExternalDNS"
)
// NodeAddress contains information for the node's address.
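
For illustration, a small sketch of how consumers typically pick an address by type from Node.Status.Addresses, assuming the corev1 alias for k8s.io/api/core/v1; the sample addresses are placeholders:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    // firstAddress returns the first address of the given type from a node's status.
    func firstAddress(node *corev1.Node, t corev1.NodeAddressType) (string, bool) {
        for _, addr := range node.Status.Addresses {
            if addr.Type == t {
                return addr.Address, true
            }
        }
        return "", false
    }

    func main() {
        node := &corev1.Node{Status: corev1.NodeStatus{Addresses: []corev1.NodeAddress{
            {Type: corev1.NodeInternalIP, Address: "10.0.0.7"},
            {Type: corev1.NodeHostName, Address: "worker-1"},
        }}}
        if ip, ok := firstAddress(node, corev1.NodeInternalIP); ok {
            fmt.Println(ip) // prefers the in-cluster address, per the semantics above
        }
    }
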
@@ -5135,22 +5190,6 @@ type Binding struct {
Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"`
}
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// A list of ephemeral containers used with the Pod ephemeralcontainers subresource.
-type EphemeralContainers struct {
- metav1.TypeMeta `json:",inline"`
- // +optional
- metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
- // A list of ephemeral containers associated with this pod. New ephemeral containers
- // may be appended to this list, but existing ephemeral containers may not be removed
- // or modified.
- // +patchMergeKey=name
- // +patchStrategy=merge
- EphemeralContainers []EphemeralContainer `json:"ephemeralContainers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=ephemeralContainers"`
-}
-
// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.
// +k8s:openapi-gen=false
type Preconditions struct {
@@ -5362,6 +5401,7 @@ type ServiceProxyOptions struct {
// Instead of using this type, create a locally provided and used type that is well-focused on your reference.
// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +structType=atomic
type ObjectReference struct {
// Kind of the referent.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
@@ -5401,6 +5441,7 @@ type ObjectReference struct {
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
+// +structType=atomic
type LocalObjectReference struct {
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
@@ -5411,6 +5452,7 @@ type LocalObjectReference struct {
// TypedLocalObjectReference contains enough information to let you locate the
// typed referenced object inside the same namespace.
+// +structType=atomic
type TypedLocalObjectReference struct {
// APIGroup is the group for the resource being referenced.
// If APIGroup is not specified, the specified Kind must be in the core API group.
@@ -5686,7 +5728,7 @@ const (
// Match all pod objects that have priority class mentioned
ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass"
// Match all pod objects that have cross-namespace pod (anti)affinity mentioned.
- // This is an alpha feature enabled by the PodAffinityNamespaceSelector feature flag.
+ // This is a beta feature enabled by the PodAffinityNamespaceSelector feature flag.
ResourceQuotaScopeCrossNamespacePodAffinity ResourceQuotaScope = "CrossNamespacePodAffinity"
)
@@ -5709,6 +5751,7 @@ type ResourceQuotaSpec struct {
// A scope selector represents the AND of the selectors represented
// by the scoped-resource selector requirements.
+// +structType=atomic
type ScopeSelector struct {
// A list of scope selector requirements by scope of the resources.
// +optional
@@ -5906,7 +5949,7 @@ const (
// TODO: Consider supporting different formats, specifying CA/destinationCA.
SecretTypeTLS SecretType = "kubernetes.io/tls"
- // TLSCertKey is the key for tls certificates in a TLS secert.
+ // TLSCertKey is the key for tls certificates in a TLS secret.
TLSCertKey = "tls.crt"
// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
TLSPrivateKeyKey = "tls.key"
@@ -6212,6 +6255,16 @@ type WindowsSecurityContextOptions struct {
// PodSecurityContext, the value specified in SecurityContext takes precedence.
// +optional
RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"`
+
+ // HostProcess determines if a container should be run as a 'Host Process' container.
+ // This field is alpha-level and will only be honored by components that enable the
+ // WindowsHostProcessContainers feature flag. Setting this field without the feature
+ // flag will result in errors when validating the Pod. All of a Pod's containers must
+ // have the same effective HostProcess value (it is not allowed to have a mix of HostProcess
+ // containers and non-HostProcess containers). In addition, if HostProcess is true
+ // then HostNetwork must also be set to true.
+ // +optional
+ HostProcess *bool `json:"hostProcess,omitempty" protobuf:"bytes,4,opt,name=hostProcess"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
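
For illustration, a minimal sketch of the new HostProcess field, assuming the corev1 alias for k8s.io/api/core/v1; the image and names are placeholders, and the WindowsHostProcessContainers gate must be enabled as noted above:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func main() {
        hostProcess := true
        spec := corev1.PodSpec{
            // HostNetwork must be true whenever HostProcess is true, per the comment above.
            HostNetwork: true,
            Containers: []corev1.Container{{
                Name:  "hp",
                Image: "mcr.microsoft.com/windows/nanoserver:ltsc2022",
                SecurityContext: &corev1.SecurityContext{
                    WindowsOptions: &corev1.WindowsSecurityContextOptions{
                        HostProcess: &hostProcess,
                    },
                },
            }},
        }
        fmt.Println(*spec.Containers[0].SecurityContext.WindowsOptions.HostProcess)
    }
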
diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
index 0892b9b5e..8d6327375 100644
--- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go
@@ -291,7 +291,7 @@ func (ConfigMapList) SwaggerDoc() map[string]string {
}
var map_ConfigMapNodeConfigSource = map[string]string{
- "": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.",
+ "": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration",
"namespace": "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.",
"name": "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.",
"uid": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.",
@@ -328,8 +328,8 @@ var map_Container = map[string]string{
"": "A single application container that you want to run within a pod.",
"name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.",
"image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
- "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
- "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
"ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
@@ -344,7 +344,7 @@ var map_Container = map[string]string{
"terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
"terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
"imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
- "securityContext": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
+ "securityContext": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
"stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
"stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
"tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
@@ -506,7 +506,7 @@ var map_EndpointPort = map[string]string{
"name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.",
"port": "The port number of the endpoint.",
"protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
- "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.",
+ "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.",
}
func (EndpointPort) SwaggerDoc() map[string]string {
@@ -558,7 +558,7 @@ func (EnvFromSource) SwaggerDoc() map[string]string {
var map_EnvVar = map[string]string{
"": "EnvVar represents an environment variable present in a Container.",
"name": "Name of the environment variable. Must be a C_IDENTIFIER.",
- "value": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
+ "value": "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
"valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.",
}
@@ -591,8 +591,8 @@ var map_EphemeralContainerCommon = map[string]string{
"": "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.",
"name": "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.",
"image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images",
- "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
- "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
+ "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
"workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
"ports": "Ports are not allowed for ephemeral containers.",
"envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
@@ -607,7 +607,7 @@ var map_EphemeralContainerCommon = map[string]string{
"terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
"terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
"imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
- "securityContext": "SecurityContext is not allowed for ephemeral containers.",
+ "securityContext": "Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.",
"stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
"stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
"tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
@@ -617,15 +617,6 @@ func (EphemeralContainerCommon) SwaggerDoc() map[string]string {
return map_EphemeralContainerCommon
}
-var map_EphemeralContainers = map[string]string{
- "": "A list of ephemeral containers used with the Pod ephemeralcontainers subresource.",
- "ephemeralContainers": "A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified.",
-}
-
-func (EphemeralContainers) SwaggerDoc() map[string]string {
- return map_EphemeralContainers
-}
-
var map_EphemeralVolumeSource = map[string]string{
"": "Represents an ephemeral volume that is handled by a normal storage driver.",
"volumeClaimTemplate": "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `-` where `` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.",
@@ -1094,7 +1085,7 @@ func (NodeCondition) SwaggerDoc() map[string]string {
}
var map_NodeConfigSource = map[string]string{
- "": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.",
+ "": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22",
"configMap": "ConfigMap is a reference to a Node's ConfigMap",
}
@@ -1188,7 +1179,7 @@ var map_NodeSpec = map[string]string{
"providerID": "ID of the node assigned by the cloud provider in the format: ://",
"unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration",
"taints": "If specified, the node's taints.",
- "configSource": "If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field",
+ "configSource": "Deprecated. If specified, the source of the node's configuration. The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration",
"externalID": "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966",
}
@@ -1310,7 +1301,8 @@ var map_PersistentVolumeClaimSpec = map[string]string{
"volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.",
"storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.",
- "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) * An existing custom resource that implements data population (Alpha) In order to use custom resource types that implement data population, the AnyVolumeDataSource feature gate must be enabled. If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source.",
+ "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the AnyVolumeDataSource feature gate is enabled, this field will always have the same contents as the DataSourceRef field.",
+ "dataSourceRef": "Specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any local object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the DataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, both fields (DataSource and DataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. There are two important differences between DataSource and DataSourceRef: * While DataSource only allows two specific types of objects, DataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While DataSource ignores disallowed values (dropping them), DataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n(Alpha) Using this field requires the AnyVolumeDataSource feature gate to be enabled.",
}
func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
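
For illustration, a minimal sketch of populating the new dataSourceRef field described above, assuming the corev1 alias for k8s.io/api/core/v1; the snapshot name and storage class are placeholders, and the behavior is gated by AnyVolumeDataSource as noted:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
    )

    func strPtr(s string) *string { return &s }

    func main() {
        spec := corev1.PersistentVolumeClaimSpec{
            AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
            StorageClassName: strPtr("standard"),
            // When only one of DataSource/DataSourceRef is set, the API server mirrors it
            // into the other field for backwards compatibility, as described above.
            DataSourceRef: &corev1.TypedLocalObjectReference{
                APIGroup: strPtr("snapshot.storage.k8s.io"),
                Kind:     "VolumeSnapshot",
                Name:     "my-snapshot",
            },
        }
        fmt.Println(spec.DataSourceRef.Kind)
    }
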
@@ -1452,7 +1444,7 @@ var map_PodAffinityTerm = map[string]string{
"labelSelector": "A label query over a set of resources, in this case pods.",
"namespaces": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\"",
"topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.",
- "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces. This field is alpha-level and is only honored when PodAffinityNamespaceSelector feature is enabled.",
+ "namespaceSelector": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces. This field is beta-level and is only honored when PodAffinityNamespaceSelector feature is enabled.",
}
func (PodAffinityTerm) SwaggerDoc() map[string]string {
@@ -1650,11 +1642,11 @@ var map_PodSpec = map[string]string{
"priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.",
"priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.",
"dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
- "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md",
- "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.",
+ "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates",
+ "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class This is a beta feature as of Kubernetes v1.14.",
"enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.",
"preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is beta-level, gated by the NonPreemptingPriority feature-gate.",
- "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.",
+ "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.",
"topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.",
"setHostnameAsFQDN": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.",
}
@@ -1783,7 +1775,7 @@ var map_Probe = map[string]string{
"periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.",
"successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.",
"failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.",
- "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is an alpha field and requires enabling ProbeTerminationGracePeriod feature gate.",
+ "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.",
}
func (Probe) SwaggerDoc() map[string]string {
@@ -2218,7 +2210,7 @@ var map_ServicePort = map[string]string{
"": "ServicePort contains information on service's port.",
"name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.",
"protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.",
- "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.",
+ "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol.",
"port": "The port that will be exposed by this service.",
"targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service",
"nodePort": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport",
@@ -2247,16 +2239,15 @@ var map_ServiceSpec = map[string]string{
"externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.",
"sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies",
"loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.",
- "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/",
+ "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/",
"externalName": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".",
"externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.",
"healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type).",
"publishNotReadyAddresses": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.",
"sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.",
- "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied. This field is alpha-level and is only honored by servers that enable the ServiceTopology feature. This field is deprecated and will be removed in a future version.",
"ipFamilies": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service, and is gated by the \"IPv6DualStack\" feature gate. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.",
"ipFamilyPolicy": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service, and is gated by the \"IPv6DualStack\" feature gate. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.",
- "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. allocateLoadBalancerNodePorts may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is alpha-level and is only honored by servers that enable the ServiceLBNodePortControl feature.",
+ "allocateLoadBalancerNodePorts": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type. This field is beta-level and is only honored by servers that enable the ServiceLBNodePortControl feature.",
"loadBalancerClass": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.",
"internalTrafficPolicy": "InternalTrafficPolicy specifies if the cluster internal traffic should be routed to all endpoints or node-local endpoints only. \"Cluster\" routes internal traffic to a Service to all endpoints. \"Local\" routes traffic to node-local endpoints only, traffic is dropped if no node-local endpoints are ready. The default value is \"Cluster\".",
}
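The LoadBalancer-related ServiceSpec fields documented above fit together roughly like the sketch below (the class name is a placeholder, and a string cast is used for the internal traffic policy to keep the sketch self-contained):

package example

import corev1 "k8s.io/api/core/v1"

// loadBalancerSpec sketches the allocateLoadBalancerNodePorts,
// loadBalancerClass, and internalTrafficPolicy fields described above.
func loadBalancerSpec() corev1.ServiceSpec {
	allocate := false // the load balancer does not rely on NodePorts
	class := "example.com/internal-vip"
	internal := corev1.ServiceInternalTrafficPolicyType("Cluster")
	return corev1.ServiceSpec{
		Type:                          corev1.ServiceTypeLoadBalancer,
		AllocateLoadBalancerNodePorts: &allocate,
		LoadBalancerClass:             &class,
		InternalTrafficPolicy:         &internal,
	}
}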
@@ -2515,6 +2506,7 @@ var map_WindowsSecurityContextOptions = map[string]string{
"gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.",
"gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.",
"runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.",
+ "hostProcess": "HostProcess determines if a container should be run as a 'Host Process' container. This field is alpha-level and will only be honored by components that enable the WindowsHostProcessContainers feature flag. Setting this field without the feature flag will result in errors when validating the Pod. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.",
}
func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string {
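A hostProcess pod spec consistent with the constraints above (hostNetwork must be true and all containers share the same effective value) might be sketched as follows; the image and user name are illustrative:

package example

import corev1 "k8s.io/api/core/v1"

// hostProcessPodSpec sets the Windows hostProcess option together with
// hostNetwork=true, as required by the field documentation above.
func hostProcessPodSpec() corev1.PodSpec {
	hostProcess := true
	return corev1.PodSpec{
		HostNetwork: true, // required when hostProcess is true
		SecurityContext: &corev1.PodSecurityContext{
			WindowsOptions: &corev1.WindowsSecurityContextOptions{
				HostProcess:   &hostProcess,
				RunAsUserName: strPtr(`NT AUTHORITY\SYSTEM`),
			},
		},
		Containers: []corev1.Container{{Name: "agent", Image: "example.com/agent:latest"}},
	}
}

func strPtr(s string) *string { return &s }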
diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
index b60baa665..ff660ae27 100644
--- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go
@@ -1400,39 +1400,6 @@ func (in *EphemeralContainerCommon) DeepCopy() *EphemeralContainerCommon {
return out
}
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *EphemeralContainers) DeepCopyInto(out *EphemeralContainers) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- if in.EphemeralContainers != nil {
- in, out := &in.EphemeralContainers, &out.EphemeralContainers
- *out = make([]EphemeralContainer, len(*in))
- for i := range *in {
- (*in)[i].DeepCopyInto(&(*out)[i])
- }
- }
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainers.
-func (in *EphemeralContainers) DeepCopy() *EphemeralContainers {
- if in == nil {
- return nil
- }
- out := new(EphemeralContainers)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *EphemeralContainers) DeepCopyObject() runtime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
-
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralVolumeSource) DeepCopyInto(out *EphemeralVolumeSource) {
*out = *in
@@ -2967,6 +2934,11 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec
*out = new(TypedLocalObjectReference)
(*in).DeepCopyInto(*out)
}
+ if in.DataSourceRef != nil {
+ in, out := &in.DataSourceRef, &out.DataSourceRef
+ *out = new(TypedLocalObjectReference)
+ (*in).DeepCopyInto(*out)
+ }
return
}
@@ -5330,11 +5302,6 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) {
*out = new(SessionAffinityConfig)
(*in).DeepCopyInto(*out)
}
- if in.TopologyKeys != nil {
- in, out := &in.TopologyKeys, &out.TopologyKeys
- *out = make([]string, len(*in))
- copy(*out, *in)
- }
if in.IPFamilies != nil {
in, out := &in.IPFamilies, &out.IPFamilies
*out = make([]IPFamily, len(*in))
@@ -5943,6 +5910,11 @@ func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContex
*out = new(string)
**out = **in
}
+ if in.HostProcess != nil {
+ in, out := &in.HostProcess, &out.HostProcess
+ *out = new(bool)
+ **out = **in
+ }
return
}
diff --git a/vendor/k8s.io/api/discovery/v1/generated.proto b/vendor/k8s.io/api/discovery/v1/generated.proto
index 5844965d0..0e1e4a8c3 100644
--- a/vendor/k8s.io/api/discovery/v1/generated.proto
+++ b/vendor/k8s.io/api/discovery/v1/generated.proto
@@ -115,6 +115,7 @@ message EndpointHints {
}
// EndpointPort represents a Port used by an EndpointSlice
+// +structType=atomic
message EndpointPort {
// The name of this port. All ports in an EndpointSlice must have a unique
// name. If the EndpointSlice is derived from a Kubernetes service, this
diff --git a/vendor/k8s.io/api/discovery/v1/types.go b/vendor/k8s.io/api/discovery/v1/types.go
index fa990efdb..b66fdd884 100644
--- a/vendor/k8s.io/api/discovery/v1/types.go
+++ b/vendor/k8s.io/api/discovery/v1/types.go
@@ -153,6 +153,7 @@ type ForZone struct {
}
// EndpointPort represents a Port used by an EndpointSlice
+// +structType=atomic
type EndpointPort struct {
// The name of this port. All ports in an EndpointSlice must have a unique
// name. If the EndpointSlice is derived from a Kubernetes service, this
diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
index 5e3d165eb..737a7e79c 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto
@@ -210,6 +210,9 @@ message DaemonSetStatus {
repeated DaemonSetCondition conditions = 10;
}
+// DaemonSetUpdateStrategy indicates the strategy that the DaemonSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
message DaemonSetUpdateStrategy {
// Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
// Default is OnDelete.
@@ -580,7 +583,12 @@ message IngressRule {
// mixing different types of rules in a single Ingress is disallowed, so exactly
// one of the following must be set.
message IngressRuleValue {
- // +optional
+ // http is a list of http selectors pointing to backends.
+ // A path is matched against the path of an incoming request. Currently it can
+ // contain characters disallowed from the conventional "path" part of a URL
+ // as defined by RFC 3986. Paths must begin with a '/'.
+ // A backend defines the referenced service endpoint to which the traffic
+ // will be forwarded.
optional HTTPIngressRuleValue http = 1;
}
@@ -756,8 +764,8 @@ message NetworkPolicyPort {
// should be allowed by the policy. This field cannot be defined if the port field
// is not defined or if the port field is defined as a named (string) port.
// The endPort must be equal or greater than port.
- // This feature is in Alpha state and should be enabled using the Feature Gate
- // "NetworkPolicyEndPort".
+ // This feature is in Beta state and is enabled by default.
+ // It can be disabled using the Feature Gate "NetworkPolicyEndPort".
// +optional
optional int32 endPort = 3;
}
@@ -1090,7 +1098,7 @@ message RollingUpdateDaemonSet {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
- // number is calculated from percentage by rounding down to a minimum of one.
+ // number is calculated from percentage by rounding up.
// This cannot be 0 if MaxSurge is 0
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
@@ -1237,6 +1245,7 @@ message ScaleStatus {
// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
+ // +mapType=atomic
map<string, string> selector = 2;
// label selector for pods that should match the replicas count. This is a serialized
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go
index f3479713c..963318a2f 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types.go
@@ -37,6 +37,7 @@ type ScaleStatus struct {
// label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors
// +optional
+ // +mapType=atomic
Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"`
// label selector for pods that should match the replicas count. This is a serialized
@@ -73,6 +74,7 @@ type Scale struct {
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
+// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=Scale,result=Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.1
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
@@ -327,6 +329,9 @@ type DeploymentList struct {
Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"`
}
+// DaemonSetUpdateStrategy indicates the strategy that the DaemonSet
+// controller will use to perform updates. It includes any additional parameters
+// necessary to perform the update for the indicated strategy.
type DaemonSetUpdateStrategy struct {
// Type of daemon set update. Can be "RollingUpdate" or "OnDelete".
// Default is OnDelete.
@@ -357,7 +362,7 @@ type RollingUpdateDaemonSet struct {
// The maximum number of DaemonSet pods that can be unavailable during the
// update. Value can be an absolute number (ex: 5) or a percentage of total
// number of DaemonSet pods at the start of the update (ex: 10%). Absolute
- // number is calculated from percentage by rounding down to a minimum of one.
+ // number is calculated from percentage by rounding up.
// This cannot be 0 if MaxSurge is 0
// Default value is 1.
// Example: when this is set to 30%, at most 30% of the total number of nodes
@@ -729,7 +734,12 @@ type IngressRuleValue struct {
// 2. Consider adding fields for ingress-type specific global options
// usable by a loadbalancer, like http keep-alive.
- // +optional
+ // http is a list of http selectors pointing to backends.
+ // A path is matched against the path of an incoming request. Currently it can
+ // contain characters disallowed from the conventional "path" part of a URL
+ // as defined by RFC 3986. Paths must begin with a '/'.
+ // A backend defines the referenced service endpoint to which the traffic
+ // will be forwarded.
HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"`
}
@@ -827,6 +837,7 @@ type IngressBackend struct {
// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
+// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=Scale,result=Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.2
// +k8s:prerelease-lifecycle-gen:deprecated=1.8
@@ -1482,8 +1493,8 @@ type NetworkPolicyPort struct {
// should be allowed by the policy. This field cannot be defined if the port field
// is not defined or if the port field is defined as a named (string) port.
// The endPort must be equal or greater than port.
- // This feature is in Alpha state and should be enabled using the Feature Gate
- // "NetworkPolicyEndPort".
+ // This feature is in Beta state and is enabled by default.
+ // It can be disabled using the Feature Gate "NetworkPolicyEndPort".
// +optional
EndPort *int32 `json:"endPort,omitempty" protobuf:"bytes,3,opt,name=endPort"`
}
diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
index 870b607a7..d70303fa8 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go
@@ -122,6 +122,7 @@ func (DaemonSetStatus) SwaggerDoc() map[string]string {
}
var map_DaemonSetUpdateStrategy = map[string]string{
+ "": "DaemonSetUpdateStrategy indicates the strategy that the DaemonSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.",
"type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is OnDelete.",
"rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".",
}
@@ -321,7 +322,8 @@ func (IngressRule) SwaggerDoc() map[string]string {
}
var map_IngressRuleValue = map[string]string{
- "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.",
+ "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.",
+ "http": "http is a list of http selectors pointing to backends. A path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. A backend defines the referenced service endpoint to which the traffic will be forwarded to.",
}
func (IngressRuleValue) SwaggerDoc() map[string]string {
@@ -414,7 +416,7 @@ var map_NetworkPolicyPort = map[string]string{
"": "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.",
"protocol": "Optional. The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.",
"port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.",
- "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Alpha state and should be enabled using the Feature Gate \"NetworkPolicyEndPort\".",
+ "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate \"NetworkPolicyEndPort\".",
}
func (NetworkPolicyPort) SwaggerDoc() map[string]string {
@@ -556,7 +558,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string {
var map_RollingUpdateDaemonSet = map[string]string{
"": "Spec to control the desired behavior of daemon set rolling update.",
- "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding down to a minimum of one. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
+ "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0 Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.",
"maxSurge": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediatedly created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption. This is an alpha field and requires enabling DaemonSetUpdateSurge feature gate.",
}
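To illustrate the rounding change, a rolling-update strategy with a percentage maxUnavailable could be expressed as below; the sketch uses the apps/v1 types, which carry the same fields, since the extensions/v1beta1 DaemonSet shown in this diff is deprecated:

package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// surgeUpdateStrategy mirrors the documented semantics. With 7 desired pods,
// a 30% maxUnavailable now rounds up to 3; the old wording (round down, with
// a minimum of one) would have given 2.
func surgeUpdateStrategy() appsv1.DaemonSetUpdateStrategy {
	maxUnavailable := intstr.FromString("30%")
	maxSurge := intstr.FromInt(0)
	return appsv1.DaemonSetUpdateStrategy{
		Type: appsv1.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateDaemonSet{
			MaxUnavailable: &maxUnavailable,
			MaxSurge:       &maxSurge, // gated by the DaemonSetUpdateSurge feature gate
		},
	}
}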
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto
index 7b19a273e..2e0d0eae2 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/generated.proto
@@ -411,16 +411,20 @@ message ServiceAccountSubject {
// ways of matching an originator; by user, group, or service account.
// +union
message Subject {
+ // `kind` indicates which one of the other fields is non-empty.
// Required
// +unionDiscriminator
optional string kind = 1;
+ // `user` matches based on username.
// +optional
optional UserSubject user = 2;
+ // `group` matches based on user group name.
// +optional
optional GroupSubject group = 3;
+ // `serviceAccount` matches ServiceAccounts.
// +optional
optional ServiceAccountSubject serviceAccount = 4;
}
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go
index 1e9701fc3..afcc6fa34 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types.go
@@ -185,13 +185,17 @@ type PolicyRulesWithSubjects struct {
// ways of matching an originator; by user, group, or service account.
// +union
type Subject struct {
+ // `kind` indicates which one of the other fields is non-empty.
// Required
// +unionDiscriminator
Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+ // `user` matches based on username.
// +optional
User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`
+ // `group` matches based on user group name.
// +optional
Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"`
+ // `serviceAccount` matches ServiceAccounts.
// +optional
ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"`
}
diff --git a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
index 211d55e5e..8171360bb 100644
--- a/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/flowcontrol/v1alpha1/types_swagger_doc_generated.go
@@ -238,8 +238,11 @@ func (ServiceAccountSubject) SwaggerDoc() map[string]string {
}
var map_Subject = map[string]string{
- "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.",
- "kind": "Required",
+ "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.",
+ "kind": "`kind` indicates which one of the other fields is non-empty. Required",
+ "user": "`user` matches based on username.",
+ "group": "`group` matches based on user group name.",
+ "serviceAccount": "`serviceAccount` matches ServiceAccounts.",
}
func (Subject) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto
index 9ddfc5465..40f441c5c 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/flowcontrol/v1beta1/generated.proto
@@ -411,16 +411,20 @@ message ServiceAccountSubject {
// ways of matching an originator; by user, group, or service account.
// +union
message Subject {
+ // `kind` indicates which one of the other fields is non-empty.
// Required
// +unionDiscriminator
optional string kind = 1;
+ // `user` matches based on username.
// +optional
optional UserSubject user = 2;
+ // `group` matches based on user group name.
// +optional
optional GroupSubject group = 3;
+ // `serviceAccount` matches ServiceAccounts.
// +optional
optional ServiceAccountSubject serviceAccount = 4;
}
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go
index ece834e92..c38021d69 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta1/types.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types.go
@@ -57,6 +57,50 @@ const (
ResponseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID"
)
+const (
+ // AutoUpdateAnnotationKey is the name of an annotation that enables
+ // automatic update of the spec of the bootstrap configuration
+ // object(s), if set to 'true'.
+ //
+ // On a fresh install, all bootstrap configuration objects will have auto
+ // update enabled with the following annotation key:
+ // apf.kubernetes.io/autoupdate-spec: 'true'
+ //
+ // The kube-apiserver periodically checks the bootstrap configuration
+ // objects on the cluster and applies updates if necessary.
+ //
+ // kube-apiserver enforces an 'always auto-update' policy for the
+ // mandatory configuration object(s). This implies:
+ // - the auto-update annotation key is added with a value of 'true'
+ // if it is missing.
+ // - the auto-update annotation key is set to 'true' if its current value
+ // is a boolean false or has an invalid boolean representation
+ // (if the cluster operator sets it to 'false' it will be stomped)
+ // - any changes to the spec made by the cluster operator will be
+ // stomped.
+ //
+ // The kube-apiserver will apply updates on the suggested configuration if:
+ // - the cluster operator has enabled auto-update by setting the annotation
+ // (apf.kubernetes.io/autoupdate-spec: 'true') or
+ // - the annotation key is missing but the generation is 1
+ //
+ // If the suggested configuration object is missing the annotation key,
+ // kube-apiserver will update the annotation appropriately:
+ // - it is set to 'true' if generation of the object is '1' which usually
+ // indicates that the spec of the object has not been changed.
+ // - it is set to 'false' if generation of the object is greater than 1.
+ //
+ // The goal is to enable the kube-apiserver to apply update on suggested
+ // configuration objects installed by previous releases but not overwrite
+ // changes made by the cluster operators.
+ // Note that this distinction is imperfectly detected: in the case where an
+ // operator deletes a suggested configuration object and later creates it
+ // but with a variant spec and then does no updates of the object
+ // (generation is 1), the technique outlined above will incorrectly
+ // determine that the object should be auto-updated.
+ AutoUpdateAnnotationKey = "apf.kubernetes.io/autoupdate-spec"
+)
+
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -179,13 +223,17 @@ type PolicyRulesWithSubjects struct {
// ways of matching an originator; by user, group, or service account.
// +union
type Subject struct {
+ // `kind` indicates which one of the other fields is non-empty.
// Required
// +unionDiscriminator
Kind SubjectKind `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+ // `user` matches based on username.
// +optional
User *UserSubject `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`
+ // `group` matches based on user group name.
// +optional
Group *GroupSubject `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"`
+ // `serviceAccount` matches ServiceAccounts.
// +optional
ServiceAccount *ServiceAccountSubject `json:"serviceAccount,omitempty" protobuf:"bytes,4,opt,name=serviceAccount"`
}
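A suggested bootstrap object can be pinned against these automatic updates by setting the new annotation to 'false', for example as in the sketch below (the FlowSchema name is illustrative):

package example

import (
	flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pinnedFlowSchema opts a suggested bootstrap FlowSchema out of automatic
// spec updates so operator changes are not stomped; per the comment above,
// mandatory configuration objects ignore a 'false' value.
func pinnedFlowSchema() flowcontrolv1beta1.FlowSchema {
	return flowcontrolv1beta1.FlowSchema{
		ObjectMeta: metav1.ObjectMeta{
			Name: "service-accounts",
			Annotations: map[string]string{
				flowcontrolv1beta1.AutoUpdateAnnotationKey: "false",
			},
		},
	}
}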
diff --git a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go
index 8343a883f..0f2f05e3a 100644
--- a/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/flowcontrol/v1beta1/types_swagger_doc_generated.go
@@ -238,8 +238,11 @@ func (ServiceAccountSubject) SwaggerDoc() map[string]string {
}
var map_Subject = map[string]string{
- "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.",
- "kind": "Required",
+ "": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.",
+ "kind": "`kind` indicates which one of the other fields is non-empty. Required",
+ "user": "`user` matches based on username.",
+ "group": "`group` matches based on user group name.",
+ "serviceAccount": "`serviceAccount` matches ServiceAccounts.",
}
func (Subject) SwaggerDoc() map[string]string {
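The union semantics spelled out above translate into code roughly as follows, with kind selecting the single populated branch (the namespace and name values are illustrative):

package example

import flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1"

// serviceAccountSubject populates only the serviceAccount branch of the
// union and sets the discriminator accordingly.
func serviceAccountSubject() flowcontrolv1beta1.Subject {
	return flowcontrolv1beta1.Subject{
		Kind: flowcontrolv1beta1.SubjectKindServiceAccount,
		ServiceAccount: &flowcontrolv1beta1.ServiceAccountSubject{
			Namespace: "kube-system",
			Name:      "deployment-controller",
		},
	}
}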
diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto
index 8f23477d8..8559e92d7 100644
--- a/vendor/k8s.io/api/networking/v1/generated.proto
+++ b/vendor/k8s.io/api/networking/v1/generated.proto
@@ -35,8 +35,8 @@ option go_package = "v1";
message HTTPIngressPath {
// Path is matched against the path of an incoming request. Currently it can
// contain characters disallowed from the conventional "path" part of a URL
- // as defined by RFC 3986. Paths must begin with a '/'. When unspecified,
- // all paths from incoming requests are matched.
+ // as defined by RFC 3986. Paths must begin with a '/' and must be present
+ // when using PathType with value "Exact" or "Prefix".
// +optional
optional string path = 1;
@@ -439,8 +439,8 @@ message NetworkPolicyPort {
// should be allowed by the policy. This field cannot be defined if the port field
// is not defined or if the port field is defined as a named (string) port.
// The endPort must be equal or greater than port.
- // This feature is in Alpha state and should be enabled using the Feature Gate
- // "NetworkPolicyEndPort".
+ // This feature is in Beta state and is enabled by default.
+ // It can be disabled using the Feature Gate "NetworkPolicyEndPort".
// +optional
optional int32 endPort = 3;
}
diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go
index 6cc210b89..7edbcd597 100644
--- a/vendor/k8s.io/api/networking/v1/types.go
+++ b/vendor/k8s.io/api/networking/v1/types.go
@@ -152,8 +152,8 @@ type NetworkPolicyPort struct {
// should be allowed by the policy. This field cannot be defined if the port field
// is not defined or if the port field is defined as a named (string) port.
// The endPort must be equal or greater than port.
- // This feature is in Alpha state and should be enabled using the Feature Gate
- // "NetworkPolicyEndPort".
+ // This feature is in Beta state and is enabled by default.
+ // It can be disabled using the Feature Gate "NetworkPolicyEndPort".
// +optional
EndPort *int32 `json:"endPort,omitempty" protobuf:"bytes,3,opt,name=endPort"`
}
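A policy port that uses the now-beta endPort range might be sketched as follows (the 32000-32768 range is illustrative):

package example

import (
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// portRange allows a whole TCP port range in a single rule using the
// endPort field promoted to beta in this diff.
func portRange() networkingv1.NetworkPolicyPort {
	tcp := corev1.ProtocolTCP
	start := intstr.FromInt(32000)
	end := int32(32768)
	return networkingv1.NetworkPolicyPort{
		Protocol: &tcp,
		Port:     &start, // must be numeric when endPort is set
		EndPort:  &end,   // inclusive upper bound, >= port
	}
}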
@@ -407,8 +407,8 @@ const (
type HTTPIngressPath struct {
// Path is matched against the path of an incoming request. Currently it can
// contain characters disallowed from the conventional "path" part of a URL
- // as defined by RFC 3986. Paths must begin with a '/'. When unspecified,
- // all paths from incoming requests are matched.
+ // as defined by RFC 3986. Paths must begin with a '/' and must be present
+ // when using PathType with value "Exact" or "Prefix".
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
@@ -426,7 +426,7 @@ type HTTPIngressPath struct {
// the IngressClass. Implementations can treat this as a separate PathType
// or treat it identically to Prefix or Exact path types.
// Implementations are required to support all path types.
- PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"`
+ PathType *PathType `json:"pathType" protobuf:"bytes,3,opt,name=pathType"`
// Backend defines the referenced service endpoint to which the traffic
// will be forwarded to.
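Under the tightened contract above (pathType required, path mandatory for Exact and Prefix), a networking/v1 path entry looks roughly like this sketch (the service name and ports are illustrative):

package example

import networkingv1 "k8s.io/api/networking/v1"

// prefixPath sets both path and pathType, as now required for Prefix matching.
func prefixPath() networkingv1.HTTPIngressPath {
	pathType := networkingv1.PathTypePrefix
	return networkingv1.HTTPIngressPath{
		Path:     "/api",
		PathType: &pathType,
		Backend: networkingv1.IngressBackend{
			Service: &networkingv1.IngressServiceBackend{
				Name: "api",
				Port: networkingv1.ServiceBackendPort{Number: 8080},
			},
		},
	}
}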
diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
index 88f9a8f13..49b38bd04 100644
--- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go
@@ -29,7 +29,7 @@ package v1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_HTTPIngressPath = map[string]string{
"": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.",
- "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.",
+ "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".",
"pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.",
"backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.",
}
@@ -244,7 +244,7 @@ var map_NetworkPolicyPort = map[string]string{
"": "NetworkPolicyPort describes a port to allow traffic on",
"protocol": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.",
"port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.",
- "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Alpha state and should be enabled using the Feature Gate \"NetworkPolicyEndPort\".",
+ "endPort": "If set, indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal or greater than port. This feature is in Beta state and is enabled by default. It can be disabled using the Feature Gate \"NetworkPolicyEndPort\".",
}
func (NetworkPolicyPort) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/networking/v1/well_known_annotations.go b/vendor/k8s.io/api/networking/v1/well_known_annotations.go
new file mode 100644
index 000000000..0e3754d5c
--- /dev/null
+++ b/vendor/k8s.io/api/networking/v1/well_known_annotations.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+const (
+ // AnnotationIsDefaultIngressClass can be used to indicate that an
+ // IngressClass should be considered default. When a single IngressClass
+ // resource has this annotation set to true, new Ingress resources without a
+ // class specified will be assigned this default class.
+ AnnotationIsDefaultIngressClass = "ingressclass.kubernetes.io/is-default-class"
+)
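An IngressClass marked as the cluster default via this annotation could look like the following sketch (the class and controller names are placeholders):

package example

import (
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// defaultIngressClass marks a class as the cluster default, so new Ingress
// resources without a class specified are assigned it.
func defaultIngressClass() networkingv1.IngressClass {
	return networkingv1.IngressClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-lb",
			Annotations: map[string]string{
				networkingv1.AnnotationIsDefaultIngressClass: "true",
			},
		},
		Spec: networkingv1.IngressClassSpec{
			Controller: "example.com/ingress-controller",
		},
	}
}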
diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto
index acb6e859c..5479ac482 100644
--- a/vendor/k8s.io/api/networking/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto
@@ -35,8 +35,8 @@ option go_package = "v1beta1";
message HTTPIngressPath {
// Path is matched against the path of an incoming request. Currently it can
// contain characters disallowed from the conventional "path" part of a URL
- // as defined by RFC 3986. Paths must begin with a '/'. When unspecified,
- // all paths from incoming requests are matched.
+ // as defined by RFC 3986. Paths must begin with a '/' and must be present
+ // when using PathType with value "Exact" or "Prefix".
// +optional
optional string path = 1;
diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go
index 09279d693..4646e90b6 100644
--- a/vendor/k8s.io/api/networking/v1beta1/types.go
+++ b/vendor/k8s.io/api/networking/v1beta1/types.go
@@ -228,8 +228,8 @@ const (
type HTTPIngressPath struct {
// Path is matched against the path of an incoming request. Currently it can
// contain characters disallowed from the conventional "path" part of a URL
- // as defined by RFC 3986. Paths must begin with a '/'. When unspecified,
- // all paths from incoming requests are matched.
+ // as defined by RFC 3986. Paths must begin with a '/' and must be present
+ // when using PathType with value "Exact" or "Prefix".
// +optional
Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"`
diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go
index 84337ad3e..88c130e89 100644
--- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go
@@ -29,7 +29,7 @@ package v1beta1
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_HTTPIngressPath = map[string]string{
"": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.",
- "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.",
+ "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".",
"pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.",
"backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.",
}
diff --git a/vendor/k8s.io/api/node/v1/generated.proto b/vendor/k8s.io/api/node/v1/generated.proto
index 4a86999f1..0b98cabb2 100644
--- a/vendor/k8s.io/api/node/v1/generated.proto
+++ b/vendor/k8s.io/api/node/v1/generated.proto
@@ -97,6 +97,7 @@ message Scheduling {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
+ // +mapType=atomic
map<string, string> nodeSelector = 1;
// tolerations are appended (excluding duplicates) to pods running with this
diff --git a/vendor/k8s.io/api/node/v1/types.go b/vendor/k8s.io/api/node/v1/types.go
index b32cc36c4..bfe947e1f 100644
--- a/vendor/k8s.io/api/node/v1/types.go
+++ b/vendor/k8s.io/api/node/v1/types.go
@@ -82,6 +82,7 @@ type Scheduling struct {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
+ // +mapType=atomic
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
// tolerations are appended (excluding duplicates) to pods running with this
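Putting the node/v1 pieces together, a RuntimeClass that sets a handler, the pod overhead described in the KEP links updated below, and the atomic nodeSelector map might be sketched as follows (the handler, label key, and resource amounts are illustrative):

package example

import (
	corev1 "k8s.io/api/core/v1"
	nodev1 "k8s.io/api/node/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// sandboxedRuntimeClass combines the CRI handler, a fixed per-pod overhead,
// and scheduling constraints merged into a pod's nodeSelector at admission.
func sandboxedRuntimeClass() nodev1.RuntimeClass {
	return nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "sandboxed"},
		Handler:    "runsc",
		Overhead: &nodev1.Overhead{
			PodFixed: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("120Mi"),
			},
		},
		Scheduling: &nodev1.Scheduling{
			NodeSelector: map[string]string{"node.example.com/runtime": "gvisor"},
		},
	}
}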
diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto
index 310ad490b..d92e18ff3 100644
--- a/vendor/k8s.io/api/node/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto
@@ -43,7 +43,7 @@ message Overhead {
// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is
// responsible for resolving the RuntimeClassName reference before running the
// pod. For more details, see
-// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
+// https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
message RuntimeClass {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
@@ -84,8 +84,8 @@ message RuntimeClassSpec {
// Overhead represents the resource overhead associated with running a pod for a
// given RuntimeClass. For more details, see
- // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
- // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.
+ // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.
// +optional
optional Overhead overhead = 2;
@@ -106,6 +106,7 @@ message Scheduling {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
+ // +mapType=atomic
map<string, string> nodeSelector = 1;
// tolerations are appended (excluding duplicates) to pods running with this
diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go
index 03e7e6f33..f11bcbb10 100644
--- a/vendor/k8s.io/api/node/v1alpha1/types.go
+++ b/vendor/k8s.io/api/node/v1alpha1/types.go
@@ -31,7 +31,7 @@ import (
// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is
// responsible for resolving the RuntimeClassName reference before running the
// pod. For more details, see
-// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
+// https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
type RuntimeClass struct {
metav1.TypeMeta `json:",inline"`
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
@@ -62,8 +62,8 @@ type RuntimeClassSpec struct {
// Overhead represents the resource overhead associated with running a pod for a
// given RuntimeClass. For more details, see
- // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
- // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.
+ // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.
// +optional
Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"`
@@ -91,6 +91,7 @@ type Scheduling struct {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
+ // +mapType=atomic
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
// tolerations are appended (excluding duplicates) to pods running with this
diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go
index d3011466b..5a259573c 100644
--- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go
@@ -37,7 +37,7 @@ func (Overhead) SwaggerDoc() map[string]string {
}
var map_RuntimeClass = map[string]string{
- "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md",
+ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class",
"metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
}
@@ -59,7 +59,7 @@ func (RuntimeClassList) SwaggerDoc() map[string]string {
var map_RuntimeClassSpec = map[string]string{
"": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. The RuntimeClassSpec is immutable.",
"runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.",
- "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.",
+ "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.",
"scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.",
}
diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto
index 5c2d9d50a..3c1e5bbbd 100644
--- a/vendor/k8s.io/api/node/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/node/v1beta1/generated.proto
@@ -43,7 +43,7 @@ message Overhead {
// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is
// responsible for resolving the RuntimeClassName reference before running the
// pod. For more details, see
-// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
+// https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
message RuntimeClass {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
@@ -63,8 +63,8 @@ message RuntimeClass {
// Overhead represents the resource overhead associated with running a pod for a
// given RuntimeClass. For more details, see
- // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
- // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.
+ // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.
// +optional
optional Overhead overhead = 3;
@@ -96,6 +96,7 @@ message Scheduling {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
+ // +mapType=atomic
map<string, string> nodeSelector = 1;
// tolerations are appended (excluding duplicates) to pods running with this
diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go
index 89559949c..c545abf18 100644
--- a/vendor/k8s.io/api/node/v1beta1/types.go
+++ b/vendor/k8s.io/api/node/v1beta1/types.go
@@ -33,7 +33,7 @@ import (
// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is
// responsible for resolving the RuntimeClassName reference before running the
// pod. For more details, see
-// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
+// https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class
type RuntimeClass struct {
metav1.TypeMeta `json:",inline"`
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
@@ -54,8 +54,8 @@ type RuntimeClass struct {
// Overhead represents the resource overhead associated with running a pod for a
// given RuntimeClass. For more details, see
- // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
- // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.
+ // https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md
+ // This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.
// +optional
Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"`
@@ -83,6 +83,7 @@ type Scheduling struct {
// with a pod's existing nodeSelector. Any conflicts will cause the pod to
// be rejected in admission.
// +optional
+ // +mapType=atomic
NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"`
// tolerations are appended (excluding duplicates) to pods running with this
diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go
index a486147f0..6d8871034 100644
--- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go
@@ -37,10 +37,10 @@ func (Overhead) SwaggerDoc() map[string]string {
}
var map_RuntimeClass = map[string]string{
- "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md",
+ "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class",
"metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
"handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.",
- "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.",
+ "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md This field is beta-level as of Kubernetes v1.18, and is only honored by servers that enable the PodOverhead feature.",
"scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.",
}
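
The node/v1beta1 hunks above are documentation-only (updated KEP links, the beta wording for PodOverhead, and a +mapType=atomic marker on nodeSelector); the Go API surface is unchanged. As a minimal sketch against these vendored types, the following shows how a RuntimeClass with overhead and scheduling constraints is constructed; the handler name "kata" and the resource quantities are illustrative assumptions, not values taken from this change.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1beta1 "k8s.io/api/node/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// RuntimeClass maps a handler name to the CRI runtime configuration;
	// Overhead is only honored by servers with the PodOverhead feature enabled.
	rc := nodev1beta1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "kata"}, // illustrative name
		Handler:    "kata",
		Overhead: &nodev1beta1.Overhead{
			PodFixed: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("250m"),
				corev1.ResourceMemory: resource.MustParse("120Mi"),
			},
		},
		Scheduling: &nodev1beta1.Scheduling{
			// Treated as an atomic map per the new +mapType=atomic marker.
			NodeSelector: map[string]string{"runtime": "kata"},
		},
	}
	fmt.Println(rc.Name, rc.Handler)
}
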
diff --git a/vendor/k8s.io/api/policy/v1/generated.pb.go b/vendor/k8s.io/api/policy/v1/generated.pb.go
index 9a9f73c12..183db076a 100644
--- a/vendor/k8s.io/api/policy/v1/generated.pb.go
+++ b/vendor/k8s.io/api/policy/v1/generated.pb.go
@@ -47,10 +47,38 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+func (m *Eviction) Reset() { *m = Eviction{} }
+func (*Eviction) ProtoMessage() {}
+func (*Eviction) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2d50488813b2d18e, []int{0}
+}
+func (m *Eviction) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Eviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+}
+func (m *Eviction) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Eviction.Merge(m, src)
+}
+func (m *Eviction) XXX_Size() int {
+ return m.Size()
+}
+func (m *Eviction) XXX_DiscardUnknown() {
+ xxx_messageInfo_Eviction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Eviction proto.InternalMessageInfo
+
func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} }
func (*PodDisruptionBudget) ProtoMessage() {}
func (*PodDisruptionBudget) Descriptor() ([]byte, []int) {
- return fileDescriptor_2d50488813b2d18e, []int{0}
+ return fileDescriptor_2d50488813b2d18e, []int{1}
}
func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -78,7 +106,7 @@ var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo
func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} }
func (*PodDisruptionBudgetList) ProtoMessage() {}
func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) {
- return fileDescriptor_2d50488813b2d18e, []int{1}
+ return fileDescriptor_2d50488813b2d18e, []int{2}
}
func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -106,7 +134,7 @@ var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo
func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} }
func (*PodDisruptionBudgetSpec) ProtoMessage() {}
func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_2d50488813b2d18e, []int{2}
+ return fileDescriptor_2d50488813b2d18e, []int{3}
}
func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -134,7 +162,7 @@ var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo
func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} }
func (*PodDisruptionBudgetStatus) ProtoMessage() {}
func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_2d50488813b2d18e, []int{3}
+ return fileDescriptor_2d50488813b2d18e, []int{4}
}
func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -160,6 +188,7 @@ func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() {
var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo
func init() {
+ proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1.Eviction")
proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1.PodDisruptionBudget")
proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1.PodDisruptionBudgetList")
proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1.PodDisruptionBudgetSpec")
@@ -172,55 +201,103 @@ func init() {
}
var fileDescriptor_2d50488813b2d18e = []byte{
- // 766 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xdf, 0x6e, 0xe3, 0x44,
- 0x14, 0xc6, 0xe3, 0xa4, 0x29, 0x65, 0x36, 0x8d, 0xaa, 0x61, 0x81, 0x90, 0x0b, 0x77, 0xd5, 0xab,
- 0x82, 0xb4, 0x63, 0xba, 0x8b, 0x50, 0x85, 0x04, 0x62, 0xbd, 0x59, 0xc1, 0x22, 0x4a, 0x56, 0x53,
- 0x10, 0x12, 0x02, 0x89, 0x89, 0x7d, 0xd6, 0x19, 0x62, 0x7b, 0xac, 0x99, 0xb1, 0xd9, 0x5c, 0xc1,
- 0x23, 0xf0, 0x0a, 0x3c, 0x0a, 0x57, 0xf4, 0x0a, 0xed, 0xe5, 0x5e, 0x45, 0xd4, 0xbc, 0x08, 0xf2,
- 0xd8, 0xf9, 0xe3, 0x24, 0x55, 0x03, 0x77, 0x99, 0x39, 0xe7, 0xfb, 0x9d, 0x39, 0xdf, 0x39, 0x0e,
- 0xfa, 0x78, 0x72, 0xae, 0x08, 0x17, 0xce, 0x24, 0x1d, 0x81, 0x8c, 0x41, 0x83, 0x72, 0x32, 0x88,
- 0x7d, 0x21, 0x9d, 0x2a, 0xc0, 0x12, 0xee, 0x24, 0x22, 0xe4, 0xde, 0xd4, 0xc9, 0xce, 0x9c, 0x00,
- 0x62, 0x90, 0x4c, 0x83, 0x4f, 0x12, 0x29, 0xb4, 0xc0, 0x77, 0xcb, 0x2c, 0xc2, 0x12, 0x4e, 0xca,
- 0x2c, 0x92, 0x9d, 0xf5, 0xef, 0x07, 0x5c, 0x8f, 0xd3, 0x11, 0xf1, 0x44, 0xe4, 0x04, 0x22, 0x10,
- 0x8e, 0x49, 0x1e, 0xa5, 0xcf, 0xcd, 0xc9, 0x1c, 0xcc, 0xaf, 0x12, 0xd2, 0xff, 0x60, 0x59, 0x2a,
- 0x62, 0xde, 0x98, 0xc7, 0x20, 0xa7, 0x4e, 0x32, 0x09, 0x8a, 0x0b, 0xe5, 0x44, 0xa0, 0xd9, 0x96,
- 0xd2, 0x7d, 0xe7, 0x26, 0x95, 0x4c, 0x63, 0xcd, 0x23, 0xd8, 0x10, 0x7c, 0x78, 0x9b, 0x40, 0x79,
- 0x63, 0x88, 0xd8, 0x86, 0xee, 0xe1, 0x4d, 0xba, 0x54, 0xf3, 0xd0, 0xe1, 0xb1, 0x56, 0x5a, 0xae,
- 0x8b, 0x4e, 0x7e, 0x6f, 0xa2, 0x37, 0x9e, 0x09, 0x7f, 0xc0, 0x95, 0x4c, 0x13, 0xcd, 0x45, 0xec,
- 0xa6, 0x7e, 0x00, 0x1a, 0xff, 0x88, 0x0e, 0x8a, 0x86, 0x7c, 0xa6, 0x59, 0xcf, 0xba, 0x67, 0x9d,
- 0xde, 0x79, 0xf0, 0x3e, 0x59, 0x7a, 0xb8, 0xe0, 0x93, 0x64, 0x12, 0x14, 0x17, 0x8a, 0x14, 0xd9,
- 0x24, 0x3b, 0x23, 0xc3, 0xd1, 0x4f, 0xe0, 0xe9, 0x0b, 0xd0, 0xcc, 0xc5, 0x57, 0xb3, 0xe3, 0x46,
- 0x3e, 0x3b, 0x46, 0xcb, 0x3b, 0xba, 0xa0, 0xe2, 0x21, 0xda, 0x53, 0x09, 0x78, 0xbd, 0xa6, 0xa1,
- 0xdf, 0x27, 0xdb, 0x26, 0x44, 0xb6, 0x3c, 0xed, 0x32, 0x01, 0xcf, 0xed, 0x54, 0xe8, 0xbd, 0xe2,
- 0x44, 0x0d, 0x08, 0x7f, 0x8b, 0xf6, 0x95, 0x66, 0x3a, 0x55, 0xbd, 0x96, 0x41, 0x3a, 0xbb, 0x23,
- 0x8d, 0xcc, 0xed, 0x56, 0xd0, 0xfd, 0xf2, 0x4c, 0x2b, 0xdc, 0xc9, 0x9f, 0x16, 0x7a, 0x7b, 0x8b,
- 0xea, 0x4b, 0xae, 0x34, 0xfe, 0x7e, 0xc3, 0x27, 0xb2, 0x9b, 0x4f, 0x85, 0xda, 0xb8, 0x74, 0x54,
- 0x55, 0x3d, 0x98, 0xdf, 0xac, 0x78, 0xf4, 0x15, 0x6a, 0x73, 0x0d, 0x91, 0xea, 0x35, 0xef, 0xb5,
- 0x4e, 0xef, 0x3c, 0x78, 0x77, 0xe7, 0x8e, 0xdc, 0xc3, 0x8a, 0xda, 0x7e, 0x5a, 0xe8, 0x69, 0x89,
- 0x39, 0xf9, 0xab, 0xb9, 0xb5, 0x93, 0xc2, 0x44, 0xfc, 0x1c, 0x75, 0x22, 0x1e, 0x3f, 0xca, 0x18,
- 0x0f, 0xd9, 0x28, 0x84, 0x5b, 0xa7, 0x5e, 0x6c, 0x15, 0x29, 0xb7, 0x8a, 0x3c, 0x8d, 0xf5, 0x50,
- 0x5e, 0x6a, 0xc9, 0xe3, 0xc0, 0x3d, 0xca, 0x67, 0xc7, 0x9d, 0x8b, 0x15, 0x12, 0xad, 0x71, 0xf1,
- 0x0f, 0xe8, 0x40, 0x41, 0x08, 0x9e, 0x16, 0xb2, 0x9a, 0xfd, 0xc3, 0x1d, 0x1d, 0x63, 0x23, 0x08,
- 0x2f, 0x2b, 0xa9, 0xdb, 0x29, 0x2c, 0x9b, 0x9f, 0xe8, 0x02, 0x89, 0x43, 0xd4, 0x8d, 0xd8, 0x8b,
- 0x6f, 0x62, 0xb6, 0x68, 0xa4, 0xf5, 0x3f, 0x1b, 0xc1, 0xf9, 0xec, 0xb8, 0x7b, 0x51, 0x63, 0xd1,
- 0x35, 0xf6, 0xc9, 0x1f, 0x6d, 0xf4, 0xce, 0x8d, 0x0b, 0x85, 0xbf, 0x40, 0x58, 0x8c, 0x14, 0xc8,
- 0x0c, 0xfc, 0xcf, 0xca, 0xef, 0x8e, 0x8b, 0xd8, 0x18, 0xdb, 0x72, 0xfb, 0xd5, 0x80, 0xf0, 0x70,
- 0x23, 0x83, 0x6e, 0x51, 0xe1, 0x5f, 0xd0, 0xa1, 0x5f, 0x56, 0x01, 0xff, 0x99, 0xf0, 0xe7, 0x2b,
- 0xe1, 0xfe, 0xc7, 0x25, 0x27, 0x83, 0x55, 0xc8, 0x93, 0x58, 0xcb, 0xa9, 0xfb, 0x66, 0xf5, 0x94,
- 0xc3, 0x5a, 0x8c, 0xd6, 0xeb, 0x15, 0xcd, 0xf8, 0x0b, 0xa4, 0x7a, 0x14, 0x86, 0xe2, 0x67, 0xf0,
- 0x8d, 0xb9, 0xed, 0x65, 0x33, 0x83, 0x8d, 0x0c, 0xba, 0x45, 0x85, 0x3f, 0x41, 0x5d, 0x2f, 0x95,
- 0x12, 0x62, 0xfd, 0x39, 0xb0, 0x50, 0x8f, 0xa7, 0xbd, 0x3d, 0xc3, 0x79, 0xab, 0xe2, 0x74, 0x1f,
- 0xd7, 0xa2, 0x74, 0x2d, 0xbb, 0xd0, 0xfb, 0xa0, 0xb8, 0x04, 0x7f, 0xae, 0x6f, 0xd7, 0xf5, 0x83,
- 0x5a, 0x94, 0xae, 0x65, 0xe3, 0x73, 0xd4, 0x81, 0x17, 0x09, 0x78, 0x73, 0x2f, 0xf7, 0x8d, 0xfa,
- 0x6e, 0xa5, 0xee, 0x3c, 0x59, 0x89, 0xd1, 0x5a, 0x26, 0xf6, 0x10, 0xf2, 0x44, 0xec, 0x73, 0xd3,
- 0x4e, 0xef, 0x35, 0x33, 0x03, 0x67, 0xb7, 0xfd, 0x7d, 0x3c, 0xd7, 0x2d, 0xff, 0x18, 0x17, 0x57,
- 0x8a, 0xae, 0x60, 0xfb, 0x21, 0xc2, 0x9b, 0x63, 0xc2, 0x47, 0xa8, 0x35, 0x81, 0xa9, 0x59, 0x9f,
- 0xd7, 0x69, 0xf1, 0x13, 0x7f, 0x8a, 0xda, 0x19, 0x0b, 0x53, 0xa8, 0xbe, 0xa3, 0xf7, 0x76, 0x7b,
- 0xc7, 0xd7, 0x3c, 0x02, 0x5a, 0x0a, 0x3f, 0x6a, 0x9e, 0x5b, 0xee, 0xe9, 0xd5, 0xb5, 0xdd, 0x78,
- 0x79, 0x6d, 0x37, 0x5e, 0x5d, 0xdb, 0x8d, 0x5f, 0x73, 0xdb, 0xba, 0xca, 0x6d, 0xeb, 0x65, 0x6e,
- 0x5b, 0xaf, 0x72, 0xdb, 0xfa, 0x3b, 0xb7, 0xad, 0xdf, 0xfe, 0xb1, 0x1b, 0xdf, 0x35, 0xb3, 0xb3,
- 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xca, 0x04, 0x93, 0xc0, 0x85, 0x07, 0x00, 0x00,
+ // 805 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xdf, 0x8e, 0xdb, 0x44,
+ 0x14, 0xc6, 0xe3, 0x64, 0xb3, 0x2c, 0xd3, 0x24, 0x5a, 0x86, 0x02, 0x4b, 0x2e, 0x1c, 0x94, 0xab,
+ 0x05, 0xa9, 0x63, 0xb6, 0x45, 0x68, 0x85, 0x04, 0xa2, 0x6e, 0x56, 0x50, 0xd4, 0x25, 0xd5, 0x2c,
+ 0x08, 0x09, 0x81, 0xc4, 0xc4, 0x3e, 0xcd, 0x0e, 0xb1, 0x3d, 0xd6, 0xcc, 0xd8, 0x34, 0x57, 0xf0,
+ 0x08, 0xbc, 0x02, 0x8f, 0xc2, 0x15, 0x7b, 0x85, 0x7a, 0x59, 0x71, 0x11, 0xb1, 0xe6, 0x45, 0x90,
+ 0xc7, 0xce, 0x1f, 0x27, 0x59, 0x35, 0xe5, 0x82, 0x3b, 0xcf, 0x99, 0xf3, 0xfd, 0x8e, 0xcf, 0x37,
+ 0x67, 0x06, 0x7d, 0x3c, 0x39, 0x55, 0x84, 0x0b, 0x67, 0x92, 0x8c, 0x40, 0x46, 0xa0, 0x41, 0x39,
+ 0x29, 0x44, 0xbe, 0x90, 0x4e, 0xb9, 0xc1, 0x62, 0xee, 0xc4, 0x22, 0xe0, 0xde, 0xd4, 0x49, 0x4f,
+ 0x9c, 0x31, 0x44, 0x20, 0x99, 0x06, 0x9f, 0xc4, 0x52, 0x68, 0x81, 0x6f, 0x17, 0x59, 0x84, 0xc5,
+ 0x9c, 0x14, 0x59, 0x24, 0x3d, 0xe9, 0xde, 0x19, 0x73, 0x7d, 0x99, 0x8c, 0x88, 0x27, 0x42, 0x67,
+ 0x2c, 0xc6, 0xc2, 0x31, 0xc9, 0xa3, 0xe4, 0x89, 0x59, 0x99, 0x85, 0xf9, 0x2a, 0x20, 0xdd, 0x0f,
+ 0x96, 0xa5, 0x42, 0xe6, 0x5d, 0xf2, 0x08, 0xe4, 0xd4, 0x89, 0x27, 0xe3, 0x3c, 0xa0, 0x9c, 0x10,
+ 0x34, 0xdb, 0x52, 0xba, 0xeb, 0xdc, 0xa4, 0x92, 0x49, 0xa4, 0x79, 0x08, 0x1b, 0x82, 0x0f, 0x5f,
+ 0x24, 0x50, 0xde, 0x25, 0x84, 0x6c, 0x43, 0x77, 0xef, 0x26, 0x5d, 0xa2, 0x79, 0xe0, 0xf0, 0x48,
+ 0x2b, 0x2d, 0xd7, 0x45, 0xfd, 0xbf, 0x2c, 0x74, 0x70, 0x96, 0x72, 0x4f, 0x73, 0x11, 0xe1, 0x1f,
+ 0xd0, 0x41, 0xde, 0x85, 0xcf, 0x34, 0x3b, 0xb2, 0xde, 0xb1, 0x8e, 0x6f, 0xdd, 0x7d, 0x9f, 0x2c,
+ 0x8d, 0x5b, 0x40, 0x49, 0x3c, 0x19, 0xe7, 0x01, 0x45, 0xf2, 0x6c, 0x92, 0x9e, 0x90, 0xe1, 0xe8,
+ 0x47, 0xf0, 0xf4, 0x39, 0x68, 0xe6, 0xe2, 0xab, 0x59, 0xaf, 0x96, 0xcd, 0x7a, 0x68, 0x19, 0xa3,
+ 0x0b, 0x2a, 0x0e, 0x50, 0xdb, 0x87, 0x00, 0x34, 0x0c, 0xe3, 0xbc, 0xa2, 0x3a, 0xaa, 0x9b, 0x32,
+ 0xf7, 0x76, 0x2b, 0x33, 0x58, 0x95, 0xba, 0xaf, 0x65, 0xb3, 0x5e, 0xbb, 0x12, 0xa2, 0x55, 0x78,
+ 0xff, 0xb7, 0x3a, 0x7a, 0xfd, 0xb1, 0xf0, 0x07, 0x5c, 0xc9, 0xc4, 0x84, 0xdc, 0xc4, 0x1f, 0x83,
+ 0xfe, 0x1f, 0xfa, 0x1c, 0xa2, 0x3d, 0x15, 0x83, 0x57, 0xb6, 0x77, 0x87, 0x6c, 0x1b, 0x3f, 0xb2,
+ 0xe5, 0xd7, 0x2e, 0x62, 0xf0, 0xdc, 0x56, 0x89, 0xde, 0xcb, 0x57, 0xd4, 0x80, 0xf0, 0x37, 0x68,
+ 0x5f, 0x69, 0xa6, 0x13, 0x75, 0xd4, 0x30, 0x48, 0x67, 0x77, 0xa4, 0x91, 0xb9, 0x9d, 0x12, 0xba,
+ 0x5f, 0xac, 0x69, 0x89, 0xeb, 0xff, 0x61, 0xa1, 0xb7, 0xb6, 0xa8, 0x1e, 0x71, 0xa5, 0xf1, 0x77,
+ 0x1b, 0x3e, 0x91, 0xdd, 0x7c, 0xca, 0xd5, 0xc6, 0xa5, 0xc3, 0xb2, 0xea, 0xc1, 0x3c, 0xb2, 0xe2,
+ 0xd1, 0x97, 0xa8, 0xc9, 0x35, 0x84, 0xf9, 0x0c, 0x34, 0x8e, 0x6f, 0xdd, 0x7d, 0x77, 0xe7, 0x8e,
+ 0xdc, 0x76, 0x49, 0x6d, 0x3e, 0xcc, 0xf5, 0xb4, 0xc0, 0xf4, 0xff, 0xac, 0x6f, 0xed, 0x24, 0x37,
+ 0x11, 0x3f, 0x41, 0xad, 0x90, 0x47, 0xf7, 0x53, 0xc6, 0x03, 0x36, 0x0a, 0xe0, 0x85, 0xa7, 0x9e,
+ 0x5f, 0x19, 0x52, 0x5c, 0x19, 0xf2, 0x30, 0xd2, 0x43, 0x79, 0xa1, 0x25, 0x8f, 0xc6, 0xee, 0x61,
+ 0x36, 0xeb, 0xb5, 0xce, 0x57, 0x48, 0xb4, 0xc2, 0xc5, 0xdf, 0xa3, 0x03, 0x05, 0x01, 0x78, 0x5a,
+ 0xc8, 0x97, 0x1b, 0xed, 0x47, 0x6c, 0x04, 0xc1, 0x45, 0x29, 0x75, 0x5b, 0xb9, 0x65, 0xf3, 0x15,
+ 0x5d, 0x20, 0x71, 0x80, 0x3a, 0x21, 0x7b, 0xfa, 0x75, 0xc4, 0x16, 0x8d, 0x34, 0xfe, 0x63, 0x23,
+ 0x38, 0x9b, 0xf5, 0x3a, 0xe7, 0x15, 0x16, 0x5d, 0x63, 0xf7, 0x7f, 0x6f, 0xa2, 0xb7, 0x6f, 0x1c,
+ 0x28, 0xfc, 0x05, 0xc2, 0x62, 0xa4, 0x40, 0xa6, 0xe0, 0x7f, 0x56, 0x3c, 0x2a, 0x5c, 0x44, 0xc6,
+ 0xd8, 0x86, 0xdb, 0x2d, 0x0f, 0x08, 0x0f, 0x37, 0x32, 0xe8, 0x16, 0x15, 0xfe, 0x19, 0xb5, 0xfd,
+ 0xa2, 0x0a, 0xf8, 0x8f, 0x85, 0x3f, 0x1f, 0x09, 0xf7, 0x25, 0x87, 0x9c, 0x0c, 0x56, 0x21, 0x67,
+ 0x91, 0x96, 0x53, 0xf7, 0x8d, 0xf2, 0x57, 0xda, 0x95, 0x3d, 0x5a, 0xad, 0x97, 0x37, 0xe3, 0x2f,
+ 0x90, 0xea, 0x7e, 0x10, 0x88, 0x9f, 0xc0, 0x37, 0xe6, 0x36, 0x97, 0xcd, 0x0c, 0x36, 0x32, 0xe8,
+ 0x16, 0x15, 0xfe, 0x04, 0x75, 0xbc, 0x44, 0x4a, 0x88, 0xf4, 0xe7, 0xc0, 0x02, 0x7d, 0x39, 0x3d,
+ 0xda, 0x33, 0x9c, 0x37, 0x4b, 0x4e, 0xe7, 0x41, 0x65, 0x97, 0xae, 0x65, 0xe7, 0x7a, 0x1f, 0x14,
+ 0x97, 0xe0, 0xcf, 0xf5, 0xcd, 0xaa, 0x7e, 0x50, 0xd9, 0xa5, 0x6b, 0xd9, 0xf8, 0x14, 0xb5, 0xe0,
+ 0x69, 0x0c, 0xde, 0xdc, 0xcb, 0x7d, 0xa3, 0xbe, 0x5d, 0xaa, 0x5b, 0x67, 0x2b, 0x7b, 0xb4, 0x92,
+ 0x89, 0x3d, 0x84, 0x3c, 0x11, 0xf9, 0xbc, 0x78, 0x9a, 0x5f, 0x31, 0x67, 0xe0, 0xec, 0x36, 0xbf,
+ 0x0f, 0xe6, 0xba, 0xe5, 0xc3, 0xb8, 0x08, 0x29, 0xba, 0x82, 0xed, 0x06, 0x08, 0x6f, 0x1e, 0x13,
+ 0x3e, 0x44, 0x8d, 0x09, 0x4c, 0xcd, 0xf8, 0xbc, 0x4a, 0xf3, 0x4f, 0xfc, 0x29, 0x6a, 0xa6, 0x2c,
+ 0x48, 0xa0, 0xbc, 0x47, 0xef, 0xed, 0xf6, 0x1f, 0x5f, 0xf1, 0x10, 0x68, 0x21, 0xfc, 0xa8, 0x7e,
+ 0x6a, 0xb9, 0xc7, 0x57, 0xd7, 0x76, 0xed, 0xd9, 0xb5, 0x5d, 0x7b, 0x7e, 0x6d, 0xd7, 0x7e, 0xc9,
+ 0x6c, 0xeb, 0x2a, 0xb3, 0xad, 0x67, 0x99, 0x6d, 0x3d, 0xcf, 0x6c, 0xeb, 0xef, 0xcc, 0xb6, 0x7e,
+ 0xfd, 0xc7, 0xae, 0x7d, 0x5b, 0x4f, 0x4f, 0xfe, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xce, 0x1b, 0x9d,
+ 0x9f, 0x62, 0x08, 0x00, 0x00,
+}
+
+func (m *Eviction) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Eviction) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.DeleteOptions != nil {
+ {
+ size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ {
+ size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintGenerated(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
}
func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) {
@@ -474,6 +551,21 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
return base
}
+func (m *Eviction) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ObjectMeta.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ if m.DeleteOptions != nil {
+ l = m.DeleteOptions.Size()
+ n += 1 + l + sovGenerated(uint64(l))
+ }
+ return n
+}
+
func (m *PodDisruptionBudget) Size() (n int) {
if m == nil {
return 0
@@ -562,6 +654,17 @@ func sovGenerated(x uint64) (n int) {
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
+func (this *Eviction) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Eviction{`,
+ `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+ `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`,
+ `}`,
+ }, "")
+ return s
+}
func (this *PodDisruptionBudget) String() string {
if this == nil {
return "nil"
@@ -641,6 +744,125 @@ func valueToStringGenerated(v interface{}) string {
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
+func (m *Eviction) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Eviction: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DeleteOptions == nil {
+ m.DeleteOptions = &v1.DeleteOptions{}
+ }
+ if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipGenerated(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
diff --git a/vendor/k8s.io/api/policy/v1/generated.proto b/vendor/k8s.io/api/policy/v1/generated.proto
index f57514112..5b7984235 100644
--- a/vendor/k8s.io/api/policy/v1/generated.proto
+++ b/vendor/k8s.io/api/policy/v1/generated.proto
@@ -29,6 +29,19 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1";
+// Eviction evicts a pod from its node subject to certain policies and safety constraints.
+// This is a subresource of Pod. A request to cause such an eviction is
+// created by POSTing to .../pods/<pod name>/evictions.
+message Eviction {
+ // ObjectMeta describes the pod that is being evicted.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // DeleteOptions may be provided
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2;
+}
+
// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
message PodDisruptionBudget {
// Standard object's metadata.
diff --git a/vendor/k8s.io/api/policy/v1/register.go b/vendor/k8s.io/api/policy/v1/register.go
index 603c49b9c..7fb9fd3e1 100644
--- a/vendor/k8s.io/api/policy/v1/register.go
+++ b/vendor/k8s.io/api/policy/v1/register.go
@@ -44,6 +44,7 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&PodDisruptionBudget{},
&PodDisruptionBudgetList{},
+ &Eviction{},
)
// Add the watch version that applies
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
diff --git a/vendor/k8s.io/api/policy/v1/types.go b/vendor/k8s.io/api/policy/v1/types.go
index 65bf768d8..4a03696f0 100644
--- a/vendor/k8s.io/api/policy/v1/types.go
+++ b/vendor/k8s.io/api/policy/v1/types.go
@@ -21,6 +21,9 @@ import (
"k8s.io/apimachinery/pkg/util/intstr"
)
+// DisruptionBudgetCause is the status cause returned for eviction failures caused by PodDisruptionBudget violations.
+const DisruptionBudgetCause metav1.CauseType = "DisruptionBudget"
+
// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.
type PodDisruptionBudgetSpec struct {
// An eviction is allowed if at least "minAvailable" pods selected by
@@ -148,3 +151,22 @@ type PodDisruptionBudgetList struct {
// Items is a list of PodDisruptionBudgets
Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"`
}
+
+// +genclient
+// +genclient:noVerbs
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Eviction evicts a pod from its node subject to certain policies and safety constraints.
+// This is a subresource of Pod. A request to cause such an eviction is
+// created by POSTing to .../pods/<pod name>/evictions.
+type Eviction struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // ObjectMeta describes the pod that is being evicted.
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // DeleteOptions may be provided
+ // +optional
+ DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"`
+}
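
The Eviction kind added here mirrors the existing policy/v1beta1 Eviction; it is the body POSTed to a pod's eviction subresource, and an eviction blocked by a PodDisruptionBudget fails with the DisruptionBudget cause defined above. A minimal sketch of issuing one against these vendored types follows; the namespace, pod name, grace period, and the client-go call (PolicyV1().Evictions(...).Evict, expected in client-go v0.22.x) are assumptions for illustration, not part of this change.

package evictutil

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// evictPod asks the API server to evict a pod while honoring PodDisruptionBudgets.
// A PDB violation is surfaced as a 429 with status cause "DisruptionBudget".
func evictPod(ctx context.Context, cs kubernetes.Interface, namespace, pod string) error {
	grace := int64(30) // illustrative grace period
	eviction := &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: pod, Namespace: namespace},
		DeleteOptions: &metav1.DeleteOptions{
			GracePeriodSeconds: &grace,
		},
	}
	// The typed client POSTs this body to the pod's eviction subresource.
	return cs.PolicyV1().Evictions(namespace).Evict(ctx, eviction)
}
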
diff --git a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go
index 0b80a1dcc..3208392e8 100644
--- a/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/policy/v1/types_swagger_doc_generated.go
@@ -27,6 +27,16 @@ package v1
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_Eviction = map[string]string{
+ "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods//evictions.",
+ "metadata": "ObjectMeta describes the pod that is being evicted.",
+ "deleteOptions": "DeleteOptions may be provided",
+}
+
+func (Eviction) SwaggerDoc() map[string]string {
+ return map_Eviction
+}
+
var map_PodDisruptionBudget = map[string]string{
"": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
diff --git a/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go
index 78c0adbd7..9b7aac2f1 100644
--- a/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/policy/v1/zz_generated.deepcopy.go
@@ -26,6 +26,37 @@ import (
intstr "k8s.io/apimachinery/pkg/util/intstr"
)
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Eviction) DeepCopyInto(out *Eviction) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ if in.DeleteOptions != nil {
+ in, out := &in.DeleteOptions, &out.DeleteOptions
+ *out = new(metav1.DeleteOptions)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eviction.
+func (in *Eviction) DeepCopy() *Eviction {
+ if in == nil {
+ return nil
+ }
+ out := new(Eviction)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Eviction) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) {
*out = *in
diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto
index a47212142..133ba0493 100644
--- a/vendor/k8s.io/api/policy/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto
@@ -105,6 +105,8 @@ message IDRange {
// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
message PodDisruptionBudget {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
@@ -119,9 +121,12 @@ message PodDisruptionBudget {
// PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
message PodDisruptionBudgetList {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+ // items list individual PodDisruptionBudget objects
repeated PodDisruptionBudget items = 2;
}
diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go
index 281104451..553cb316d 100644
--- a/vendor/k8s.io/api/policy/v1beta1/types.go
+++ b/vendor/k8s.io/api/policy/v1beta1/types.go
@@ -129,6 +129,9 @@ const (
// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
type PodDisruptionBudget struct {
metav1.TypeMeta `json:",inline"`
+
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
@@ -149,9 +152,13 @@ type PodDisruptionBudget struct {
// PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
type PodDisruptionBudgetList struct {
metav1.TypeMeta `json:",inline"`
+
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
- Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"`
+ // items list individual PodDisruptionBudget objects
+ Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
index 0853a5e99..ef81d43af 100644
--- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go
@@ -96,9 +96,10 @@ func (IDRange) SwaggerDoc() map[string]string {
}
var map_PodDisruptionBudget = map[string]string{
- "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods",
- "spec": "Specification of the desired behavior of the PodDisruptionBudget.",
- "status": "Most recently observed status of the PodDisruptionBudget.",
+ "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "spec": "Specification of the desired behavior of the PodDisruptionBudget.",
+ "status": "Most recently observed status of the PodDisruptionBudget.",
}
func (PodDisruptionBudget) SwaggerDoc() map[string]string {
@@ -106,7 +107,9 @@ func (PodDisruptionBudget) SwaggerDoc() map[string]string {
}
var map_PodDisruptionBudgetList = map[string]string{
- "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.",
+ "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "items list individual PodDisruptionBudget objects",
}
func (PodDisruptionBudgetList) SwaggerDoc() map[string]string {
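
The policy/v1beta1 hunks above only restore doc comments for metadata and items. For context, a minimal sketch of a PodDisruptionBudget built from these vendored types; the name, selector labels, and minAvailable value are illustrative assumptions.

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	minAvailable := intstr.FromInt(2)
	// The eviction API (see the policy/v1 Eviction sketch above) rejects evictions
	// that would leave fewer than minAvailable selected pods healthy.
	pdb := policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "web-pdb", Namespace: "default"},
		Spec: policyv1beta1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}},
		},
	}
	fmt.Println(pdb.Name, pdb.Spec.MinAvailable.String())
}
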
diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto
index 22c6dae4b..db8fd427c 100644
--- a/vendor/k8s.io/api/rbac/v1/generated.proto
+++ b/vendor/k8s.io/api/rbac/v1/generated.proto
@@ -92,7 +92,7 @@ message ClusterRoleList {
// PolicyRule holds information that describes a policy rule, but does not contain information
// about who the rule applies to or which namespace the rule applies to.
message PolicyRule {
- // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.
repeated string verbs = 1;
// APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
@@ -100,7 +100,7 @@ message PolicyRule {
// +optional
repeated string apiGroups = 2;
- // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+ // Resources is a list of resources this rule applies to. '*' represents all resources.
// +optional
repeated string resources = 3;
@@ -164,6 +164,7 @@ message RoleList {
}
// RoleRef contains information that points to the role being used
+// +structType=atomic
message RoleRef {
// APIGroup is the group for the resource being referenced
optional string apiGroup = 1;
@@ -177,6 +178,7 @@ message RoleRef {
// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference,
// or a value for non-objects such as user and group names.
+// +structType=atomic
message Subject {
// Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
// If the Authorizer does not recognized the kind value, the Authorizer should report an error.
diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go
index 7ba7d0543..038baf61b 100644
--- a/vendor/k8s.io/api/rbac/v1/types.go
+++ b/vendor/k8s.io/api/rbac/v1/types.go
@@ -47,14 +47,14 @@ const (
// PolicyRule holds information that describes a policy rule, but does not contain information
// about who the rule applies to or which namespace the rule applies to.
type PolicyRule struct {
- // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.
Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
// APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
// the enumerated resources in any API group will be allowed.
// +optional
APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"`
- // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+ // Resources is a list of resources this rule applies to. '*' represents all resources.
// +optional
Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
// ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
@@ -70,6 +70,7 @@ type PolicyRule struct {
// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference,
// or a value for non-objects such as user and group names.
+// +structType=atomic
type Subject struct {
// Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount".
// If the Authorizer does not recognized the kind value, the Authorizer should report an error.
@@ -88,6 +89,7 @@ type Subject struct {
}
// RoleRef contains information that points to the role being used
+// +structType=atomic
type RoleRef struct {
// APIGroup is the group for the resource being referenced
APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"`
diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
index 83ce310e6..664cf95f8 100644
--- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go
@@ -80,9 +80,9 @@ func (ClusterRoleList) SwaggerDoc() map[string]string {
var map_PolicyRule = map[string]string{
"": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
- "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.",
+ "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.",
"apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.",
- "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.",
+ "resources": "Resources is a list of resources this rule applies to. '*' represents all resources.",
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
"nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.",
}
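
The rbac/v1 changes are comment-only: the VerbAll/ResourceAll wording is replaced by the literal '*' wildcard, and RoleRef/Subject gain +structType=atomic markers. A minimal sketch of a rule using the wildcards the updated comments describe; the role name and API group list are illustrative assumptions.

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// "*" in verbs and resources matches everything in the listed API groups,
	// per the clarified PolicyRule documentation.
	role := rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "ops-admin"},
		Rules: []rbacv1.PolicyRule{{
			Verbs:     []string{"*"},
			APIGroups: []string{"", "apps"},
			Resources: []string{"*"},
		}},
	}
	fmt.Println(role.Name, role.Rules[0].Verbs)
}
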
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
index caed7ec30..9dea00e05 100644
--- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
+++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
@@ -96,7 +96,7 @@ message ClusterRoleList {
// PolicyRule holds information that describes a policy rule, but does not contain information
// about who the rule applies to or which namespace the rule applies to.
message PolicyRule {
- // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.
repeated string verbs = 1;
// APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
@@ -104,7 +104,7 @@ message PolicyRule {
// +optional
repeated string apiGroups = 3;
- // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+ // Resources is a list of resources this rule applies to. '*' represents all resources.
// +optional
repeated string resources = 4;
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go
index 538ae4c9b..57d993caa 100644
--- a/vendor/k8s.io/api/rbac/v1alpha1/types.go
+++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go
@@ -47,14 +47,14 @@ const (
// PolicyRule holds information that describes a policy rule, but does not contain information
// about who the rule applies to or which namespace the rule applies to.
type PolicyRule struct {
- // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.
Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
// APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
// the enumerated resources in any API group will be allowed.
// +optional
APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"`
- // Resources is a list of resources this rule applies to. ResourceAll represents all resources.
+ // Resources is a list of resources this rule applies to. '*' represents all resources.
// +optional
Resources []string `json:"resources,omitempty" protobuf:"bytes,4,rep,name=resources"`
// ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
index acb844782..8fc984ad5 100644
--- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go
@@ -80,9 +80,9 @@ func (ClusterRoleList) SwaggerDoc() map[string]string {
var map_PolicyRule = map[string]string{
"": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
- "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.",
+ "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.",
"apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.",
- "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.",
+ "resources": "Resources is a list of resources this rule applies to. '*' represents all resources.",
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
"nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.",
}
diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto
index 17d3741f0..fb975e2ea 100644
--- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto
@@ -96,7 +96,7 @@ message ClusterRoleList {
// PolicyRule holds information that describes a policy rule, but does not contain information
// about who the rule applies to or which namespace the rule applies to.
message PolicyRule {
- // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.
repeated string verbs = 1;
// APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go
index f16788811..ad8391fd7 100644
--- a/vendor/k8s.io/api/rbac/v1beta1/types.go
+++ b/vendor/k8s.io/api/rbac/v1beta1/types.go
@@ -47,7 +47,7 @@ const (
// PolicyRule holds information that describes a policy rule, but does not contain information
// about who the rule applies to or which namespace the rule applies to.
type PolicyRule struct {
- // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.
+ // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.
Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"`
// APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go
index 0512301f5..eef80f834 100644
--- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go
@@ -80,7 +80,7 @@ func (ClusterRoleList) SwaggerDoc() map[string]string {
var map_PolicyRule = map[string]string{
"": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.",
- "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.",
+ "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs.",
"apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.",
"resources": "Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.",
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto
index 0e9a2e1da..9602c9098 100644
--- a/vendor/k8s.io/api/storage/v1/generated.proto
+++ b/vendor/k8s.io/api/storage/v1/generated.proto
@@ -154,11 +154,15 @@ message CSIDriverSpec {
// Defines if the underlying volume supports changing ownership and
// permission of the volume before being mounted.
// Refer to the specific FSGroupPolicy values for additional details.
- // This field is alpha-level, and is only honored by servers
+ // This field is beta, and is only honored by servers
// that enable the CSIVolumeFSGroupPolicy feature gate.
//
// This field is immutable.
//
+ // Defaults to ReadWriteOnceWithFSType, which will examine each volume
+ // to determine if Kubernetes should modify ownership and permissions of the volume.
+ // With the default policy the defined fsGroup will only be applied
+ // if a fstype is defined and the volume's access mode contains ReadWriteOnce.
// +optional
optional string fsGroupPolicy = 5;
@@ -178,9 +182,6 @@ message CSIDriverSpec {
// most one token is empty string. To receive a new token after expiry,
// RequiresRepublish can be used to trigger NodePublishVolume periodically.
//
- // This is a beta feature and only available when the
- // CSIServiceAccountToken feature is enabled.
- //
// +optional
// +listType=atomic
repeated TokenRequest tokenRequests = 6;
@@ -193,9 +194,6 @@ message CSIDriverSpec {
// to NodePublishVolume should only update the contents of the volume. New
// mount points will not be seen by a running container.
//
- // This is a beta feature and only available when the
- // CSIServiceAccountToken feature is enabled.
- //
// +optional
optional bool requiresRepublish = 7;
}
@@ -319,6 +317,7 @@ message StorageClass {
// An empty TopologySelectorTerm list means there is no topology restriction.
// This field is only honored by servers that enable the VolumeScheduling feature.
// +optional
+ // +listType=atomic
repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8;
}
diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go
index 6a7bf4929..d805e1539 100644
--- a/vendor/k8s.io/api/storage/v1/types.go
+++ b/vendor/k8s.io/api/storage/v1/types.go
@@ -71,6 +71,7 @@ type StorageClass struct {
// An empty TopologySelectorTerm list means there is no topology restriction.
// This field is only honored by servers that enable the VolumeScheduling feature.
// +optional
+ // +listType=atomic
AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"`
}
@@ -352,11 +353,15 @@ type CSIDriverSpec struct {
// Defines if the underlying volume supports changing ownership and
// permission of the volume before being mounted.
// Refer to the specific FSGroupPolicy values for additional details.
- // This field is alpha-level, and is only honored by servers
+ // This field is beta, and is only honored by servers
// that enable the CSIVolumeFSGroupPolicy feature gate.
//
// This field is immutable.
//
+ // Defaults to ReadWriteOnceWithFSType, which will examine each volume
+ // to determine if Kubernetes should modify ownership and permissions of the volume.
+ // With the default policy the defined fsGroup will only be applied
+ // if a fstype is defined and the volume's access mode contains ReadWriteOnce.
// +optional
FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"`
@@ -376,9 +381,6 @@ type CSIDriverSpec struct {
// most one token is empty string. To receive a new token after expiry,
// RequiresRepublish can be used to trigger NodePublishVolume periodically.
//
- // This is a beta feature and only available when the
- // CSIServiceAccountToken feature is enabled.
- //
// +optional
// +listType=atomic
TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"`
@@ -391,9 +393,6 @@ type CSIDriverSpec struct {
// to NodePublishVolume should only update the contents of the volume. New
// mount points will not be seen by a running container.
//
- // This is a beta feature and only available when the
- // CSIServiceAccountToken feature is enabled.
- //
// +optional
RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"`
}
@@ -414,10 +413,11 @@ const (
ReadWriteOnceWithFSTypeFSGroupPolicy FSGroupPolicy = "ReadWriteOnceWithFSType"
// FileFSGroupPolicy indicates that CSI driver supports volume ownership
- // and permission change via fsGroup, and Kubernetes may use fsGroup
- // to change permissions and ownership of the volume to match user requested fsGroup in
+ // and permission change via fsGroup, and Kubernetes will change the permissions
+ // and ownership of every file in the volume to match the user requested fsGroup in
// the pod's SecurityPolicy regardless of fstype or access mode.
- // This mode should be defined if the fsGroup is expected to always change on mount
+ // Use this mode if Kubernetes should modify the permissions and ownership
+ // of the volume.
FileFSGroupPolicy FSGroupPolicy = "File"
// NoneFSGroupPolicy indicates that volumes will be mounted without performing
diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
index a9c7cc9af..d3747dc86 100644
--- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go
@@ -53,9 +53,9 @@ var map_CSIDriverSpec = map[string]string{
"podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.",
"volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.\n\nThis field is immutable.",
"storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field is immutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.",
- "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\n\nThis field is immutable.",
- "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\n\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.",
- "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\n\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.",
+ "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is beta, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.",
+ "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.",
+ "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.",
}
func (CSIDriverSpec) SwaggerDoc() map[string]string {
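For reference, a minimal sketch (not part of this diff) of what the updated fsGroupPolicy documentation describes, built with the storage/v1 types pulled in by this vendor bump; the driver name is hypothetical, and the v1beta1 type patched below carries the same field:

```go
package main

import (
	"encoding/json"
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// ReadWriteOnceWithFSType is the documented default; it is set
	// explicitly here only so the behaviour is visible in the output.
	policy := storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy
	driver := storagev1.CSIDriver{
		ObjectMeta: metav1.ObjectMeta{Name: "example.csi.vendor.io"}, // hypothetical driver name
		Spec: storagev1.CSIDriverSpec{
			FSGroupPolicy: &policy,
		},
	}
	out, err := json.MarshalIndent(driver, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```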
diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto
index 5ed6413ec..f72ca6b23 100644
--- a/vendor/k8s.io/api/storage/v1beta1/generated.proto
+++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto
@@ -156,11 +156,15 @@ message CSIDriverSpec {
// Defines if the underlying volume supports changing ownership and
// permission of the volume before being mounted.
// Refer to the specific FSGroupPolicy values for additional details.
- // This field is alpha-level, and is only honored by servers
+ // This field is beta, and is only honored by servers
// that enable the CSIVolumeFSGroupPolicy feature gate.
//
// This field is immutable.
//
+ // Defaults to ReadWriteOnceWithFSType, which will examine each volume
+ // to determine if Kubernetes should modify ownership and permissions of the volume.
+ // With the default policy the defined fsGroup will only be applied
+ // if a fstype is defined and the volume's access mode contains ReadWriteOnce.
// +optional
optional string fsGroupPolicy = 5;
@@ -180,9 +184,6 @@ message CSIDriverSpec {
// most one token is empty string. To receive a new token after expiry,
// RequiresRepublish can be used to trigger NodePublishVolume periodically.
//
- // This is a beta feature and only available when the
- // CSIServiceAccountToken feature is enabled.
- //
// +optional
// +listType=atomic
repeated TokenRequest tokenRequests = 6;
@@ -195,9 +196,6 @@ message CSIDriverSpec {
// to NodePublishVolume should only update the contents of the volume. New
// mount points will not be seen by a running container.
//
- // This is a beta feature and only available when the
- // CSIServiceAccountToken feature is enabled.
- //
// +optional
optional bool requiresRepublish = 7;
}
@@ -412,6 +410,7 @@ message StorageClass {
// An empty TopologySelectorTerm list means there is no topology restriction.
// This field is only honored by servers that enable the VolumeScheduling feature.
// +optional
+ // +listType=atomic
repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8;
}
diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go
index d1ffe7a0e..9fe5646ad 100644
--- a/vendor/k8s.io/api/storage/v1beta1/types.go
+++ b/vendor/k8s.io/api/storage/v1beta1/types.go
@@ -75,6 +75,7 @@ type StorageClass struct {
// An empty TopologySelectorTerm list means there is no topology restriction.
// This field is only honored by servers that enable the VolumeScheduling feature.
// +optional
+ // +listType=atomic
AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"`
}
@@ -373,11 +374,15 @@ type CSIDriverSpec struct {
// Defines if the underlying volume supports changing ownership and
// permission of the volume before being mounted.
// Refer to the specific FSGroupPolicy values for additional details.
- // This field is alpha-level, and is only honored by servers
+ // This field is beta, and is only honored by servers
// that enable the CSIVolumeFSGroupPolicy feature gate.
//
// This field is immutable.
//
+ // Defaults to ReadWriteOnceWithFSType, which will examine each volume
+ // to determine if Kubernetes should modify ownership and permissions of the volume.
+ // With the default policy the defined fsGroup will only be applied
+ // if a fstype is defined and the volume's access mode contains ReadWriteOnce.
// +optional
FSGroupPolicy *FSGroupPolicy `json:"fsGroupPolicy,omitempty" protobuf:"bytes,5,opt,name=fsGroupPolicy"`
@@ -397,9 +402,6 @@ type CSIDriverSpec struct {
// most one token is empty string. To receive a new token after expiry,
// RequiresRepublish can be used to trigger NodePublishVolume periodically.
//
- // This is a beta feature and only available when the
- // CSIServiceAccountToken feature is enabled.
- //
// +optional
// +listType=atomic
TokenRequests []TokenRequest `json:"tokenRequests,omitempty" protobuf:"bytes,6,opt,name=tokenRequests"`
@@ -412,9 +414,6 @@ type CSIDriverSpec struct {
// to NodePublishVolume should only update the contents of the volume. New
// mount points will not be seen by a running container.
//
- // This is a beta feature and only available when the
- // CSIServiceAccountToken feature is enabled.
- //
// +optional
RequiresRepublish *bool `json:"requiresRepublish,omitempty" protobuf:"varint,7,opt,name=requiresRepublish"`
}
@@ -433,9 +432,11 @@ const (
ReadWriteOnceWithFSTypeFSGroupPolicy FSGroupPolicy = "ReadWriteOnceWithFSType"
// FileFSGroupPolicy indicates that CSI driver supports volume ownership
- // and permission change via fsGroup, and Kubernetes may use fsGroup
- // to change permissions and ownership of the volume to match user requested fsGroup in
+ // and permission change via fsGroup, and Kubernetes will change the permissions
+ // and ownership of every file in the volume to match the user requested fsGroup in
// the pod's SecurityPolicy regardless of fstype or access mode.
+ // Use this mode if Kubernetes should modify the permissions and ownership
+ // of the volume.
FileFSGroupPolicy FSGroupPolicy = "File"
// None indicates that volumes will be mounted without performing
diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
index 0d03a30b4..9e1efa25b 100644
--- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go
@@ -53,9 +53,9 @@ var map_CSIDriverSpec = map[string]string{
"podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.",
"volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is immutable.",
"storageCapacity": "If set to true, storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field is immutable.\n\nThis is a beta field and only available when the CSIStorageCapacity feature is enabled. The default is false.",
- "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is alpha-level, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\n\nThis field is immutable.",
- "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.\n\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.",
- "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.\n\nThis is a beta feature and only available when the CSIServiceAccountToken feature is enabled.",
+ "fsGroupPolicy": "Defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details. This field is beta, and is only honored by servers that enable the CSIVolumeFSGroupPolicy feature gate.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.",
+ "tokenRequests": "TokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\": {\n \"token\": ,\n \"expirationTimestamp\": ,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.",
+ "requiresRepublish": "RequiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.",
}
func (CSIDriverSpec) SwaggerDoc() map[string]string {
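The tokenRequests documentation above describes the JSON object kubelet places under the "csi.storage.k8s.io/serviceAccount.tokens" VolumeContext key. A minimal sketch (not part of this diff) of how a driver might decode it; the struct and function names are illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// serviceAccountToken mirrors the per-audience object kubelet encodes in the
// VolumeContext; the type name is illustrative, not taken from any driver here.
type serviceAccountToken struct {
	Token               string    `json:"token"`
	ExpirationTimestamp time.Time `json:"expirationTimestamp"`
}

// parseTokens extracts the audience -> token map from a NodePublishVolume
// VolumeContext, keyed by the documented csi.storage.k8s.io key.
func parseTokens(volumeContext map[string]string) (map[string]serviceAccountToken, error) {
	raw, ok := volumeContext["csi.storage.k8s.io/serviceAccount.tokens"]
	if !ok {
		return nil, fmt.Errorf("no service account tokens in volume context")
	}
	tokens := map[string]serviceAccountToken{}
	if err := json.Unmarshal([]byte(raw), &tokens); err != nil {
		return nil, err
	}
	return tokens, nil
}

func main() {
	ctx := map[string]string{
		"csi.storage.k8s.io/serviceAccount.tokens": `{"": {"token": "abc", "expirationTimestamp": "2022-01-01T00:00:00Z"}}`,
	}
	tokens, err := parseTokens(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(tokens[""].Token, tokens[""].ExpirationTimestamp)
}
```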
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go
index c056dd91f..9bcbe5026 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/conversion.go
@@ -17,6 +17,8 @@ limitations under the License.
package v1
import (
+ "bytes"
+
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/conversion"
@@ -36,20 +38,29 @@ func Convert_apiextensions_JSONSchemaProps_To_v1_JSONSchemaProps(in *apiextensio
return nil
}
+var nullLiteral = []byte(`null`)
+
func Convert_apiextensions_JSON_To_v1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error {
raw, err := json.Marshal(*in)
if err != nil {
return err
}
- out.Raw = raw
+ if len(raw) == 0 || bytes.Equal(raw, nullLiteral) {
+ // match JSON#UnmarshalJSON treatment of literal nulls
+ out.Raw = nil
+ } else {
+ out.Raw = raw
+ }
return nil
}
func Convert_v1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error {
if in != nil {
var i interface{}
- if err := json.Unmarshal(in.Raw, &i); err != nil {
- return err
+ if len(in.Raw) > 0 && !bytes.Equal(in.Raw, nullLiteral) {
+ if err := json.Unmarshal(in.Raw, &i); err != nil {
+ return err
+ }
}
*out = i
} else {
@@ -103,7 +114,7 @@ func Convert_apiextensions_CustomResourceDefinitionSpec_To_v1_CustomResourceDefi
func Convert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in *CustomResourceDefinitionSpec, out *apiextensions.CustomResourceDefinitionSpec, s conversion.Scope) error {
if err := autoConvert_v1_CustomResourceDefinitionSpec_To_apiextensions_CustomResourceDefinitionSpec(in, out, s); err != nil {
- return nil
+ return err
}
if len(out.Versions) == 0 {
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto
index a82824ae7..9bdfa2030 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/generated.proto
@@ -120,6 +120,9 @@ message CustomResourceConversion {
// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format
// <.spec.name>.<.spec.group>.
message CustomResourceDefinition {
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// spec describes how the user wants the resources to appear
@@ -154,6 +157,9 @@ message CustomResourceDefinitionCondition {
// CustomResourceDefinitionList is a list of CustomResourceDefinition objects.
message CustomResourceDefinitionList {
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// items list individual CustomResourceDefinition objects
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go
index ba7f286eb..12cc2f6f2 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/marshal.go
@@ -17,6 +17,7 @@ limitations under the License.
package v1
import (
+ "bytes"
"errors"
"k8s.io/apimachinery/pkg/util/json"
@@ -128,7 +129,7 @@ func (s JSON) MarshalJSON() ([]byte, error) {
}
func (s *JSON) UnmarshalJSON(data []byte) error {
- if len(data) > 0 && string(data) != "null" {
+ if len(data) > 0 && !bytes.Equal(data, nullLiteral) {
s.Raw = data
}
return nil
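The null-literal guard added to UnmarshalJSON above (and mirrored in the Convert_* helpers earlier in this diff) means a literal JSON null is dropped rather than stored as Raw bytes. A minimal standalone sketch (not part of this diff) of that behaviour:

```go
package main

import (
	"encoding/json"
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)

func main() {
	var v apiextensionsv1.JSON

	// A literal null is not stored, so Raw stays nil.
	if err := json.Unmarshal([]byte(`null`), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Raw == nil) // true

	// Any other value is kept verbatim.
	if err := json.Unmarshal([]byte(`{"default": 1}`), &v); err != nil {
		panic(err)
	}
	fmt.Println(string(v.Raw)) // {"default": 1}
}
```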
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go
index a55dd5b1b..223601bc4 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/types.go
@@ -359,7 +359,10 @@ const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.
// CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format
// <.spec.name>.<.spec.group>.
type CustomResourceDefinition struct {
- metav1.TypeMeta `json:",inline"`
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec describes how the user wants the resources to appear
@@ -374,6 +377,10 @@ type CustomResourceDefinition struct {
// CustomResourceDefinitionList is a list of CustomResourceDefinition objects.
type CustomResourceDefinitionList struct {
metav1.TypeMeta `json:",inline"`
+
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items list individual CustomResourceDefinition objects
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go
index e014ce62f..eed3fde63 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/conversion.go
@@ -17,6 +17,8 @@ limitations under the License.
package v1beta1
import (
+ "bytes"
+
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/util/json"
@@ -36,20 +38,29 @@ func Convert_apiextensions_JSONSchemaProps_To_v1beta1_JSONSchemaProps(in *apiext
return nil
}
+var nullLiteral = []byte(`null`)
+
func Convert_apiextensions_JSON_To_v1beta1_JSON(in *apiextensions.JSON, out *JSON, s conversion.Scope) error {
raw, err := json.Marshal(*in)
if err != nil {
return err
}
- out.Raw = raw
+ if len(raw) == 0 || bytes.Equal(raw, nullLiteral) {
+ // match JSON#UnmarshalJSON treatment of literal nulls
+ out.Raw = nil
+ } else {
+ out.Raw = raw
+ }
return nil
}
func Convert_v1beta1_JSON_To_apiextensions_JSON(in *JSON, out *apiextensions.JSON, s conversion.Scope) error {
if in != nil {
var i interface{}
- if err := json.Unmarshal(in.Raw, &i); err != nil {
- return err
+ if len(in.Raw) > 0 && !bytes.Equal(in.Raw, nullLiteral) {
+ if err := json.Unmarshal(in.Raw, &i); err != nil {
+ return err
+ }
}
*out = i
} else {
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto
index e738f423c..f5072b35c 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto
@@ -132,6 +132,9 @@ message CustomResourceConversion {
// <.spec.name>.<.spec.group>.
// Deprecated in v1.16, planned for removal in v1.22. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead.
message CustomResourceDefinition {
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// spec describes how the user wants the resources to appear
@@ -166,6 +169,9 @@ message CustomResourceDefinitionCondition {
// CustomResourceDefinitionList is a list of CustomResourceDefinition objects.
message CustomResourceDefinitionList {
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// items list individual CustomResourceDefinition objects
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go
index 9a8fad3b7..44941d82e 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/marshal.go
@@ -17,6 +17,7 @@ limitations under the License.
package v1beta1
import (
+ "bytes"
"errors"
"k8s.io/apimachinery/pkg/util/json"
@@ -128,7 +129,7 @@ func (s JSON) MarshalJSON() ([]byte, error) {
}
func (s *JSON) UnmarshalJSON(data []byte) error {
- if len(data) > 0 && string(data) != "null" {
+ if len(data) > 0 && !bytes.Equal(data, nullLiteral) {
s.Raw = data
}
return nil
diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go
index 671869b9f..61ebdb63f 100644
--- a/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go
+++ b/vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/types.go
@@ -396,7 +396,10 @@ const CustomResourceCleanupFinalizer = "customresourcecleanup.apiextensions.k8s.
// <.spec.name>.<.spec.group>.
// Deprecated in v1.16, planned for removal in v1.22. Use apiextensions.k8s.io/v1 CustomResourceDefinition instead.
type CustomResourceDefinition struct {
- metav1.TypeMeta `json:",inline"`
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// spec describes how the user wants the resources to appear
@@ -415,6 +418,10 @@ type CustomResourceDefinition struct {
// CustomResourceDefinitionList is a list of CustomResourceDefinition objects.
type CustomResourceDefinitionList struct {
metav1.TypeMeta `json:",inline"`
+
+ // Standard object's metadata
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items list individual CustomResourceDefinition objects
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
index d3927d817..31eddfc1e 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go
@@ -274,7 +274,7 @@ func NewBadRequest(reason string) *StatusError {
// NewTooManyRequests creates an error that indicates that the client must try again later because
// the specified endpoint is not accepting requests. More specific details should be provided
-// if client should know why the failure was limited4.
+// if client should know why the failure was limited.
func NewTooManyRequests(message string, retryAfterSeconds int) *StatusError {
return &StatusError{metav1.Status{
Status: metav1.StatusFailure,
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
index 8d718945d..2395656cc 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -696,6 +696,15 @@ func (q *Quantity) UnmarshalJSON(value []byte) error {
return nil
}
+// NewDecimalQuantity returns a new Quantity representing the given
+// value in the given format.
+func NewDecimalQuantity(b inf.Dec, format Format) *Quantity {
+ return &Quantity{
+ d: infDecAmount{&b},
+ Format: format,
+ }
+}
+
// NewQuantity returns a new Quantity representing the given
// value in the given format.
func NewQuantity(value int64, format Format) *Quantity {
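NewDecimalQuantity, added above, builds a Quantity directly from an inf.Dec. A minimal usage sketch (not part of this diff), using the gopkg.in/inf.v0 package apimachinery already depends on:

```go
package main

import (
	"fmt"

	inf "gopkg.in/inf.v0"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// inf.NewDec(5, 3) represents 5 * 10^-3, i.e. 0.005.
	q := resource.NewDecimalQuantity(*inf.NewDec(5, 3), resource.DecimalSI)
	fmt.Println(q.String()) // 5m
}
```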
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
index 947c96f43..e0b5b1490 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go
@@ -68,9 +68,11 @@ var ValidateNamespaceName = NameIsDNSLabel
var ValidateServiceAccountName = NameIsDNSSubdomain
// maskTrailingDash replaces the final character of a string with a subdomain safe
-// value if is a dash.
+// value if it is a dash and if the length of this string is greater than 1. Note that
+// this is used when a value could be appended to the string, see ValidateNameFunc
+// for more info.
func maskTrailingDash(name string) string {
- if strings.HasSuffix(name, "-") {
+ if len(name) > 1 && strings.HasSuffix(name, "-") {
return name[:len(name)-2] + "a"
}
return name
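The len(name) > 1 guard added above keeps maskTrailingDash from slicing out of bounds when the whole name is a single dash. A standalone reproduction (not part of this diff) of the patched helper, since the original is unexported:

```go
package main

import (
	"fmt"
	"strings"
)

// maskTrailingDash mirrors the patched helper: the len(name) > 1 guard means
// a bare "-" is returned unchanged instead of panicking on name[:len(name)-2].
func maskTrailingDash(name string) string {
	if len(name) > 1 && strings.HasSuffix(name, "-") {
		return name[:len(name)-2] + "a"
	}
	return name
}

func main() {
	fmt.Println(maskTrailingDash("my-object-")) // my-objeca
	fmt.Println(maskTrailingDash("-"))          // "-" (previously an out-of-range slice)
}
```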
diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
index 3e234eb11..66e8d677a 100644
--- a/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
+++ b/vendor/k8s.io/apimachinery/pkg/api/validation/objectmeta.go
@@ -33,7 +33,7 @@ import (
// FieldImmutableErrorMsg is a error message for field is immutable.
const FieldImmutableErrorMsg string = `field is immutable`
-const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB
+const TotalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB
// BannedOwners is a black list of object that are not allowed to be owners.
var BannedOwners = map[schema.GroupVersionKind]struct{}{
@@ -46,19 +46,28 @@ var ValidateClusterName = NameIsDNS1035Label
// ValidateAnnotations validates that a set of annotations are correctly defined.
func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
- var totalSize int64
- for k, v := range annotations {
+ for k := range annotations {
for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) {
allErrs = append(allErrs, field.Invalid(fldPath, k, msg))
}
- totalSize += (int64)(len(k)) + (int64)(len(v))
}
- if totalSize > (int64)(totalAnnotationSizeLimitB) {
- allErrs = append(allErrs, field.TooLong(fldPath, "", totalAnnotationSizeLimitB))
+ if err := ValidateAnnotationsSize(annotations); err != nil {
+ allErrs = append(allErrs, field.TooLong(fldPath, "", TotalAnnotationSizeLimitB))
}
return allErrs
}
+func ValidateAnnotationsSize(annotations map[string]string) error {
+ var totalSize int64
+ for k, v := range annotations {
+ totalSize += (int64)(len(k)) + (int64)(len(v))
+ }
+ if totalSize > (int64)(TotalAnnotationSizeLimitB) {
+ return fmt.Errorf("annotations size %d is larger than limit %d", totalSize, TotalAnnotationSizeLimitB)
+ }
+ return nil
+}
+
func validateOwnerReference(ownerReference metav1.OwnerReference, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
gvk := schema.FromAPIVersionAndKind(ownerReference.APIVersion, ownerReference.Kind)
@@ -134,7 +143,7 @@ func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) fie
func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
metadata, err := meta.Accessor(objMeta)
if err != nil {
- allErrs := field.ErrorList{}
+ var allErrs field.ErrorList
allErrs = append(allErrs, field.Invalid(fldPath, objMeta, err.Error()))
return allErrs
}
@@ -145,7 +154,7 @@ func ValidateObjectMeta(objMeta *metav1.ObjectMeta, requiresNamespace bool, name
// been performed.
// It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before.
func ValidateObjectMetaAccessor(meta metav1.Object, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList {
- allErrs := field.ErrorList{}
+ var allErrs field.ErrorList
if len(meta.GetGenerateName()) != 0 {
for _, msg := range nameFn(meta.GetGenerateName(), true) {
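ValidateAnnotationsSize and TotalAnnotationSizeLimitB are now exported, so callers can check the annotation size limit without running full annotation validation. A minimal usage sketch (not part of this diff):

```go
package main

import (
	"fmt"
	"strings"

	apivalidation "k8s.io/apimachinery/pkg/api/validation"
)

func main() {
	annotations := map[string]string{
		"example.com/small": "ok",
		// Pad one value past the 256 KiB limit so the check trips.
		"example.com/large": strings.Repeat("x", apivalidation.TotalAnnotationSizeLimitB),
	}
	if err := apivalidation.ValidateAnnotationsSize(annotations); err != nil {
		fmt.Println("rejected:", err)
	}
}
```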
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
index 55945bada..326f68812 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go
@@ -1326,183 +1326,184 @@ func init() {
}
var fileDescriptor_cf52fa777ced5367 = []byte{
- // 2813 bytes of a gzipped FileDescriptorProto
+ // 2829 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x3a, 0xcb, 0x6f, 0x24, 0x47,
0xf9, 0xee, 0x19, 0x8f, 0x3d, 0xf3, 0x8d, 0xc7, 0x8f, 0x5a, 0xef, 0xef, 0x37, 0x6b, 0x84, 0xc7,
0xe9, 0xa0, 0x68, 0x03, 0xc9, 0x38, 0x5e, 0x42, 0xb4, 0xd9, 0x90, 0x80, 0xc7, 0xb3, 0xde, 0x98,
0xac, 0x63, 0xab, 0xbc, 0xbb, 0x40, 0x88, 0x50, 0xda, 0xdd, 0xe5, 0x71, 0xe3, 0x9e, 0xee, 0x49,
- 0x55, 0x8f, 0x37, 0x03, 0x07, 0x72, 0x00, 0x01, 0x12, 0x44, 0xe1, 0x86, 0x38, 0xa0, 0x44, 0xf0,
- 0x17, 0x70, 0x81, 0x3f, 0x00, 0x89, 0x1c, 0x23, 0x71, 0x89, 0x04, 0x1a, 0x25, 0xe6, 0xc0, 0x11,
- 0x71, 0xf5, 0x05, 0x54, 0x8f, 0xee, 0xae, 0x9e, 0xc7, 0xba, 0x27, 0xbb, 0x44, 0xdc, 0xa6, 0xbf,
- 0x77, 0x55, 0x7d, 0xf5, 0xbd, 0x6a, 0x60, 0xf7, 0xe4, 0x3a, 0xab, 0xbb, 0xc1, 0xfa, 0x49, 0xf7,
- 0x90, 0x50, 0x9f, 0x84, 0x84, 0xad, 0x9f, 0x12, 0xdf, 0x09, 0xe8, 0xba, 0x42, 0x58, 0x1d, 0xb7,
- 0x6d, 0xd9, 0xc7, 0xae, 0x4f, 0x68, 0x6f, 0xbd, 0x73, 0xd2, 0xe2, 0x00, 0xb6, 0xde, 0x26, 0xa1,
- 0xb5, 0x7e, 0xba, 0xb1, 0xde, 0x22, 0x3e, 0xa1, 0x56, 0x48, 0x9c, 0x7a, 0x87, 0x06, 0x61, 0x80,
- 0xbe, 0x20, 0xb9, 0xea, 0x3a, 0x57, 0xbd, 0x73, 0xd2, 0xe2, 0x00, 0x56, 0xe7, 0x5c, 0xf5, 0xd3,
- 0x8d, 0x95, 0xa7, 0x5b, 0x6e, 0x78, 0xdc, 0x3d, 0xac, 0xdb, 0x41, 0x7b, 0xbd, 0x15, 0xb4, 0x82,
- 0x75, 0xc1, 0x7c, 0xd8, 0x3d, 0x12, 0x5f, 0xe2, 0x43, 0xfc, 0x92, 0x42, 0x57, 0xc6, 0x9a, 0x42,
- 0xbb, 0x7e, 0xe8, 0xb6, 0xc9, 0xa0, 0x15, 0x2b, 0xcf, 0x5d, 0xc4, 0xc0, 0xec, 0x63, 0xd2, 0xb6,
- 0x06, 0xf9, 0xcc, 0x3f, 0xe7, 0xa1, 0xb8, 0xb9, 0xbf, 0x73, 0x8b, 0x06, 0xdd, 0x0e, 0x5a, 0x83,
- 0x69, 0xdf, 0x6a, 0x93, 0xaa, 0xb1, 0x66, 0x5c, 0x2d, 0x35, 0xe6, 0x3e, 0xe8, 0xd7, 0xa6, 0xce,
- 0xfa, 0xb5, 0xe9, 0x57, 0xad, 0x36, 0xc1, 0x02, 0x83, 0x3c, 0x28, 0x9e, 0x12, 0xca, 0xdc, 0xc0,
- 0x67, 0xd5, 0xdc, 0x5a, 0xfe, 0x6a, 0xf9, 0xda, 0x4b, 0xf5, 0x2c, 0xeb, 0xaf, 0x0b, 0x05, 0xf7,
- 0x24, 0xeb, 0x76, 0x40, 0x9b, 0x2e, 0xb3, 0x83, 0x53, 0x42, 0x7b, 0x8d, 0x45, 0xa5, 0xa5, 0xa8,
- 0x90, 0x0c, 0xc7, 0x1a, 0xd0, 0x8f, 0x0c, 0x58, 0xec, 0x50, 0x72, 0x44, 0x28, 0x25, 0x8e, 0xc2,
- 0x57, 0xf3, 0x6b, 0xc6, 0x23, 0x50, 0x5b, 0x55, 0x6a, 0x17, 0xf7, 0x07, 0xe4, 0xe3, 0x21, 0x8d,
- 0xe8, 0xb7, 0x06, 0xac, 0x30, 0x42, 0x4f, 0x09, 0xdd, 0x74, 0x1c, 0x4a, 0x18, 0x6b, 0xf4, 0xb6,
- 0x3c, 0x97, 0xf8, 0xe1, 0xd6, 0x4e, 0x13, 0xb3, 0xea, 0xb4, 0xd8, 0x87, 0xaf, 0x65, 0x33, 0xe8,
- 0x60, 0x9c, 0x9c, 0x86, 0xa9, 0x2c, 0x5a, 0x19, 0x4b, 0xc2, 0xf0, 0x03, 0xcc, 0x30, 0x8f, 0x60,
- 0x2e, 0x3a, 0xc8, 0xdb, 0x2e, 0x0b, 0xd1, 0x3d, 0x98, 0x69, 0xf1, 0x0f, 0x56, 0x35, 0x84, 0x81,
- 0xf5, 0x6c, 0x06, 0x46, 0x32, 0x1a, 0xf3, 0xca, 0x9e, 0x19, 0xf1, 0xc9, 0xb0, 0x92, 0x66, 0xfe,
- 0x6c, 0x1a, 0xca, 0x9b, 0xfb, 0x3b, 0x98, 0xb0, 0xa0, 0x4b, 0x6d, 0x92, 0xc1, 0x69, 0xae, 0xc3,
- 0x1c, 0x73, 0xfd, 0x56, 0xd7, 0xb3, 0x28, 0x87, 0x56, 0x67, 0x04, 0xe5, 0xb2, 0xa2, 0x9c, 0x3b,
- 0xd0, 0x70, 0x38, 0x45, 0x89, 0xae, 0x01, 0x70, 0x09, 0xac, 0x63, 0xd9, 0xc4, 0xa9, 0xe6, 0xd6,
- 0x8c, 0xab, 0xc5, 0x06, 0x52, 0x7c, 0xf0, 0x6a, 0x8c, 0xc1, 0x1a, 0x15, 0x7a, 0x1c, 0x0a, 0xc2,
- 0xd2, 0x6a, 0x51, 0xa8, 0xa9, 0x28, 0xf2, 0x82, 0x58, 0x06, 0x96, 0x38, 0xf4, 0x24, 0xcc, 0x2a,
- 0x2f, 0xab, 0x96, 0x04, 0xd9, 0x82, 0x22, 0x9b, 0x8d, 0xdc, 0x20, 0xc2, 0xf3, 0xf5, 0x9d, 0xb8,
- 0xbe, 0x23, 0xfc, 0x4e, 0x5b, 0xdf, 0x2b, 0xae, 0xef, 0x60, 0x81, 0x41, 0xb7, 0xa1, 0x70, 0x4a,
- 0xe8, 0x21, 0xf7, 0x04, 0xee, 0x9a, 0x5f, 0xca, 0xb6, 0xd1, 0xf7, 0x38, 0x4b, 0xa3, 0xc4, 0x4d,
- 0x13, 0x3f, 0xb1, 0x14, 0x82, 0xea, 0x00, 0xec, 0x38, 0xa0, 0xa1, 0x58, 0x5e, 0xb5, 0xb0, 0x96,
- 0xbf, 0x5a, 0x6a, 0xcc, 0xf3, 0xf5, 0x1e, 0xc4, 0x50, 0xac, 0x51, 0x70, 0x7a, 0xdb, 0x0a, 0x49,
- 0x2b, 0xa0, 0x2e, 0x61, 0xd5, 0xd9, 0x84, 0x7e, 0x2b, 0x86, 0x62, 0x8d, 0x02, 0x7d, 0x03, 0x10,
- 0x0b, 0x03, 0x6a, 0xb5, 0x88, 0x5a, 0xea, 0xcb, 0x16, 0x3b, 0xae, 0x82, 0x58, 0xdd, 0x8a, 0x5a,
- 0x1d, 0x3a, 0x18, 0xa2, 0xc0, 0x23, 0xb8, 0xcc, 0xdf, 0x1b, 0xb0, 0xa0, 0xf9, 0x82, 0xf0, 0xbb,
- 0xeb, 0x30, 0xd7, 0xd2, 0x6e, 0x9d, 0xf2, 0x8b, 0xf8, 0xb4, 0xf5, 0x1b, 0x89, 0x53, 0x94, 0x88,
- 0x40, 0x89, 0x2a, 0x49, 0x51, 0x74, 0xd9, 0xc8, 0xec, 0xb4, 0x91, 0x0d, 0x89, 0x26, 0x0d, 0xc8,
- 0x70, 0x22, 0xd9, 0xfc, 0x87, 0x21, 0x1c, 0x38, 0x8a, 0x37, 0xe8, 0xaa, 0x16, 0xd3, 0x0c, 0xb1,
- 0x7d, 0x73, 0x63, 0xe2, 0xd1, 0x05, 0x81, 0x20, 0xf7, 0x3f, 0x11, 0x08, 0x6e, 0x14, 0x7f, 0xf5,
- 0x5e, 0x6d, 0xea, 0xed, 0xbf, 0xad, 0x4d, 0x99, 0xbf, 0x34, 0x60, 0x6e, 0xb3, 0xd3, 0xf1, 0x7a,
- 0x7b, 0x9d, 0x50, 0x2c, 0xc0, 0x84, 0x19, 0x87, 0xf6, 0x70, 0xd7, 0x57, 0x0b, 0x05, 0x7e, 0xbf,
- 0x9b, 0x02, 0x82, 0x15, 0x86, 0xdf, 0x9f, 0xa3, 0x80, 0xda, 0x44, 0x5d, 0xb7, 0xf8, 0xfe, 0x6c,
- 0x73, 0x20, 0x96, 0x38, 0x7e, 0xc8, 0x47, 0x2e, 0xf1, 0x9c, 0x5d, 0xcb, 0xb7, 0x5a, 0x84, 0xaa,
- 0xcb, 0x11, 0x6f, 0xfd, 0xb6, 0x86, 0xc3, 0x29, 0x4a, 0xf3, 0xdf, 0x39, 0x28, 0x6d, 0x05, 0xbe,
- 0xe3, 0x86, 0xea, 0x72, 0x85, 0xbd, 0xce, 0x50, 0xf0, 0xb8, 0xd3, 0xeb, 0x10, 0x2c, 0x30, 0xe8,
- 0x79, 0x98, 0x61, 0xa1, 0x15, 0x76, 0x99, 0xb0, 0xa7, 0xd4, 0x78, 0x2c, 0x0a, 0x4b, 0x07, 0x02,
- 0x7a, 0xde, 0xaf, 0x2d, 0xc4, 0xe2, 0x24, 0x08, 0x2b, 0x06, 0xee, 0xe9, 0xc1, 0xa1, 0xd8, 0x28,
- 0xe7, 0x96, 0x4c, 0x7b, 0x51, 0xfe, 0xc8, 0x27, 0x9e, 0xbe, 0x37, 0x44, 0x81, 0x47, 0x70, 0xa1,
- 0x53, 0x40, 0x9e, 0xc5, 0xc2, 0x3b, 0xd4, 0xf2, 0x99, 0xd0, 0x75, 0xc7, 0x6d, 0x13, 0x75, 0xe1,
- 0xbf, 0x98, 0xed, 0xc4, 0x39, 0x47, 0xa2, 0xf7, 0xf6, 0x90, 0x34, 0x3c, 0x42, 0x03, 0x7a, 0x02,
- 0x66, 0x28, 0xb1, 0x58, 0xe0, 0x57, 0x0b, 0x62, 0xf9, 0x71, 0x54, 0xc6, 0x02, 0x8a, 0x15, 0x96,
- 0x07, 0xb4, 0x36, 0x61, 0xcc, 0x6a, 0x45, 0xe1, 0x35, 0x0e, 0x68, 0xbb, 0x12, 0x8c, 0x23, 0xbc,
- 0xd9, 0x86, 0xca, 0x16, 0x25, 0x56, 0x48, 0x26, 0xf1, 0x8a, 0x4f, 0x7f, 0xe0, 0x3f, 0xc9, 0x43,
- 0xa5, 0x49, 0x3c, 0x92, 0xe8, 0xdb, 0x06, 0xd4, 0xa2, 0x96, 0x4d, 0xf6, 0x09, 0x75, 0x03, 0xe7,
- 0x80, 0xd8, 0x81, 0xef, 0x30, 0xe1, 0x02, 0xf9, 0xc6, 0xff, 0xf1, 0xbd, 0xb9, 0x35, 0x84, 0xc5,
- 0x23, 0x38, 0x90, 0x07, 0x95, 0x0e, 0x15, 0xbf, 0xc5, 0x7e, 0x49, 0x0f, 0x29, 0x5f, 0xfb, 0x72,
- 0xb6, 0xe3, 0xd8, 0xd7, 0x59, 0x1b, 0x4b, 0x67, 0xfd, 0x5a, 0x25, 0x05, 0xc2, 0x69, 0xe1, 0xe8,
- 0xeb, 0xb0, 0x18, 0xd0, 0xce, 0xb1, 0xe5, 0x37, 0x49, 0x87, 0xf8, 0x0e, 0xf1, 0x43, 0x26, 0x76,
- 0xa1, 0xd8, 0x58, 0xe6, 0x75, 0xc4, 0xde, 0x00, 0x0e, 0x0f, 0x51, 0xa3, 0xd7, 0x60, 0xa9, 0x43,
- 0x83, 0x8e, 0xd5, 0x12, 0x2e, 0xb5, 0x1f, 0x78, 0xae, 0xdd, 0x13, 0x2e, 0x54, 0x6a, 0x3c, 0x75,
- 0xd6, 0xaf, 0x2d, 0xed, 0x0f, 0x22, 0xcf, 0xfb, 0xb5, 0x4b, 0x62, 0xeb, 0x38, 0x24, 0x41, 0xe2,
- 0x61, 0x31, 0xda, 0x19, 0x16, 0xc6, 0x9d, 0xa1, 0xb9, 0x03, 0xc5, 0x66, 0x57, 0xf9, 0xf3, 0x8b,
- 0x50, 0x74, 0xd4, 0x6f, 0xb5, 0xf3, 0xd1, 0xc5, 0x8a, 0x69, 0xce, 0xfb, 0xb5, 0x0a, 0x2f, 0x1d,
- 0xeb, 0x11, 0x00, 0xc7, 0x2c, 0xe6, 0x13, 0x50, 0x14, 0x47, 0xce, 0xee, 0x6d, 0xa0, 0x45, 0xc8,
- 0x63, 0xeb, 0xbe, 0x90, 0x32, 0x87, 0xf9, 0x4f, 0x2d, 0x02, 0xed, 0x01, 0xdc, 0x22, 0x61, 0x74,
- 0xf0, 0x9b, 0xb0, 0x10, 0x85, 0xe1, 0x74, 0x76, 0xf8, 0x7f, 0xa5, 0x7b, 0x01, 0xa7, 0xd1, 0x78,
- 0x90, 0xde, 0x7c, 0x1d, 0x4a, 0x22, 0x83, 0xf0, 0xf4, 0x9b, 0xa4, 0x7a, 0xe3, 0x01, 0xa9, 0x3e,
- 0xca, 0xdf, 0xb9, 0x71, 0xf9, 0x5b, 0x33, 0xd7, 0x83, 0x8a, 0xe4, 0x8d, 0x8a, 0x9b, 0x4c, 0x1a,
- 0x9e, 0x82, 0x62, 0x64, 0xa6, 0xd2, 0x12, 0x17, 0xb5, 0x91, 0x20, 0x1c, 0x53, 0x68, 0xda, 0x8e,
- 0x21, 0x95, 0x0d, 0xb3, 0x29, 0xd3, 0x2a, 0x97, 0xdc, 0x83, 0x2b, 0x17, 0x4d, 0xd3, 0x0f, 0xa1,
- 0x3a, 0xae, 0x12, 0x7e, 0x88, 0x7c, 0x9d, 0xdd, 0x14, 0xf3, 0x1d, 0x03, 0x16, 0x75, 0x49, 0xd9,
- 0x8f, 0x2f, 0xbb, 0x92, 0x8b, 0x2b, 0x35, 0x6d, 0x47, 0x7e, 0x63, 0xc0, 0x72, 0x6a, 0x69, 0x13,
- 0x9d, 0xf8, 0x04, 0x46, 0xe9, 0xce, 0x91, 0x9f, 0xc0, 0x39, 0xfe, 0x92, 0x83, 0xca, 0x6d, 0xeb,
- 0x90, 0x78, 0x07, 0xc4, 0x23, 0x76, 0x18, 0x50, 0xf4, 0x03, 0x28, 0xb7, 0xad, 0xd0, 0x3e, 0x16,
- 0xd0, 0xa8, 0xaa, 0x6f, 0x66, 0x0b, 0x76, 0x29, 0x49, 0xf5, 0xdd, 0x44, 0xcc, 0x4d, 0x3f, 0xa4,
- 0xbd, 0xc6, 0x25, 0x65, 0x52, 0x59, 0xc3, 0x60, 0x5d, 0x9b, 0x68, 0xc5, 0xc4, 0xf7, 0xcd, 0xb7,
- 0x3a, 0xbc, 0xe4, 0x98, 0xbc, 0x03, 0x4c, 0x99, 0x80, 0xc9, 0x9b, 0x5d, 0x97, 0x92, 0x36, 0xf1,
- 0xc3, 0xa4, 0x15, 0xdb, 0x1d, 0x90, 0x8f, 0x87, 0x34, 0xae, 0xbc, 0x04, 0x8b, 0x83, 0xc6, 0xf3,
- 0xf8, 0x73, 0x42, 0x7a, 0xf2, 0xbc, 0x30, 0xff, 0x89, 0x96, 0xa1, 0x70, 0x6a, 0x79, 0x5d, 0x75,
- 0x1b, 0xb1, 0xfc, 0xb8, 0x91, 0xbb, 0x6e, 0x98, 0xbf, 0x33, 0xa0, 0x3a, 0xce, 0x10, 0xf4, 0x79,
- 0x4d, 0x50, 0xa3, 0xac, 0xac, 0xca, 0xbf, 0x42, 0x7a, 0x52, 0xea, 0x4d, 0x28, 0x06, 0x1d, 0x5e,
- 0x0f, 0x04, 0x54, 0x9d, 0xfa, 0x93, 0xd1, 0x49, 0xee, 0x29, 0xf8, 0x79, 0xbf, 0x76, 0x39, 0x25,
- 0x3e, 0x42, 0xe0, 0x98, 0x95, 0x47, 0x6a, 0x61, 0x0f, 0xcf, 0x1e, 0x71, 0xa4, 0xbe, 0x27, 0x20,
- 0x58, 0x61, 0xcc, 0x3f, 0x1a, 0x30, 0x2d, 0x8a, 0xe9, 0xd7, 0xa1, 0xc8, 0xf7, 0xcf, 0xb1, 0x42,
- 0x4b, 0xd8, 0x95, 0xb9, 0x8d, 0xe3, 0xdc, 0xbb, 0x24, 0xb4, 0x12, 0x6f, 0x8b, 0x20, 0x38, 0x96,
- 0x88, 0x30, 0x14, 0xdc, 0x90, 0xb4, 0xa3, 0x83, 0x7c, 0x7a, 0xac, 0x68, 0x35, 0x44, 0xa8, 0x63,
- 0xeb, 0xfe, 0xcd, 0xb7, 0x42, 0xe2, 0xf3, 0xc3, 0x48, 0xae, 0xc6, 0x0e, 0x97, 0x81, 0xa5, 0x28,
- 0xf3, 0x5f, 0x06, 0xc4, 0xaa, 0xb8, 0xf3, 0x33, 0xe2, 0x1d, 0xdd, 0x76, 0xfd, 0x13, 0xb5, 0xad,
- 0xb1, 0x39, 0x07, 0x0a, 0x8e, 0x63, 0x8a, 0x51, 0xe9, 0x21, 0x37, 0x59, 0x7a, 0xe0, 0x0a, 0xed,
- 0xc0, 0x0f, 0x5d, 0xbf, 0x3b, 0x74, 0xdb, 0xb6, 0x14, 0x1c, 0xc7, 0x14, 0xbc, 0x10, 0xa1, 0xa4,
- 0x6d, 0xb9, 0xbe, 0xeb, 0xb7, 0xf8, 0x22, 0xb6, 0x82, 0xae, 0x1f, 0x8a, 0x8c, 0xac, 0x0a, 0x11,
- 0x3c, 0x84, 0xc5, 0x23, 0x38, 0xcc, 0x3f, 0x4c, 0x43, 0x99, 0xaf, 0x39, 0xca, 0x73, 0x2f, 0x40,
- 0xc5, 0xd3, 0xbd, 0x40, 0xad, 0xfd, 0xb2, 0x32, 0x25, 0x7d, 0xaf, 0x71, 0x9a, 0x96, 0x33, 0x8b,
- 0xfa, 0x29, 0x66, 0xce, 0xa5, 0x99, 0xb7, 0x75, 0x24, 0x4e, 0xd3, 0xf2, 0xe8, 0x75, 0x9f, 0xdf,
- 0x0f, 0x55, 0x99, 0xc4, 0x47, 0xf4, 0x4d, 0x0e, 0xc4, 0x12, 0x87, 0x76, 0xe1, 0x92, 0xe5, 0x79,
- 0xc1, 0x7d, 0x01, 0x6c, 0x04, 0xc1, 0x49, 0xdb, 0xa2, 0x27, 0x4c, 0x34, 0xc2, 0xc5, 0xc6, 0xe7,
- 0x14, 0xcb, 0xa5, 0xcd, 0x61, 0x12, 0x3c, 0x8a, 0x6f, 0xd4, 0xb1, 0x4d, 0x4f, 0x78, 0x6c, 0xc7,
- 0xb0, 0x3c, 0x00, 0x12, 0xb7, 0x5c, 0x75, 0xa5, 0xcf, 0x2a, 0x39, 0xcb, 0x78, 0x04, 0xcd, 0xf9,
- 0x18, 0x38, 0x1e, 0x29, 0x11, 0xdd, 0x80, 0x79, 0xee, 0xc9, 0x41, 0x37, 0x8c, 0xea, 0xce, 0x82,
- 0x38, 0x6e, 0x74, 0xd6, 0xaf, 0xcd, 0xdf, 0x49, 0x61, 0xf0, 0x00, 0x25, 0xdf, 0x5c, 0xcf, 0x6d,
- 0xbb, 0x61, 0x75, 0x56, 0xb0, 0xc4, 0x9b, 0x7b, 0x9b, 0x03, 0xb1, 0xc4, 0xa5, 0x3c, 0xb0, 0x78,
- 0x91, 0x07, 0x9a, 0xbf, 0xce, 0x03, 0x92, 0x85, 0xb2, 0x23, 0xeb, 0x29, 0x19, 0xd2, 0x78, 0x35,
- 0xaf, 0x0a, 0x6d, 0x63, 0xa0, 0x9a, 0x57, 0x35, 0x76, 0x84, 0x47, 0xbb, 0x50, 0x92, 0xa1, 0x25,
- 0xb9, 0x2e, 0xeb, 0x8a, 0xb8, 0xb4, 0x17, 0x21, 0xce, 0xfb, 0xb5, 0x95, 0x94, 0x9a, 0x18, 0x23,
- 0x3a, 0xad, 0x44, 0x02, 0xba, 0x06, 0x60, 0x75, 0x5c, 0x7d, 0xd6, 0x56, 0x4a, 0x26, 0x2e, 0x49,
- 0xd7, 0x8c, 0x35, 0x2a, 0xf4, 0x32, 0x4c, 0x87, 0x9f, 0xae, 0x1b, 0x2a, 0x8a, 0x66, 0x8f, 0xf7,
- 0x3e, 0x42, 0x02, 0xd7, 0x2e, 0xfc, 0x99, 0x71, 0xb3, 0x54, 0x23, 0x13, 0x6b, 0xdf, 0x8e, 0x31,
- 0x58, 0xa3, 0x42, 0xdf, 0x82, 0xe2, 0x91, 0x2a, 0x45, 0xc5, 0xc1, 0x64, 0x0e, 0x91, 0x51, 0x01,
- 0x2b, 0xdb, 0xfd, 0xe8, 0x0b, 0xc7, 0xd2, 0xcc, 0x37, 0xa1, 0xb4, 0xeb, 0xda, 0x34, 0x10, 0x8d,
- 0xd8, 0x93, 0x30, 0xcb, 0x52, 0x9d, 0x4a, 0x7c, 0x24, 0x91, 0xbb, 0x44, 0x78, 0xee, 0x27, 0xbe,
- 0xe5, 0x07, 0xb2, 0x1f, 0x29, 0x24, 0x7e, 0xf2, 0x2a, 0x07, 0x62, 0x89, 0xbb, 0xb1, 0xcc, 0x33,
- 0xfd, 0x4f, 0xdf, 0xaf, 0x4d, 0xbd, 0xfb, 0x7e, 0x6d, 0xea, 0xbd, 0xf7, 0x55, 0xd6, 0x3f, 0x07,
- 0x80, 0xbd, 0xc3, 0xef, 0x11, 0x5b, 0xc6, 0xcf, 0x4c, 0xb3, 0xb5, 0x68, 0xa4, 0x2b, 0x66, 0x6b,
- 0xb9, 0x81, 0xea, 0x4d, 0xc3, 0xe1, 0x14, 0x25, 0x5a, 0x87, 0x52, 0x3c, 0x35, 0x53, 0x07, 0xbd,
- 0x14, 0x39, 0x4e, 0x3c, 0x5a, 0xc3, 0x09, 0x4d, 0x2a, 0x98, 0x4f, 0x5f, 0x18, 0xcc, 0x1b, 0x90,
- 0xef, 0xba, 0x8e, 0xea, 0x5a, 0x9f, 0x89, 0x92, 0xe9, 0xdd, 0x9d, 0xe6, 0x79, 0xbf, 0xf6, 0xd8,
- 0xb8, 0x61, 0x35, 0xef, 0xf8, 0x59, 0xfd, 0xee, 0x4e, 0x13, 0x73, 0xe6, 0x51, 0x91, 0x65, 0x66,
- 0xc2, 0xc8, 0x72, 0x0d, 0xa0, 0x95, 0xf4, 0xfe, 0xf2, 0xe2, 0xc6, 0x1e, 0xa5, 0xf5, 0xfc, 0x1a,
- 0x15, 0x62, 0xb0, 0x64, 0xf3, 0x06, 0x59, 0xf5, 0xe0, 0x2c, 0xb4, 0xda, 0x72, 0x9a, 0x38, 0x99,
- 0x73, 0x5f, 0x51, 0x6a, 0x96, 0xb6, 0x06, 0x85, 0xe1, 0x61, 0xf9, 0x28, 0x80, 0x25, 0x47, 0xb5,
- 0x7a, 0x89, 0xd2, 0xd2, 0xc4, 0x4a, 0x2f, 0x73, 0x85, 0xcd, 0x41, 0x41, 0x78, 0x58, 0x36, 0xfa,
- 0x2e, 0xac, 0x44, 0xc0, 0xe1, 0x7e, 0x5b, 0x44, 0xde, 0x7c, 0x63, 0xf5, 0xac, 0x5f, 0x5b, 0x69,
- 0x8e, 0xa5, 0xc2, 0x0f, 0x90, 0x80, 0x1c, 0x98, 0xf1, 0x64, 0xa5, 0x5a, 0x16, 0xd5, 0xc5, 0x57,
- 0xb3, 0xad, 0x22, 0xf1, 0xfe, 0xba, 0x5e, 0xa1, 0xc6, 0x73, 0x0f, 0x55, 0x9c, 0x2a, 0xd9, 0xe8,
- 0x2d, 0x28, 0x5b, 0xbe, 0x1f, 0x84, 0x96, 0x9c, 0x00, 0xcc, 0x09, 0x55, 0x9b, 0x13, 0xab, 0xda,
- 0x4c, 0x64, 0x0c, 0x54, 0xc4, 0x1a, 0x06, 0xeb, 0xaa, 0xd0, 0x7d, 0x58, 0x08, 0xee, 0xfb, 0x84,
- 0x62, 0x72, 0x44, 0x28, 0xf1, 0x6d, 0xc2, 0xaa, 0x15, 0xa1, 0xfd, 0xd9, 0x8c, 0xda, 0x53, 0xcc,
- 0x89, 0x4b, 0xa7, 0xe1, 0x0c, 0x0f, 0x6a, 0x41, 0x75, 0x1e, 0x24, 0x7d, 0xcb, 0x73, 0xbf, 0x4f,
- 0x28, 0xab, 0xce, 0x27, 0x03, 0xdf, 0xed, 0x18, 0x8a, 0x35, 0x0a, 0xf4, 0x15, 0x28, 0xdb, 0x5e,
- 0x97, 0x85, 0x44, 0x4e, 0xdf, 0x17, 0xc4, 0x0d, 0x8a, 0xd7, 0xb7, 0x95, 0xa0, 0xb0, 0x4e, 0x87,
- 0xba, 0x50, 0x69, 0xeb, 0x29, 0xa3, 0xba, 0x24, 0x56, 0x77, 0x3d, 0xdb, 0xea, 0x86, 0x93, 0x5a,
- 0x52, 0xc1, 0xa4, 0x70, 0x38, 0xad, 0x65, 0xe5, 0x79, 0x28, 0x7f, 0xca, 0xe2, 0x9e, 0x37, 0x07,
- 0x83, 0xe7, 0x38, 0x51, 0x73, 0xf0, 0xa7, 0x1c, 0xcc, 0xa7, 0x77, 0x7f, 0x20, 0x1d, 0x16, 0x32,
- 0xa5, 0xc3, 0xa8, 0x0d, 0x35, 0xc6, 0x3e, 0x18, 0x44, 0x61, 0x3d, 0x3f, 0x36, 0xac, 0xab, 0xe8,
- 0x39, 0xfd, 0x30, 0xd1, 0xb3, 0x0e, 0xc0, 0xeb, 0x0c, 0x1a, 0x78, 0x1e, 0xa1, 0x22, 0x70, 0x16,
- 0xd5, 0xc3, 0x40, 0x0c, 0xc5, 0x1a, 0x05, 0xaf, 0x86, 0x0f, 0xbd, 0xc0, 0x3e, 0x11, 0x5b, 0x10,
- 0x5d, 0x7a, 0x11, 0x32, 0x8b, 0xb2, 0x1a, 0x6e, 0x0c, 0x61, 0xf1, 0x08, 0x0e, 0xb3, 0x07, 0x97,
- 0xf7, 0x2d, 0x1a, 0xba, 0x96, 0x97, 0x5c, 0x30, 0xd1, 0x6e, 0xbc, 0x31, 0xd4, 0xcc, 0x3c, 0x33,
- 0xe9, 0x45, 0x4d, 0x36, 0x3f, 0x81, 0x25, 0x0d, 0x8d, 0xf9, 0x57, 0x03, 0xae, 0x8c, 0xd4, 0xfd,
- 0x19, 0x34, 0x53, 0x6f, 0xa4, 0x9b, 0xa9, 0x17, 0x32, 0x4e, 0x21, 0x47, 0x59, 0x3b, 0xa6, 0xb5,
- 0x9a, 0x85, 0xc2, 0x3e, 0x2f, 0x62, 0xcd, 0x5f, 0x18, 0x30, 0x27, 0x7e, 0x4d, 0x32, 0xc1, 0xad,
- 0xa5, 0xe7, 0xfa, 0xa5, 0x47, 0x38, 0xd3, 0x7f, 0xc7, 0x80, 0xf4, 0xec, 0x14, 0xbd, 0x24, 0xfd,
- 0xd7, 0x88, 0x87, 0x9b, 0x13, 0xfa, 0xee, 0x8b, 0xe3, 0x5a, 0xc1, 0x4b, 0x99, 0xa6, 0x84, 0x4f,
- 0x41, 0x09, 0x07, 0x41, 0xb8, 0x6f, 0x85, 0xc7, 0x8c, 0x2f, 0xbc, 0xc3, 0x7f, 0xa8, 0xbd, 0x11,
- 0x0b, 0x17, 0x18, 0x2c, 0xe1, 0xe6, 0xcf, 0x0d, 0xb8, 0x32, 0xf6, 0xad, 0x85, 0x87, 0x00, 0x3b,
- 0xfe, 0x52, 0x2b, 0x8a, 0xbd, 0x30, 0xa1, 0xc3, 0x1a, 0x15, 0xef, 0xe1, 0x52, 0x0f, 0x34, 0x83,
- 0x3d, 0x5c, 0x4a, 0x1b, 0x4e, 0xd3, 0x9a, 0xff, 0xcc, 0x81, 0x7a, 0xdc, 0xf8, 0x2f, 0x7b, 0xec,
- 0x13, 0x03, 0x4f, 0x2b, 0xf3, 0xe9, 0xa7, 0x95, 0xf8, 0x1d, 0x45, 0x7b, 0x5b, 0xc8, 0x3f, 0xf8,
- 0x6d, 0x01, 0x3d, 0x17, 0x3f, 0x57, 0xc8, 0xd0, 0xb5, 0x9a, 0x7e, 0xae, 0x38, 0xef, 0xd7, 0xe6,
- 0x94, 0xf0, 0xf4, 0xf3, 0xc5, 0x6b, 0x30, 0xeb, 0x90, 0xd0, 0x72, 0x3d, 0xd9, 0x8f, 0x65, 0x1e,
- 0xe2, 0x4b, 0x61, 0x4d, 0xc9, 0xda, 0x28, 0x73, 0x9b, 0xd4, 0x07, 0x8e, 0x04, 0xf2, 0x68, 0x6b,
- 0x07, 0x8e, 0x6c, 0x27, 0x0a, 0x49, 0xb4, 0xdd, 0x0a, 0x1c, 0x82, 0x05, 0xc6, 0x7c, 0xd7, 0x80,
- 0xb2, 0x94, 0xb4, 0x65, 0x75, 0x19, 0x41, 0x1b, 0xf1, 0x2a, 0xe4, 0x71, 0x5f, 0xd1, 0xdf, 0xa5,
- 0xce, 0xfb, 0xb5, 0x92, 0x20, 0x13, 0x9d, 0xc8, 0x88, 0xf7, 0x97, 0xdc, 0x05, 0x7b, 0xf4, 0x38,
- 0x14, 0xc4, 0xed, 0x51, 0x9b, 0x99, 0x3c, 0xb0, 0x71, 0x20, 0x96, 0x38, 0xf3, 0xe3, 0x1c, 0x54,
- 0x52, 0x8b, 0xcb, 0xd0, 0x0b, 0xc4, 0xa3, 0xcb, 0x5c, 0x86, 0x71, 0xf8, 0xf8, 0xe7, 0x6c, 0x95,
- 0x7b, 0x66, 0x1e, 0x26, 0xf7, 0x7c, 0x1b, 0x66, 0x6c, 0xbe, 0x47, 0xd1, 0xbf, 0x23, 0x36, 0x26,
- 0x39, 0x4e, 0xb1, 0xbb, 0x89, 0x37, 0x8a, 0x4f, 0x86, 0x95, 0x40, 0x74, 0x0b, 0x96, 0x28, 0x09,
- 0x69, 0x6f, 0xf3, 0x28, 0x24, 0x54, 0x6f, 0xe2, 0x0b, 0x49, 0xc5, 0x8d, 0x07, 0x09, 0xf0, 0x30,
- 0x8f, 0x79, 0x08, 0x73, 0x77, 0xac, 0x43, 0x2f, 0x7e, 0x96, 0xc2, 0x50, 0x71, 0x7d, 0xdb, 0xeb,
- 0x3a, 0x44, 0x46, 0xe3, 0x28, 0x7a, 0x45, 0x97, 0x76, 0x47, 0x47, 0x9e, 0xf7, 0x6b, 0x97, 0x52,
- 0x00, 0xf9, 0x0e, 0x83, 0xd3, 0x22, 0x4c, 0x0f, 0xa6, 0x3f, 0xc3, 0xee, 0xf1, 0x3b, 0x50, 0x4a,
- 0xea, 0xfb, 0x47, 0xac, 0xd2, 0x7c, 0x03, 0x8a, 0xdc, 0xe3, 0xa3, 0xbe, 0xf4, 0x82, 0x12, 0x27,
- 0x5d, 0x38, 0xe5, 0xb2, 0x14, 0x4e, 0x66, 0x1b, 0x2a, 0x77, 0x3b, 0xce, 0x43, 0x3e, 0x4c, 0xe6,
- 0x32, 0x67, 0xad, 0x6b, 0x20, 0xff, 0x78, 0xc1, 0x13, 0x84, 0xcc, 0xdc, 0x5a, 0x82, 0xd0, 0x13,
- 0xaf, 0x36, 0x95, 0xff, 0xb1, 0x01, 0x20, 0xc6, 0x5f, 0x37, 0x4f, 0x89, 0x1f, 0x66, 0x78, 0xbe,
- 0xbe, 0x0b, 0x33, 0x81, 0xf4, 0x26, 0xf9, 0x38, 0x39, 0xe1, 0x8c, 0x35, 0xbe, 0x04, 0xd2, 0x9f,
- 0xb0, 0x12, 0xd6, 0xb8, 0xfa, 0xc1, 0x27, 0xab, 0x53, 0x1f, 0x7e, 0xb2, 0x3a, 0xf5, 0xd1, 0x27,
- 0xab, 0x53, 0x6f, 0x9f, 0xad, 0x1a, 0x1f, 0x9c, 0xad, 0x1a, 0x1f, 0x9e, 0xad, 0x1a, 0x1f, 0x9d,
- 0xad, 0x1a, 0x1f, 0x9f, 0xad, 0x1a, 0xef, 0xfe, 0x7d, 0x75, 0xea, 0xb5, 0xdc, 0xe9, 0xc6, 0x7f,
- 0x02, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xee, 0x35, 0x7b, 0xee, 0x26, 0x00, 0x00,
+ 0x55, 0x8f, 0x37, 0x03, 0x07, 0x72, 0x00, 0x01, 0x12, 0x44, 0xe1, 0xc6, 0x09, 0x25, 0x82, 0xbf,
+ 0x80, 0x0b, 0xfc, 0x01, 0x48, 0xe4, 0x18, 0xc4, 0x25, 0x12, 0x68, 0x94, 0x98, 0x03, 0x47, 0xc4,
+ 0xd5, 0x17, 0x50, 0x3d, 0xba, 0xbb, 0x7a, 0x1e, 0xeb, 0x9e, 0xec, 0x12, 0x71, 0x9b, 0xfe, 0xde,
+ 0x55, 0xf5, 0xd5, 0xf7, 0xaa, 0x81, 0xdd, 0x93, 0xeb, 0xac, 0xee, 0x06, 0xeb, 0x27, 0xdd, 0x43,
+ 0x42, 0x7d, 0x12, 0x12, 0xb6, 0x7e, 0x4a, 0x7c, 0x27, 0xa0, 0xeb, 0x0a, 0x61, 0x75, 0xdc, 0xb6,
+ 0x65, 0x1f, 0xbb, 0x3e, 0xa1, 0xbd, 0xf5, 0xce, 0x49, 0x8b, 0x03, 0xd8, 0x7a, 0x9b, 0x84, 0xd6,
+ 0xfa, 0xe9, 0xc6, 0x7a, 0x8b, 0xf8, 0x84, 0x5a, 0x21, 0x71, 0xea, 0x1d, 0x1a, 0x84, 0x01, 0xfa,
+ 0x82, 0xe4, 0xaa, 0xeb, 0x5c, 0xf5, 0xce, 0x49, 0x8b, 0x03, 0x58, 0x9d, 0x73, 0xd5, 0x4f, 0x37,
+ 0x56, 0x9e, 0x6e, 0xb9, 0xe1, 0x71, 0xf7, 0xb0, 0x6e, 0x07, 0xed, 0xf5, 0x56, 0xd0, 0x0a, 0xd6,
+ 0x05, 0xf3, 0x61, 0xf7, 0x48, 0x7c, 0x89, 0x0f, 0xf1, 0x4b, 0x0a, 0x5d, 0x19, 0x6b, 0x0a, 0xed,
+ 0xfa, 0xa1, 0xdb, 0x26, 0x83, 0x56, 0xac, 0x3c, 0x77, 0x11, 0x03, 0xb3, 0x8f, 0x49, 0xdb, 0x1a,
+ 0xe4, 0x33, 0xff, 0x94, 0x87, 0xe2, 0xe6, 0xfe, 0xce, 0x2d, 0x1a, 0x74, 0x3b, 0x68, 0x0d, 0xa6,
+ 0x7d, 0xab, 0x4d, 0xaa, 0xc6, 0x9a, 0x71, 0xb5, 0xd4, 0x98, 0xfb, 0xa0, 0x5f, 0x9b, 0x3a, 0xeb,
+ 0xd7, 0xa6, 0x5f, 0xb5, 0xda, 0x04, 0x0b, 0x0c, 0xf2, 0xa0, 0x78, 0x4a, 0x28, 0x73, 0x03, 0x9f,
+ 0x55, 0x73, 0x6b, 0xf9, 0xab, 0xe5, 0x6b, 0x2f, 0xd5, 0xb3, 0xac, 0xbf, 0x2e, 0x14, 0xdc, 0x93,
+ 0xac, 0xdb, 0x01, 0x6d, 0xba, 0xcc, 0x0e, 0x4e, 0x09, 0xed, 0x35, 0x16, 0x95, 0x96, 0xa2, 0x42,
+ 0x32, 0x1c, 0x6b, 0x40, 0x3f, 0x32, 0x60, 0xb1, 0x43, 0xc9, 0x11, 0xa1, 0x94, 0x38, 0x0a, 0x5f,
+ 0xcd, 0xaf, 0x19, 0x8f, 0x40, 0x6d, 0x55, 0xa9, 0x5d, 0xdc, 0x1f, 0x90, 0x8f, 0x87, 0x34, 0xa2,
+ 0xdf, 0x18, 0xb0, 0xc2, 0x08, 0x3d, 0x25, 0x74, 0xd3, 0x71, 0x28, 0x61, 0xac, 0xd1, 0xdb, 0xf2,
+ 0x5c, 0xe2, 0x87, 0x5b, 0x3b, 0x4d, 0xcc, 0xaa, 0xd3, 0x62, 0x1f, 0xbe, 0x96, 0xcd, 0xa0, 0x83,
+ 0x71, 0x72, 0x1a, 0xa6, 0xb2, 0x68, 0x65, 0x2c, 0x09, 0xc3, 0x0f, 0x30, 0xc3, 0x3c, 0x82, 0xb9,
+ 0xe8, 0x20, 0x6f, 0xbb, 0x2c, 0x44, 0xf7, 0x60, 0xa6, 0xc5, 0x3f, 0x58, 0xd5, 0x10, 0x06, 0xd6,
+ 0xb3, 0x19, 0x18, 0xc9, 0x68, 0xcc, 0x2b, 0x7b, 0x66, 0xc4, 0x27, 0xc3, 0x4a, 0x9a, 0xf9, 0xb3,
+ 0x69, 0x28, 0x6f, 0xee, 0xef, 0x60, 0xc2, 0x82, 0x2e, 0xb5, 0x49, 0x06, 0xa7, 0xb9, 0x0e, 0x73,
+ 0xcc, 0xf5, 0x5b, 0x5d, 0xcf, 0xa2, 0x1c, 0x5a, 0x9d, 0x11, 0x94, 0xcb, 0x8a, 0x72, 0xee, 0x40,
+ 0xc3, 0xe1, 0x14, 0x25, 0xba, 0x06, 0xc0, 0x25, 0xb0, 0x8e, 0x65, 0x13, 0xa7, 0x9a, 0x5b, 0x33,
+ 0xae, 0x16, 0x1b, 0x48, 0xf1, 0xc1, 0xab, 0x31, 0x06, 0x6b, 0x54, 0xe8, 0x71, 0x28, 0x08, 0x4b,
+ 0xab, 0x45, 0xa1, 0xa6, 0xa2, 0xc8, 0x0b, 0x62, 0x19, 0x58, 0xe2, 0xd0, 0x93, 0x30, 0xab, 0xbc,
+ 0xac, 0x5a, 0x12, 0x64, 0x0b, 0x8a, 0x6c, 0x36, 0x72, 0x83, 0x08, 0xcf, 0xd7, 0x77, 0xe2, 0xfa,
+ 0x8e, 0xf0, 0x3b, 0x6d, 0x7d, 0xaf, 0xb8, 0xbe, 0x83, 0x05, 0x06, 0xdd, 0x86, 0xc2, 0x29, 0xa1,
+ 0x87, 0xdc, 0x13, 0xb8, 0x6b, 0x7e, 0x29, 0xdb, 0x46, 0xdf, 0xe3, 0x2c, 0x8d, 0x12, 0x37, 0x4d,
+ 0xfc, 0xc4, 0x52, 0x08, 0xaa, 0x03, 0xb0, 0xe3, 0x80, 0x86, 0x62, 0x79, 0xd5, 0xc2, 0x5a, 0xfe,
+ 0x6a, 0xa9, 0x31, 0xcf, 0xd7, 0x7b, 0x10, 0x43, 0xb1, 0x46, 0xc1, 0xe9, 0x6d, 0x2b, 0x24, 0xad,
+ 0x80, 0xba, 0x84, 0x55, 0x67, 0x13, 0xfa, 0xad, 0x18, 0x8a, 0x35, 0x0a, 0xf4, 0x0d, 0x40, 0x2c,
+ 0x0c, 0xa8, 0xd5, 0x22, 0x6a, 0xa9, 0x2f, 0x5b, 0xec, 0xb8, 0x0a, 0x62, 0x75, 0x2b, 0x6a, 0x75,
+ 0xe8, 0x60, 0x88, 0x02, 0x8f, 0xe0, 0x32, 0x7f, 0x67, 0xc0, 0x82, 0xe6, 0x0b, 0xc2, 0xef, 0xae,
+ 0xc3, 0x5c, 0x4b, 0xbb, 0x75, 0xca, 0x2f, 0xe2, 0xd3, 0xd6, 0x6f, 0x24, 0x4e, 0x51, 0x22, 0x02,
+ 0x25, 0xaa, 0x24, 0x45, 0xd1, 0x65, 0x23, 0xb3, 0xd3, 0x46, 0x36, 0x24, 0x9a, 0x34, 0x20, 0xc3,
+ 0x89, 0x64, 0xf3, 0x1f, 0x86, 0x70, 0xe0, 0x28, 0xde, 0xa0, 0xab, 0x5a, 0x4c, 0x33, 0xc4, 0xf6,
+ 0xcd, 0x8d, 0x89, 0x47, 0x17, 0x04, 0x82, 0xdc, 0xff, 0x44, 0x20, 0xb8, 0x51, 0xfc, 0xd5, 0x7b,
+ 0xb5, 0xa9, 0xb7, 0xff, 0xb6, 0x36, 0x65, 0xfe, 0xd2, 0x80, 0xb9, 0xcd, 0x4e, 0xc7, 0xeb, 0xed,
+ 0x75, 0x42, 0xb1, 0x00, 0x13, 0x66, 0x1c, 0xda, 0xc3, 0x5d, 0x5f, 0x2d, 0x14, 0xf8, 0xfd, 0x6e,
+ 0x0a, 0x08, 0x56, 0x18, 0x7e, 0x7f, 0x8e, 0x02, 0x6a, 0x13, 0x75, 0xdd, 0xe2, 0xfb, 0xb3, 0xcd,
+ 0x81, 0x58, 0xe2, 0xf8, 0x21, 0x1f, 0xb9, 0xc4, 0x73, 0x76, 0x2d, 0xdf, 0x6a, 0x11, 0xaa, 0x2e,
+ 0x47, 0xbc, 0xf5, 0xdb, 0x1a, 0x0e, 0xa7, 0x28, 0xcd, 0x7f, 0xe7, 0xa0, 0xb4, 0x15, 0xf8, 0x8e,
+ 0x1b, 0xaa, 0xcb, 0x15, 0xf6, 0x3a, 0x43, 0xc1, 0xe3, 0x4e, 0xaf, 0x43, 0xb0, 0xc0, 0xa0, 0xe7,
+ 0x61, 0x86, 0x85, 0x56, 0xd8, 0x65, 0xc2, 0x9e, 0x52, 0xe3, 0xb1, 0x28, 0x2c, 0x1d, 0x08, 0xe8,
+ 0x79, 0xbf, 0xb6, 0x10, 0x8b, 0x93, 0x20, 0xac, 0x18, 0xb8, 0xa7, 0x07, 0x87, 0x62, 0xa3, 0x9c,
+ 0x5b, 0x32, 0xed, 0x45, 0xf9, 0x23, 0x9f, 0x78, 0xfa, 0xde, 0x10, 0x05, 0x1e, 0xc1, 0x85, 0x4e,
+ 0x01, 0x79, 0x16, 0x0b, 0xef, 0x50, 0xcb, 0x67, 0x42, 0xd7, 0x1d, 0xb7, 0x4d, 0xd4, 0x85, 0xff,
+ 0x62, 0xb6, 0x13, 0xe7, 0x1c, 0x89, 0xde, 0xdb, 0x43, 0xd2, 0xf0, 0x08, 0x0d, 0xe8, 0x09, 0x98,
+ 0xa1, 0xc4, 0x62, 0x81, 0x5f, 0x2d, 0x88, 0xe5, 0xc7, 0x51, 0x19, 0x0b, 0x28, 0x56, 0x58, 0x1e,
+ 0xd0, 0xda, 0x84, 0x31, 0xab, 0x15, 0x85, 0xd7, 0x38, 0xa0, 0xed, 0x4a, 0x30, 0x8e, 0xf0, 0x66,
+ 0x1b, 0x2a, 0x5b, 0x94, 0x58, 0x21, 0x99, 0xc4, 0x2b, 0x3e, 0xfd, 0x81, 0xff, 0x24, 0x0f, 0x95,
+ 0x26, 0xf1, 0x48, 0xa2, 0x6f, 0x1b, 0x50, 0x8b, 0x5a, 0x36, 0xd9, 0x27, 0xd4, 0x0d, 0x9c, 0x03,
+ 0x62, 0x07, 0xbe, 0xc3, 0x84, 0x0b, 0xe4, 0x1b, 0xff, 0xc7, 0xf7, 0xe6, 0xd6, 0x10, 0x16, 0x8f,
+ 0xe0, 0x40, 0x1e, 0x54, 0x3a, 0x54, 0xfc, 0x16, 0xfb, 0x25, 0x3d, 0xa4, 0x7c, 0xed, 0xcb, 0xd9,
+ 0x8e, 0x63, 0x5f, 0x67, 0x6d, 0x2c, 0x9d, 0xf5, 0x6b, 0x95, 0x14, 0x08, 0xa7, 0x85, 0xa3, 0xaf,
+ 0xc3, 0x62, 0x40, 0x3b, 0xc7, 0x96, 0xdf, 0x24, 0x1d, 0xe2, 0x3b, 0xc4, 0x0f, 0x99, 0xd8, 0x85,
+ 0x62, 0x63, 0x99, 0xd7, 0x11, 0x7b, 0x03, 0x38, 0x3c, 0x44, 0x8d, 0x5e, 0x83, 0xa5, 0x0e, 0x0d,
+ 0x3a, 0x56, 0x4b, 0xb8, 0xd4, 0x7e, 0xe0, 0xb9, 0x76, 0x4f, 0xb8, 0x50, 0xa9, 0xf1, 0xd4, 0x59,
+ 0xbf, 0xb6, 0xb4, 0x3f, 0x88, 0x3c, 0xef, 0xd7, 0x2e, 0x89, 0xad, 0xe3, 0x90, 0x04, 0x89, 0x87,
+ 0xc5, 0x68, 0x67, 0x58, 0x18, 0x77, 0x86, 0xe6, 0x0e, 0x14, 0x9b, 0x5d, 0xe5, 0xcf, 0x2f, 0x42,
+ 0xd1, 0x51, 0xbf, 0xd5, 0xce, 0x47, 0x17, 0x2b, 0xa6, 0x39, 0xef, 0xd7, 0x2a, 0xbc, 0x74, 0xac,
+ 0x47, 0x00, 0x1c, 0xb3, 0x98, 0x4f, 0x40, 0x51, 0x1c, 0x39, 0xbb, 0xb7, 0x81, 0x16, 0x21, 0x8f,
+ 0xad, 0xfb, 0x42, 0xca, 0x1c, 0xe6, 0x3f, 0xb5, 0x08, 0xb4, 0x07, 0x70, 0x8b, 0x84, 0xd1, 0xc1,
+ 0x6f, 0xc2, 0x42, 0x14, 0x86, 0xd3, 0xd9, 0xe1, 0xff, 0x95, 0xee, 0x05, 0x9c, 0x46, 0xe3, 0x41,
+ 0x7a, 0xf3, 0x75, 0x28, 0x89, 0x0c, 0xc2, 0xd3, 0x6f, 0x92, 0xea, 0x8d, 0x07, 0xa4, 0xfa, 0x28,
+ 0x7f, 0xe7, 0xc6, 0xe5, 0x6f, 0xcd, 0x5c, 0x0f, 0x2a, 0x92, 0x37, 0x2a, 0x6e, 0x32, 0x69, 0x78,
+ 0x0a, 0x8a, 0x91, 0x99, 0x4a, 0x4b, 0x5c, 0xd4, 0x46, 0x82, 0x70, 0x4c, 0xa1, 0x69, 0x3b, 0x86,
+ 0x54, 0x36, 0xcc, 0xa6, 0x4c, 0xab, 0x5c, 0x72, 0x0f, 0xae, 0x5c, 0x34, 0x4d, 0x3f, 0x84, 0xea,
+ 0xb8, 0x4a, 0xf8, 0x21, 0xf2, 0x75, 0x76, 0x53, 0xcc, 0x77, 0x0c, 0x58, 0xd4, 0x25, 0x65, 0x3f,
+ 0xbe, 0xec, 0x4a, 0x2e, 0xae, 0xd4, 0xb4, 0x1d, 0xf9, 0xb5, 0x01, 0xcb, 0xa9, 0xa5, 0x4d, 0x74,
+ 0xe2, 0x13, 0x18, 0xa5, 0x3b, 0x47, 0x7e, 0x02, 0xe7, 0xf8, 0x4b, 0x0e, 0x2a, 0xb7, 0xad, 0x43,
+ 0xe2, 0x1d, 0x10, 0x8f, 0xd8, 0x61, 0x40, 0xd1, 0x0f, 0xa0, 0xdc, 0xb6, 0x42, 0xfb, 0x58, 0x40,
+ 0xa3, 0xaa, 0xbe, 0x99, 0x2d, 0xd8, 0xa5, 0x24, 0xd5, 0x77, 0x13, 0x31, 0x37, 0xfd, 0x90, 0xf6,
+ 0x1a, 0x97, 0x94, 0x49, 0x65, 0x0d, 0x83, 0x75, 0x6d, 0xa2, 0x15, 0x13, 0xdf, 0x37, 0xdf, 0xea,
+ 0xf0, 0x92, 0x63, 0xf2, 0x0e, 0x30, 0x65, 0x02, 0x26, 0x6f, 0x76, 0x5d, 0x4a, 0xda, 0xc4, 0x0f,
+ 0x93, 0x56, 0x6c, 0x77, 0x40, 0x3e, 0x1e, 0xd2, 0xb8, 0xf2, 0x12, 0x2c, 0x0e, 0x1a, 0xcf, 0xe3,
+ 0xcf, 0x09, 0xe9, 0xc9, 0xf3, 0xc2, 0xfc, 0x27, 0x5a, 0x86, 0xc2, 0xa9, 0xe5, 0x75, 0xd5, 0x6d,
+ 0xc4, 0xf2, 0xe3, 0x46, 0xee, 0xba, 0x61, 0xfe, 0xd6, 0x80, 0xea, 0x38, 0x43, 0xd0, 0xe7, 0x35,
+ 0x41, 0x8d, 0xb2, 0xb2, 0x2a, 0xff, 0x0a, 0xe9, 0x49, 0xa9, 0x37, 0xa1, 0x18, 0x74, 0x78, 0x3d,
+ 0x10, 0x50, 0x75, 0xea, 0x4f, 0x46, 0x27, 0xb9, 0xa7, 0xe0, 0xe7, 0xfd, 0xda, 0xe5, 0x94, 0xf8,
+ 0x08, 0x81, 0x63, 0x56, 0x1e, 0xa9, 0x85, 0x3d, 0x3c, 0x7b, 0xc4, 0x91, 0xfa, 0x9e, 0x80, 0x60,
+ 0x85, 0x31, 0xff, 0x60, 0xc0, 0xb4, 0x28, 0xa6, 0x5f, 0x87, 0x22, 0xdf, 0x3f, 0xc7, 0x0a, 0x2d,
+ 0x61, 0x57, 0xe6, 0x36, 0x8e, 0x73, 0xef, 0x92, 0xd0, 0x4a, 0xbc, 0x2d, 0x82, 0xe0, 0x58, 0x22,
+ 0xc2, 0x50, 0x70, 0x43, 0xd2, 0x8e, 0x0e, 0xf2, 0xe9, 0xb1, 0xa2, 0xd5, 0x10, 0xa1, 0x8e, 0xad,
+ 0xfb, 0x37, 0xdf, 0x0a, 0x89, 0xcf, 0x0f, 0x23, 0xb9, 0x1a, 0x3b, 0x5c, 0x06, 0x96, 0xa2, 0xcc,
+ 0x7f, 0x19, 0x10, 0xab, 0xe2, 0xce, 0xcf, 0x88, 0x77, 0x74, 0xdb, 0xf5, 0x4f, 0xd4, 0xb6, 0xc6,
+ 0xe6, 0x1c, 0x28, 0x38, 0x8e, 0x29, 0x46, 0xa5, 0x87, 0xdc, 0x64, 0xe9, 0x81, 0x2b, 0xb4, 0x03,
+ 0x3f, 0x74, 0xfd, 0xee, 0xd0, 0x6d, 0xdb, 0x52, 0x70, 0x1c, 0x53, 0xf0, 0x42, 0x84, 0x92, 0xb6,
+ 0xe5, 0xfa, 0xae, 0xdf, 0xe2, 0x8b, 0xd8, 0x0a, 0xba, 0x7e, 0x28, 0x32, 0xb2, 0x2a, 0x44, 0xf0,
+ 0x10, 0x16, 0x8f, 0xe0, 0x30, 0x7f, 0x3f, 0x0d, 0x65, 0xbe, 0xe6, 0x28, 0xcf, 0xbd, 0x00, 0x15,
+ 0x4f, 0xf7, 0x02, 0xb5, 0xf6, 0xcb, 0xca, 0x94, 0xf4, 0xbd, 0xc6, 0x69, 0x5a, 0xce, 0x2c, 0xea,
+ 0xa7, 0x98, 0x39, 0x97, 0x66, 0xde, 0xd6, 0x91, 0x38, 0x4d, 0xcb, 0xa3, 0xd7, 0x7d, 0x7e, 0x3f,
+ 0x54, 0x65, 0x12, 0x1f, 0xd1, 0x37, 0x39, 0x10, 0x4b, 0x1c, 0xda, 0x85, 0x4b, 0x96, 0xe7, 0x05,
+ 0xf7, 0x05, 0xb0, 0x11, 0x04, 0x27, 0x6d, 0x8b, 0x9e, 0x30, 0xd1, 0x08, 0x17, 0x1b, 0x9f, 0x53,
+ 0x2c, 0x97, 0x36, 0x87, 0x49, 0xf0, 0x28, 0xbe, 0x51, 0xc7, 0x36, 0x3d, 0xe1, 0xb1, 0x1d, 0xc3,
+ 0xf2, 0x00, 0x48, 0xdc, 0x72, 0xd5, 0x95, 0x3e, 0xab, 0xe4, 0x2c, 0xe3, 0x11, 0x34, 0xe7, 0x63,
+ 0xe0, 0x78, 0xa4, 0x44, 0x74, 0x03, 0xe6, 0xb9, 0x27, 0x07, 0xdd, 0x30, 0xaa, 0x3b, 0x0b, 0xe2,
+ 0xb8, 0xd1, 0x59, 0xbf, 0x36, 0x7f, 0x27, 0x85, 0xc1, 0x03, 0x94, 0x7c, 0x73, 0x3d, 0xb7, 0xed,
+ 0x86, 0xd5, 0x59, 0xc1, 0x12, 0x6f, 0xee, 0x6d, 0x0e, 0xc4, 0x12, 0x97, 0xf2, 0xc0, 0xe2, 0x45,
+ 0x1e, 0x68, 0xfe, 0x39, 0x0f, 0x48, 0x16, 0xca, 0x8e, 0xac, 0xa7, 0x64, 0x48, 0xe3, 0xd5, 0xbc,
+ 0x2a, 0xb4, 0x8d, 0x81, 0x6a, 0x5e, 0xd5, 0xd8, 0x11, 0x1e, 0xed, 0x42, 0x49, 0x86, 0x96, 0xe4,
+ 0xba, 0xac, 0x2b, 0xe2, 0xd2, 0x5e, 0x84, 0x38, 0xef, 0xd7, 0x56, 0x52, 0x6a, 0x62, 0x8c, 0xe8,
+ 0xb4, 0x12, 0x09, 0xe8, 0x1a, 0x80, 0xd5, 0x71, 0xf5, 0x59, 0x5b, 0x29, 0x99, 0xb8, 0x24, 0x5d,
+ 0x33, 0xd6, 0xa8, 0xd0, 0xcb, 0x30, 0x1d, 0x7e, 0xba, 0x6e, 0xa8, 0x28, 0x9a, 0x3d, 0xde, 0xfb,
+ 0x08, 0x09, 0x5c, 0xbb, 0xf0, 0x67, 0xc6, 0xcd, 0x52, 0x8d, 0x4c, 0xac, 0x7d, 0x3b, 0xc6, 0x60,
+ 0x8d, 0x0a, 0x7d, 0x0b, 0x8a, 0x47, 0xaa, 0x14, 0x15, 0x07, 0x93, 0x39, 0x44, 0x46, 0x05, 0xac,
+ 0x6c, 0xf7, 0xa3, 0x2f, 0x1c, 0x4b, 0x43, 0x5f, 0x81, 0x32, 0xeb, 0x1e, 0xc6, 0xd9, 0x5b, 0x9e,
+ 0x66, 0x9c, 0x2a, 0x0f, 0x12, 0x14, 0xd6, 0xe9, 0xcc, 0x37, 0xa1, 0xb4, 0xeb, 0xda, 0x34, 0x10,
+ 0xfd, 0xdb, 0x93, 0x30, 0xcb, 0x52, 0x0d, 0x4e, 0x7c, 0x92, 0x91, 0x97, 0x45, 0x78, 0xee, 0x5e,
+ 0xbe, 0xe5, 0x07, 0xb2, 0x8d, 0x29, 0x24, 0xee, 0xf5, 0x2a, 0x07, 0x62, 0x89, 0xbb, 0xb1, 0xcc,
+ 0x0b, 0x84, 0x9f, 0xbe, 0x5f, 0x9b, 0x7a, 0xf7, 0xfd, 0xda, 0xd4, 0x7b, 0xef, 0xab, 0x62, 0xe1,
+ 0x1c, 0x00, 0xf6, 0x0e, 0xbf, 0x47, 0x6c, 0x19, 0x76, 0x33, 0x8d, 0xe4, 0xa2, 0x49, 0xb0, 0x18,
+ 0xc9, 0xe5, 0x06, 0x8a, 0x3e, 0x0d, 0x87, 0x53, 0x94, 0x68, 0x1d, 0x4a, 0xf1, 0xb0, 0x4d, 0xf9,
+ 0xc7, 0x52, 0xe4, 0x6f, 0xf1, 0x44, 0x0e, 0x27, 0x34, 0xa9, 0x1c, 0x30, 0x7d, 0x61, 0x0e, 0x68,
+ 0x40, 0xbe, 0xeb, 0x3a, 0xaa, 0xd9, 0x7d, 0x26, 0xca, 0xc1, 0x77, 0x77, 0x9a, 0xe7, 0xfd, 0xda,
+ 0x63, 0xe3, 0x66, 0xdc, 0x61, 0xaf, 0x43, 0x58, 0xfd, 0xee, 0x4e, 0x13, 0x73, 0xe6, 0x51, 0x01,
+ 0x69, 0x66, 0xc2, 0x80, 0x74, 0x0d, 0xa0, 0x95, 0x8c, 0x0c, 0xe4, 0x7d, 0x8f, 0x1d, 0x51, 0x1b,
+ 0x15, 0x68, 0x54, 0x88, 0xc1, 0x92, 0xcd, 0xfb, 0x6a, 0xd5, 0xba, 0xb3, 0xd0, 0x6a, 0xcb, 0x21,
+ 0xe4, 0x64, 0x77, 0xe2, 0x8a, 0x52, 0xb3, 0xb4, 0x35, 0x28, 0x0c, 0x0f, 0xcb, 0x47, 0x01, 0x2c,
+ 0x39, 0xaa, 0x43, 0x4c, 0x94, 0x96, 0x26, 0x56, 0x7a, 0x99, 0x2b, 0x6c, 0x0e, 0x0a, 0xc2, 0xc3,
+ 0xb2, 0xd1, 0x77, 0x61, 0x25, 0x02, 0x0e, 0xb7, 0xe9, 0x22, 0x60, 0xe7, 0x1b, 0xab, 0x67, 0xfd,
+ 0xda, 0x4a, 0x73, 0x2c, 0x15, 0x7e, 0x80, 0x04, 0xe4, 0xc0, 0x8c, 0x27, 0x0b, 0xdc, 0xb2, 0x28,
+ 0x4a, 0xbe, 0x9a, 0x6d, 0x15, 0x89, 0xf7, 0xd7, 0xf5, 0xc2, 0x36, 0x1e, 0x97, 0xa8, 0x9a, 0x56,
+ 0xc9, 0x46, 0x6f, 0x41, 0xd9, 0xf2, 0xfd, 0x20, 0xb4, 0xe4, 0xe0, 0x60, 0x4e, 0xa8, 0xda, 0x9c,
+ 0x58, 0xd5, 0x66, 0x22, 0x63, 0xa0, 0x90, 0xd6, 0x30, 0x58, 0x57, 0x85, 0xee, 0xc3, 0x42, 0x70,
+ 0xdf, 0x27, 0x14, 0x93, 0x23, 0x42, 0x89, 0x6f, 0x13, 0x56, 0xad, 0x08, 0xed, 0xcf, 0x66, 0xd4,
+ 0x9e, 0x62, 0x4e, 0x5c, 0x3a, 0x0d, 0x67, 0x78, 0x50, 0x0b, 0xaa, 0xf3, 0xd8, 0xea, 0x5b, 0x9e,
+ 0xfb, 0x7d, 0x42, 0x59, 0x75, 0x3e, 0x99, 0x13, 0x6f, 0xc7, 0x50, 0xac, 0x51, 0xf0, 0xe8, 0x67,
+ 0x7b, 0x5d, 0x16, 0x12, 0x39, 0xb4, 0x5f, 0x48, 0x47, 0xbf, 0xad, 0x04, 0x85, 0x75, 0x3a, 0xd4,
+ 0x85, 0x4a, 0x5b, 0xcf, 0x34, 0xd5, 0x25, 0xb1, 0xba, 0xeb, 0xd9, 0x56, 0x37, 0x9c, 0x0b, 0x93,
+ 0xc2, 0x27, 0x85, 0xc3, 0x69, 0x2d, 0x2b, 0xcf, 0x43, 0xf9, 0x53, 0xf6, 0x04, 0xbc, 0xa7, 0x18,
+ 0x3c, 0xc7, 0x89, 0x7a, 0x8a, 0x3f, 0xe6, 0x60, 0x3e, 0xbd, 0xfb, 0x03, 0x59, 0xb4, 0x90, 0x29,
+ 0x8b, 0x46, 0xdd, 0xab, 0x31, 0xf6, 0x9d, 0x21, 0x0a, 0xeb, 0xf9, 0xb1, 0x61, 0x5d, 0x45, 0xcf,
+ 0xe9, 0x87, 0x89, 0x9e, 0x75, 0x00, 0x5e, 0x9e, 0xd0, 0xc0, 0xf3, 0x08, 0x15, 0x81, 0xb3, 0xa8,
+ 0xde, 0x13, 0x62, 0x28, 0xd6, 0x28, 0x78, 0x11, 0x7d, 0xe8, 0x05, 0xf6, 0x89, 0xd8, 0x82, 0xe8,
+ 0xd2, 0x8b, 0x90, 0x59, 0x94, 0x45, 0x74, 0x63, 0x08, 0x8b, 0x47, 0x70, 0x98, 0x3d, 0xb8, 0xbc,
+ 0x6f, 0xd1, 0xd0, 0xb5, 0xbc, 0xe4, 0x82, 0x89, 0x2e, 0xe5, 0x8d, 0xa1, 0x1e, 0xe8, 0x99, 0x49,
+ 0x2f, 0x6a, 0xb2, 0xf9, 0x09, 0x2c, 0xe9, 0x83, 0xcc, 0xbf, 0x1a, 0x70, 0x65, 0xa4, 0xee, 0xcf,
+ 0xa0, 0x07, 0x7b, 0x23, 0xdd, 0x83, 0xbd, 0x90, 0x71, 0x78, 0x39, 0xca, 0xda, 0x31, 0x1d, 0xd9,
+ 0x2c, 0x14, 0xf6, 0x79, 0xed, 0x6b, 0xfe, 0xc2, 0x80, 0x39, 0xf1, 0x6b, 0x92, 0xc1, 0x6f, 0x2d,
+ 0xfd, 0x1c, 0x50, 0x7a, 0x84, 0x4f, 0x01, 0xef, 0x18, 0x90, 0x1e, 0xb9, 0xa2, 0x97, 0xa4, 0xff,
+ 0x1a, 0xf1, 0x4c, 0x74, 0x42, 0xdf, 0x7d, 0x71, 0x5c, 0x07, 0x79, 0x29, 0xd3, 0x70, 0xf1, 0x29,
+ 0x28, 0xe1, 0x20, 0x08, 0xf7, 0xad, 0xf0, 0x98, 0xf1, 0x85, 0x77, 0xf8, 0x0f, 0xb5, 0x37, 0x62,
+ 0xe1, 0x02, 0x83, 0x25, 0xdc, 0xfc, 0xb9, 0x01, 0x57, 0xc6, 0x3e, 0xd1, 0xf0, 0x10, 0x60, 0xc7,
+ 0x5f, 0x6a, 0x45, 0xb1, 0x17, 0x26, 0x74, 0x58, 0xa3, 0xe2, 0xad, 0x5f, 0xea, 0x5d, 0x67, 0xb0,
+ 0xf5, 0x4b, 0x69, 0xc3, 0x69, 0x5a, 0xf3, 0x9f, 0x39, 0x50, 0x6f, 0x22, 0xff, 0x65, 0x8f, 0x7d,
+ 0x62, 0xe0, 0x45, 0x66, 0x3e, 0xfd, 0x22, 0x13, 0x3f, 0xbf, 0x68, 0x4f, 0x12, 0xf9, 0x07, 0x3f,
+ 0x49, 0xa0, 0xe7, 0xe2, 0x57, 0x0e, 0x19, 0xba, 0x56, 0xd3, 0xaf, 0x1c, 0xe7, 0xfd, 0xda, 0x9c,
+ 0x12, 0x9e, 0x7e, 0xf5, 0x78, 0x0d, 0x66, 0x1d, 0x12, 0x5a, 0xae, 0x27, 0xdb, 0xb8, 0xcc, 0xb3,
+ 0x7f, 0x29, 0xac, 0x29, 0x59, 0x1b, 0x65, 0x6e, 0x93, 0xfa, 0xc0, 0x91, 0x40, 0x1e, 0x6d, 0xed,
+ 0xc0, 0x91, 0x5d, 0x48, 0x21, 0x89, 0xb6, 0x5b, 0x81, 0x43, 0xb0, 0xc0, 0x98, 0xef, 0x1a, 0x50,
+ 0x96, 0x92, 0xb6, 0xac, 0x2e, 0x23, 0x68, 0x23, 0x5e, 0x85, 0x3c, 0xee, 0x2b, 0xfa, 0x73, 0xd6,
+ 0x79, 0xbf, 0x56, 0x12, 0x64, 0xa2, 0x81, 0x19, 0xf1, 0x6c, 0x93, 0xbb, 0x60, 0x8f, 0x1e, 0x87,
+ 0x82, 0xb8, 0x3d, 0x6a, 0x33, 0x93, 0x77, 0x39, 0x0e, 0xc4, 0x12, 0x67, 0x7e, 0x9c, 0x83, 0x4a,
+ 0x6a, 0x71, 0x19, 0x7a, 0x81, 0x78, 0xe2, 0x99, 0xcb, 0x30, 0x45, 0x1f, 0xff, 0x0a, 0xae, 0x72,
+ 0xcf, 0xcc, 0xc3, 0xe4, 0x9e, 0x6f, 0xc3, 0x8c, 0xcd, 0xf7, 0x28, 0xfa, 0x53, 0xc5, 0xc6, 0x24,
+ 0xc7, 0x29, 0x76, 0x37, 0xf1, 0x46, 0xf1, 0xc9, 0xb0, 0x12, 0x88, 0x6e, 0xc1, 0x12, 0x25, 0x21,
+ 0xed, 0x6d, 0x1e, 0x85, 0x84, 0xea, 0xbd, 0x7f, 0x21, 0xa9, 0xb8, 0xf1, 0x20, 0x01, 0x1e, 0xe6,
+ 0x31, 0x0f, 0x61, 0xee, 0x8e, 0x75, 0xe8, 0xc5, 0xaf, 0x59, 0x18, 0x2a, 0xae, 0x6f, 0x7b, 0x5d,
+ 0x87, 0xc8, 0x68, 0x1c, 0x45, 0xaf, 0xe8, 0xd2, 0xee, 0xe8, 0xc8, 0xf3, 0x7e, 0xed, 0x52, 0x0a,
+ 0x20, 0x9f, 0x6f, 0x70, 0x5a, 0x84, 0xe9, 0xc1, 0xf4, 0x67, 0xd8, 0x3d, 0x7e, 0x07, 0x4a, 0x49,
+ 0x7d, 0xff, 0x88, 0x55, 0x9a, 0x6f, 0x40, 0x91, 0x7b, 0x7c, 0xd4, 0x97, 0x5e, 0x50, 0xe2, 0xa4,
+ 0x0b, 0xa7, 0x5c, 0x96, 0xc2, 0xc9, 0x6c, 0x43, 0xe5, 0x6e, 0xc7, 0x79, 0xc8, 0xf7, 0xcc, 0x5c,
+ 0xe6, 0xac, 0x75, 0x0d, 0xe4, 0xff, 0x35, 0x78, 0x82, 0x90, 0x99, 0x5b, 0x4b, 0x10, 0x7a, 0xe2,
+ 0xd5, 0x86, 0xf9, 0x3f, 0x36, 0x00, 0xc4, 0xd4, 0xec, 0xe6, 0x29, 0xf1, 0xc3, 0x0c, 0xaf, 0xde,
+ 0x77, 0x61, 0x26, 0x90, 0xde, 0x24, 0xdf, 0x34, 0x27, 0x1c, 0xcd, 0xc6, 0x97, 0x40, 0xfa, 0x13,
+ 0x56, 0xc2, 0x1a, 0x57, 0x3f, 0xf8, 0x64, 0x75, 0xea, 0xc3, 0x4f, 0x56, 0xa7, 0x3e, 0xfa, 0x64,
+ 0x75, 0xea, 0xed, 0xb3, 0x55, 0xe3, 0x83, 0xb3, 0x55, 0xe3, 0xc3, 0xb3, 0x55, 0xe3, 0xa3, 0xb3,
+ 0x55, 0xe3, 0xe3, 0xb3, 0x55, 0xe3, 0xdd, 0xbf, 0xaf, 0x4e, 0xbd, 0x96, 0x3b, 0xdd, 0xf8, 0x4f,
+ 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x18, 0xc5, 0x8c, 0x25, 0x27, 0x00, 0x00,
}
func (m *APIGroup) Marshal() (dAtA []byte, err error) {
@@ -2568,6 +2569,11 @@ func (m *ManagedFieldsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ i -= len(m.Subresource)
+ copy(dAtA[i:], m.Subresource)
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource)))
+ i--
+ dAtA[i] = 0x42
if m.FieldsV1 != nil {
{
size, err := m.FieldsV1.MarshalToSizedBuffer(dAtA[:i])
@@ -3914,6 +3920,8 @@ func (m *ManagedFieldsEntry) Size() (n int) {
l = m.FieldsV1.Size()
n += 1 + l + sovGenerated(uint64(l))
}
+ l = len(m.Subresource)
+ n += 1 + l + sovGenerated(uint64(l))
return n
}
@@ -4508,6 +4516,7 @@ func (this *ManagedFieldsEntry) String() string {
`Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`,
`FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`,
`FieldsV1:` + strings.Replace(fmt.Sprintf("%v", this.FieldsV1), "FieldsV1", "FieldsV1", 1) + `,`,
+ `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`,
`}`,
}, "")
return s
@@ -8442,6 +8451,38 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowGenerated
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthGenerated
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Subresource = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
index 4f41504f3..7d450644d 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto
@@ -499,8 +499,6 @@ message ListOptions {
// assume bookmarks are returned at any specific interval, nor may they
// assume the server will send any BOOKMARK event during a session.
// If this is not a watch, this field is ignored.
- // If the feature gate WatchBookmarks is not enabled in apiserver,
- // this field is ignored.
// +optional
optional bool allowWatchBookmarks = 9;
@@ -589,6 +587,15 @@ message ManagedFieldsEntry {
// FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.
// +optional
optional FieldsV1 fieldsV1 = 7;
+
+ // Subresource is the name of the subresource used to update that object, or
+ // empty string if the object was updated through the main resource. The
+ // value of this field is used to distinguish between managers, even if they
+ // share the same name. For example, a status update will be distinct from a
+ // regular update using the same manager name.
+ // Note that the APIVersion field is not related to the Subresource field and
+ // it always corresponds to the version of the main resource.
+ optional string subresource = 8;
}
// MicroTime is version of Time with microsecond level precision.
@@ -788,6 +795,7 @@ message ObjectMeta {
// OwnerReference contains enough information to let you identify an owning
// object. An owning object must be in the same namespace as the dependent, or
// be cluster-scoped, so there is no namespace field.
+// +structType=atomic
message OwnerReference {
// API version of the referent.
optional string apiVersion = 5;
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
index 3c5a1518c..2b6a30b65 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go
@@ -38,13 +38,13 @@ func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) {
if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 {
return labels.Everything(), nil
}
- selector := labels.NewSelector()
+ requirements := make([]labels.Requirement, 0, len(ps.MatchLabels)+len(ps.MatchExpressions))
for k, v := range ps.MatchLabels {
r, err := labels.NewRequirement(k, selection.Equals, []string{v})
if err != nil {
return nil, err
}
- selector = selector.Add(*r)
+ requirements = append(requirements, *r)
}
for _, expr := range ps.MatchExpressions {
var op selection.Operator
@@ -64,8 +64,10 @@ func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) {
if err != nil {
return nil, err
}
- selector = selector.Add(*r)
+ requirements = append(requirements, *r)
}
+ selector := labels.NewSelector()
+ selector = selector.Add(requirements...)
return selector, nil
}
@@ -154,7 +156,7 @@ func SetAsLabelSelector(ls labels.Set) *LabelSelector {
}
selector := &LabelSelector{
- MatchLabels: make(map[string]string),
+ MatchLabels: make(map[string]string, len(ls)),
}
for label, value := range ls {
selector.MatchLabels[label] = value
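The LabelSelectorAsSelector rewrite above only changes how requirements are accumulated (one pre-sized slice and a single Add call instead of repeated selector copies); its behaviour is unchanged. A minimal usage sketch, with illustrative label keys and values, assuming the k8s.io/apimachinery v0.22 API surface shown in this diff:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A LabelSelector as it would appear in a Deployment or Service spec.
	ls := &metav1.LabelSelector{
		MatchLabels: map[string]string{"app": "web"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "tier", Operator: metav1.LabelSelectorOpIn, Values: []string{"frontend", "cache"}},
		},
	}

	// Convert to a labels.Selector; all requirements are now added in one call.
	sel, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		panic(err)
	}

	fmt.Println(sel.Matches(labels.Set{"app": "web", "tier": "frontend"})) // true
	fmt.Println(sel.Matches(labels.Set{"app": "web", "tier": "backend"}))  // false
}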
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index 79e2ad48a..9660282c4 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -303,6 +303,7 @@ const (
// OwnerReference contains enough information to let you identify an owning
// object. An owning object must be in the same namespace as the dependent, or
// be cluster-scoped, so there is no namespace field.
+// +structType=atomic
type OwnerReference struct {
// API version of the referent.
APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"`
@@ -356,8 +357,6 @@ type ListOptions struct {
// assume bookmarks are returned at any specific interval, nor may they
// assume the server will send any BOOKMARK event during a session.
// If this is not a watch, this field is ignored.
- // If the feature gate WatchBookmarks is not enabled in apiserver,
- // this field is ignored.
// +optional
AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"`
@@ -1180,6 +1179,15 @@ type ManagedFieldsEntry struct {
// FieldsV1 holds the first JSON version format as described in the "FieldsV1" type.
// +optional
FieldsV1 *FieldsV1 `json:"fieldsV1,omitempty" protobuf:"bytes,7,opt,name=fieldsV1"`
+
+ // Subresource is the name of the subresource used to update that object, or
+ // empty string if the object was updated through the main resource. The
+ // value of this field is used to distinguish between managers, even if they
+ // share the same name. For example, a status update will be distinct from a
+ // regular update using the same manager name.
+ // Note that the APIVersion field is not related to the Subresource field and
+ // it always corresponds to the version of the main resource.
+ Subresource string `json:"subresource,omitempty" protobuf:"bytes,8,opt,name=subresource"`
}
// ManagedFieldsOperationType is the type of operation which lead to a ManagedFieldsEntry being created.
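A short sketch of how the new Subresource field can be read back from an object's managedFields; the object, manager names and operations below are illustrative only:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// printManagers reports whether each field manager wrote through the main
// resource or through a subresource such as "status".
func printManagers(d *appsv1.Deployment) {
	for _, mf := range d.GetManagedFields() {
		target := "main resource"
		if mf.Subresource != "" {
			target = mf.Subresource + " subresource"
		}
		fmt.Printf("manager %q (%s) owns fields via the %s\n", mf.Manager, mf.Operation, target)
	}
}

func main() {
	// Hand-built for illustration; in practice the object comes from the API server.
	d := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			ManagedFields: []metav1.ManagedFieldsEntry{
				{Manager: "kubectl-client-side-apply", Operation: metav1.ManagedFieldsOperationUpdate},
				{Manager: "kube-controller-manager", Operation: metav1.ManagedFieldsOperationUpdate, Subresource: "status"},
			},
		},
	}
	printManagers(d)
}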
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
index c33d8ffa7..3eae04d07 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go
@@ -209,7 +209,7 @@ var map_ListOptions = map[string]string{
"labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
"fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
"watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
- "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.",
+ "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
"resourceVersion": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
"resourceVersionMatch": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
"timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
@@ -222,13 +222,14 @@ func (ListOptions) SwaggerDoc() map[string]string {
}
var map_ManagedFieldsEntry = map[string]string{
- "": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.",
- "manager": "Manager is an identifier of the workflow managing these fields.",
- "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.",
- "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.",
- "time": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'",
- "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"",
- "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.",
+ "": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.",
+ "manager": "Manager is an identifier of the workflow managing these fields.",
+ "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.",
+ "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.",
+ "time": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'",
+ "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"",
+ "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.",
+ "subresource": "Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.",
}
func (ManagedFieldsEntry) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
index a5a7f144a..bd82a9d65 100644
--- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go
@@ -81,6 +81,7 @@ func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorLi
func ValidateDeleteOptions(options *metav1.DeleteOptions) field.ErrorList {
allErrs := field.ErrorList{}
+ //lint:file-ignore SA1019 Keep validation for deprecated OrphanDependents option until it's being removed
if options.OrphanDependents != nil && options.PropagationPolicy != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("propagationPolicy"), options.PropagationPolicy, "orphanDependents and deletionPropagation cannot be both set"))
}
@@ -171,6 +172,8 @@ func ValidateTableOptions(opts *metav1.TableOptions) field.ErrorList {
return allErrs
}
+const MaxSubresourceNameLength = 256
+
func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
for i, fields := range fieldsList {
@@ -184,6 +187,10 @@ func ValidateManagedFields(fieldsList []metav1.ManagedFieldsEntry, fldPath *fiel
allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldsType"), fields.FieldsType, "must be `FieldsV1`"))
}
allErrs = append(allErrs, ValidateFieldManager(fields.Manager, fldPath.Child("manager"))...)
+
+ if len(fields.Subresource) > MaxSubresourceNameLength {
+ allErrs = append(allErrs, field.TooLong(fldPath.Child("subresource"), fields.Subresource, MaxSubresourceNameLength))
+ }
}
return allErrs
}
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
index b0865777a..9eea34579 100644
--- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go
+++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
@@ -31,12 +31,15 @@ import (
)
var (
- validRequirementOperators = []string{
+ unaryOperators = []string{
+ string(selection.Exists), string(selection.DoesNotExist),
+ }
+ binaryOperators = []string{
string(selection.In), string(selection.NotIn),
string(selection.Equals), string(selection.DoubleEquals), string(selection.NotEquals),
- string(selection.Exists), string(selection.DoesNotExist),
string(selection.GreaterThan), string(selection.LessThan),
}
+ validRequirementOperators = append(binaryOperators, unaryOperators...)
)
// Requirements is AND of all requirements.
@@ -140,7 +143,7 @@ type Requirement struct {
// NewRequirement is the constructor for a Requirement.
// If any of these rules is violated, an error is returned:
-// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist.
+// (1) The operator can only be In, NotIn, Equals, DoubleEquals, Gt, Lt, NotEquals, Exists, or DoesNotExist.
// (2) If the operator is In or NotIn, the values set must be non-empty.
// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value.
// (4) If the operator is Exists or DoesNotExist, the value set must be empty.
@@ -364,13 +367,9 @@ func safeSort(in []string) []string {
// Add adds requirements to the selector. It copies the current selector returning a new one
func (s internalSelector) Add(reqs ...Requirement) Selector {
- var ret internalSelector
- for ix := range s {
- ret = append(ret, s[ix])
- }
- for _, r := range reqs {
- ret = append(ret, r)
- }
+ ret := make(internalSelector, 0, len(s)+len(reqs))
+ ret = append(ret, s...)
+ ret = append(ret, reqs...)
sort.Sort(ByKey(ret))
return ret
}
@@ -753,7 +752,7 @@ func (p *Parser) parseOperator() (op selection.Operator, err error) {
case NotEqualsToken:
op = selection.NotEquals
default:
- return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit)
+ return "", fmt.Errorf("found '%s', expected: %v", lit, strings.Join(binaryOperators, ", "))
}
return op, nil
}
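The operator regrouping above feeds the friendlier parse error; selector syntax itself is unchanged. A small sketch exercising several of the binary operators, with made-up label keys:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// "in", "!=" and ">" are all binary operators named in the new error message.
	sel, err := labels.Parse("environment in (prod, staging), tier != cache, partition > 3")
	if err != nil {
		panic(err)
	}
	fmt.Println(sel.Matches(labels.Set{"environment": "prod", "tier": "frontend", "partition": "7"})) // true
}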
diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go
index f6b307aa6..1328dd612 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go
@@ -17,10 +17,9 @@ limitations under the License.
package cache
import (
+ "container/list"
"sync"
"time"
-
- "github.com/hashicorp/golang-lru"
)
// Clock defines an interface for obtaining the current time
@@ -39,8 +38,11 @@ type LRUExpireCache struct {
// clock is used to obtain the current time
clock Clock
- cache *lru.Cache
- lock sync.Mutex
+ lock sync.Mutex
+
+ maxSize int
+ evictionList list.List
+ entries map[interface{}]*list.Element
}
// NewLRUExpireCache creates an expiring cache with the given size
@@ -50,15 +52,19 @@ func NewLRUExpireCache(maxSize int) *LRUExpireCache {
// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time.
func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache {
- cache, err := lru.New(maxSize)
- if err != nil {
- // if called with an invalid size
- panic(err)
+ if maxSize <= 0 {
+ panic("maxSize must be > 0")
+ }
+
+ return &LRUExpireCache{
+ clock: clock,
+ maxSize: maxSize,
+ entries: map[interface{}]*list.Element{},
}
- return &LRUExpireCache{clock: clock, cache: cache}
}
type cacheEntry struct {
+ key interface{}
value interface{}
expireTime time.Time
}
@@ -67,7 +73,31 @@ type cacheEntry struct {
func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) {
c.lock.Lock()
defer c.lock.Unlock()
- c.cache.Add(key, &cacheEntry{value, c.clock.Now().Add(ttl)})
+
+ // Key already exists
+ oldElement, ok := c.entries[key]
+ if ok {
+ c.evictionList.MoveToFront(oldElement)
+ oldElement.Value.(*cacheEntry).value = value
+ oldElement.Value.(*cacheEntry).expireTime = c.clock.Now().Add(ttl)
+ return
+ }
+
+ // Make space if necessary
+ if c.evictionList.Len() >= c.maxSize {
+ toEvict := c.evictionList.Back()
+ c.evictionList.Remove(toEvict)
+ delete(c.entries, toEvict.Value.(*cacheEntry).key)
+ }
+
+ // Add new entry
+ entry := &cacheEntry{
+ key: key,
+ value: value,
+ expireTime: c.clock.Now().Add(ttl),
+ }
+ element := c.evictionList.PushFront(entry)
+ c.entries[key] = element
}
// Get returns the value at the specified key from the cache if it exists and is not
@@ -75,28 +105,56 @@ func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Durati
func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) {
c.lock.Lock()
defer c.lock.Unlock()
- e, ok := c.cache.Get(key)
+
+ element, ok := c.entries[key]
if !ok {
return nil, false
}
- if c.clock.Now().After(e.(*cacheEntry).expireTime) {
- c.cache.Remove(key)
+
+ if c.clock.Now().After(element.Value.(*cacheEntry).expireTime) {
+ c.evictionList.Remove(element)
+ delete(c.entries, key)
return nil, false
}
- return e.(*cacheEntry).value, true
+
+ c.evictionList.MoveToFront(element)
+
+ return element.Value.(*cacheEntry).value, true
}
// Remove removes the specified key from the cache if it exists
func (c *LRUExpireCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
- c.cache.Remove(key)
+
+ element, ok := c.entries[key]
+ if !ok {
+ return
+ }
+
+ c.evictionList.Remove(element)
+ delete(c.entries, key)
}
-// Keys returns all the keys in the cache, even if they are expired. Subsequent calls to
-// get may return not found. It returns all keys from oldest to newest.
+// Keys returns all unexpired keys in the cache.
+//
+// Keep in mind that subsequent calls to Get() for any of the returned keys
+// might return "not found".
+//
+// Keys are returned ordered from least recently used to most recently used.
func (c *LRUExpireCache) Keys() []interface{} {
c.lock.Lock()
defer c.lock.Unlock()
- return c.cache.Keys()
+
+ now := c.clock.Now()
+
+ val := make([]interface{}, 0, c.evictionList.Len())
+ for element := c.evictionList.Back(); element != nil; element = element.Prev() {
+ // Only return unexpired keys
+ if !now.After(element.Value.(*cacheEntry).expireTime) {
+ val = append(val, element.Value.(*cacheEntry).key)
+ }
+ }
+
+ return val
}
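The rewritten LRUExpireCache keeps the same exported surface (NewLRUExpireCache, Add, Get, Remove, Keys) while replacing hashicorp/golang-lru with container/list plus a map. A minimal usage sketch with illustrative keys and TTLs:

package main

import (
	"fmt"
	"time"

	utilcache "k8s.io/apimachinery/pkg/util/cache"
)

func main() {
	c := utilcache.NewLRUExpireCache(2) // capacity of two entries

	c.Add("a", 1, time.Minute)
	c.Add("b", 2, time.Minute)
	c.Add("c", 3, time.Minute) // evicts "a", the least recently used entry

	if _, ok := c.Get("a"); !ok {
		fmt.Println(`"a" was evicted`)
	}

	// Keys skips expired entries and reports keys from least to most recently used.
	fmt.Println(c.Keys()) // [b c]
}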
diff --git a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
index b7c953225..590c16867 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/managedfields/extract.go
@@ -35,7 +35,22 @@ import (
// that no managed fields were found for the fieldManager because other field managers
// have taken ownership of all the fields previously owned by the fieldManager. It is
// also possible the fieldManager never owned fields.
-func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldManager string, applyConfiguration interface{}) error {
+//
+// The provided object MUST be a root resource object since subresource objects
+// do not contain their own managed fields. For example, an autoscaling.Scale
+// object read from a "scale" subresource does not have any managed fields and so
+// cannot be used as the object.
+//
+// If the fields of a subresource are a subset of the fields of the root object,
+// and their field paths and types are exactly the same, then ExtractInto can be
+// called with the root resource as the object and the subresource as the
+// applyConfiguration. This works for "status", obviously, because status is
+// represented by the exact same object as the root resource. This does NOT
+// work, for example, with the "scale" subresources of Deployment, ReplicaSet and
+// StatefulSet. While the spec.replicas, status.replicas fields are in the same
+// exact field path locations as they are in autoscaling.Scale, the selector
+// fields are in different locations, and are a different type.
+func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldManager string, applyConfiguration interface{}, subresource string) error {
typedObj, err := toTyped(object, objectType)
if err != nil {
return fmt.Errorf("error converting obj to typed: %w", err)
@@ -45,7 +60,7 @@ func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldMan
if err != nil {
return fmt.Errorf("error accessing metadata: %w", err)
}
- fieldsEntry, ok := findManagedFields(accessor, fieldManager)
+ fieldsEntry, ok := findManagedFields(accessor, fieldManager, subresource)
if !ok {
return nil
}
@@ -66,10 +81,10 @@ func ExtractInto(object runtime.Object, objectType typed.ParseableType, fieldMan
return nil
}
-func findManagedFields(accessor metav1.Object, fieldManager string) (metav1.ManagedFieldsEntry, bool) {
+func findManagedFields(accessor metav1.Object, fieldManager string, subresource string) (metav1.ManagedFieldsEntry, bool) {
objManagedFields := accessor.GetManagedFields()
for _, mf := range objManagedFields {
- if mf.Manager == fieldManager && mf.Operation == metav1.ManagedFieldsOperationApply {
+ if mf.Manager == fieldManager && mf.Operation == metav1.ManagedFieldsOperationApply && mf.Subresource == subresource {
return mf, true
}
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
index ce69b8054..d75ac6efa 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go
@@ -113,6 +113,7 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport {
t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
}
// If no custom dialer is set, use the default context dialer
+ //lint:file-ignore SA1019 Keep supporting deprecated Dial method of custom transports
if t.DialContext == nil && t.Dial == nil {
t.DialContext = defaultTransport.DialContext
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
index 600f3befd..fd2081a28 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go
@@ -987,10 +987,10 @@ func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey
return nil
}
- var nonDeleteList, toDeleteList []interface{}
+ var nonDeleteList []interface{}
var err error
if len(mergeKey) > 0 {
- nonDeleteList, toDeleteList, err = extractToDeleteItems(typedPatchList)
+ nonDeleteList, _, err = extractToDeleteItems(typedPatchList)
if err != nil {
return err
}
@@ -1018,7 +1018,6 @@ func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey
if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) {
return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList)
}
- typedPatchList = append(nonDeleteList, toDeleteList...)
return nil
}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
index 0cd5d6577..c283ad189 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
@@ -181,7 +181,7 @@ func Invalid(field *Path, value interface{}, detail string) *Error {
// valid values).
func NotSupported(field *Path, value interface{}, validValues []string) *Error {
detail := ""
- if validValues != nil && len(validValues) > 0 {
+ if len(validValues) > 0 {
quotedValues := make([]string, len(validValues))
for i, v := range validValues {
quotedValues[i] = strconv.Quote(v)
diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
index 3dea7fe7f..afb24876a 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go
@@ -205,10 +205,29 @@ var ErrWaitTimeout = errors.New("timed out waiting for the condition")
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)
+// ConditionWithContextFunc returns true if the condition is satisfied, or an error
+// if the loop should be aborted.
+//
+// The caller passes along a context that can be used by the condition function.
+type ConditionWithContextFunc func(context.Context) (done bool, err error)
+
+// WithContext converts a ConditionFunc into a ConditionWithContextFunc
+func (cf ConditionFunc) WithContext() ConditionWithContextFunc {
+ return func(context.Context) (done bool, err error) {
+ return cf()
+ }
+}
+
// runConditionWithCrashProtection runs a ConditionFunc with crash protection
func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) {
+ return runConditionWithCrashProtectionWithContext(context.TODO(), condition.WithContext())
+}
+
+// runConditionWithCrashProtectionWithContext runs a
+// ConditionWithContextFunc with crash protection.
+func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) {
defer runtime.HandleCrash()
- return condition()
+ return condition(ctx)
}
// Backoff holds parameters applied to a Backoff function.
@@ -418,13 +437,62 @@ func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
//
// If you want to Poll something forever, see PollInfinite.
func Poll(interval, timeout time.Duration, condition ConditionFunc) error {
- return pollInternal(poller(interval, timeout), condition)
+ return PollWithContext(context.Background(), interval, timeout, condition.WithContext())
}
-func pollInternal(wait WaitFunc, condition ConditionFunc) error {
- done := make(chan struct{})
- defer close(done)
- return WaitFor(wait, condition, done)
+// PollWithContext tries a condition func until it returns true, an error,
+// or when the context expires or the timeout is reached, whichever
+// happens first.
+//
+// PollWithContext always waits the interval before the run of 'condition'.
+// 'condition' will always be invoked at least once.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+//
+// If you want to Poll something forever, see PollInfinite.
+func PollWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
+ return poll(ctx, false, poller(interval, timeout), condition)
+}
+
+// PollUntil tries a condition func until it returns true, an error or stopCh is
+// closed.
+//
+// PollUntil always waits interval before the first run of 'condition'.
+// 'condition' will always be invoked at least once.
+func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
+ ctx, cancel := contextForChannel(stopCh)
+ defer cancel()
+ return PollUntilWithContext(ctx, interval, condition.WithContext())
+}
+
+// PollUntilWithContext tries a condition func until it returns true,
+// an error or the specified context is cancelled or expired.
+//
+// PollUntilWithContext always waits interval before the first run of 'condition'.
+// 'condition' will always be invoked at least once.
+func PollUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
+ return poll(ctx, false, poller(interval, 0), condition)
+}
+
+// PollInfinite tries a condition func until it returns true or an error
+//
+// PollInfinite always waits the interval before the run of 'condition'.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+func PollInfinite(interval time.Duration, condition ConditionFunc) error {
+ return PollInfiniteWithContext(context.Background(), interval, condition.WithContext())
+}
+
+// PollInfiniteWithContext tries a condition func until it returns true or an error
+//
+// PollInfiniteWithContext always waits the interval before the run of 'condition'.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+func PollInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
+ return poll(ctx, false, poller(interval, 0), condition)
}
// PollImmediate tries a condition func until it returns true, an error, or the timeout
@@ -438,30 +506,40 @@ func pollInternal(wait WaitFunc, condition ConditionFunc) error {
//
// If you want to immediately Poll something forever, see PollImmediateInfinite.
func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error {
- return pollImmediateInternal(poller(interval, timeout), condition)
+ return PollImmediateWithContext(context.Background(), interval, timeout, condition.WithContext())
}
-func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error {
- done, err := runConditionWithCrashProtection(condition)
- if err != nil {
- return err
- }
- if done {
- return nil
- }
- return pollInternal(wait, condition)
-}
-
-// PollInfinite tries a condition func until it returns true or an error
+// PollImmediateWithContext tries a condition func until it returns true, an error,
+// or the timeout is reached or the specified context expires, whichever happens first.
//
-// PollInfinite always waits the interval before the run of 'condition'.
+// PollImmediateWithContext always checks 'condition' before waiting for the interval.
+// 'condition' will always be invoked at least once.
//
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
-func PollInfinite(interval time.Duration, condition ConditionFunc) error {
- done := make(chan struct{})
- defer close(done)
- return PollUntil(interval, condition, done)
+//
+// If you want to immediately Poll something forever, see PollImmediateInfinite.
+func PollImmediateWithContext(ctx context.Context, interval, timeout time.Duration, condition ConditionWithContextFunc) error {
+ return poll(ctx, true, poller(interval, timeout), condition)
+}
+
+// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
+//
+// PollImmediateUntil runs the 'condition' before waiting for the interval.
+// 'condition' will always be invoked at least once.
+func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
+ ctx, cancel := contextForChannel(stopCh)
+ defer cancel()
+ return PollImmediateUntilWithContext(ctx, interval, condition.WithContext())
+}
+
+// PollImmediateUntilWithContext tries a condition func until it returns true,
+// an error or the specified context is cancelled or expired.
+//
+// PollImmediateUntilWithContext runs the 'condition' before waiting for the interval.
+// 'condition' will always be invoked at least once.
+func PollImmediateUntilWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
+ return poll(ctx, true, poller(interval, 0), condition)
}
// PollImmediateInfinite tries a condition func until it returns true or an error
@@ -471,44 +549,46 @@ func PollInfinite(interval time.Duration, condition ConditionFunc) error {
// Some intervals may be missed if the condition takes too long or the time
// window is too short.
func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error {
- done, err := runConditionWithCrashProtection(condition)
- if err != nil {
- return err
- }
- if done {
- return nil
- }
- return PollInfinite(interval, condition)
+ return PollImmediateInfiniteWithContext(context.Background(), interval, condition.WithContext())
}
-// PollUntil tries a condition func until it returns true, an error or stopCh is
-// closed.
+// PollImmediateInfiniteWithContext tries a condition func until it returns true
+// or an error or the specified context gets cancelled or expired.
//
-// PollUntil always waits interval before the first run of 'condition'.
-// 'condition' will always be invoked at least once.
-func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
- ctx, cancel := contextForChannel(stopCh)
- defer cancel()
- return WaitFor(poller(interval, 0), condition, ctx.Done())
+// PollImmediateInfiniteWithContext runs the 'condition' before waiting for the interval.
+//
+// Some intervals may be missed if the condition takes too long or the time
+// window is too short.
+func PollImmediateInfiniteWithContext(ctx context.Context, interval time.Duration, condition ConditionWithContextFunc) error {
+ return poll(ctx, true, poller(interval, 0), condition)
}
-// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
-//
-// PollImmediateUntil runs the 'condition' before waiting for the interval.
-// 'condition' will always be invoked at least once.
-func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
- done, err := condition()
- if err != nil {
- return err
- }
- if done {
- return nil
+// Internally used, each of the public 'Poll*' functions defined in this
+// package should invoke this internal function with appropriate parameters.
+// ctx: the context specified by the caller, for infinite polling pass
+// a context that never gets cancelled or expired.
+// immediate: if true, the 'condition' will be invoked before waiting for the interval,
+// in this case 'condition' will always be invoked at least once.
+// wait: user specified WaitFunc function that controls at what interval the condition
+// function should be invoked periodically and whether it is bound by a timeout.
+// condition: user specified ConditionWithContextFunc function.
+func poll(ctx context.Context, immediate bool, wait WaitWithContextFunc, condition ConditionWithContextFunc) error {
+ if immediate {
+ done, err := runConditionWithCrashProtectionWithContext(ctx, condition)
+ if err != nil {
+ return err
+ }
+ if done {
+ return nil
+ }
}
+
select {
- case <-stopCh:
+ case <-ctx.Done():
+ // returning ctx.Err() will break backward compatibility
return ErrWaitTimeout
default:
- return PollUntil(interval, condition, stopCh)
+ return WaitForWithContext(ctx, wait, condition)
}
}
@@ -516,6 +596,20 @@ func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh
// should be executed and is closed when the last test should be invoked.
type WaitFunc func(done <-chan struct{}) <-chan struct{}
+// WithContext converts the WaitFunc to an equivalent WaitWithContextFunc
+func (w WaitFunc) WithContext() WaitWithContextFunc {
+ return func(ctx context.Context) <-chan struct{} {
+ return w(ctx.Done())
+ }
+}
+
+// WaitWithContextFunc creates a channel that receives an item every time a test
+// should be executed and is closed when the last test should be invoked.
+//
+// When the specified context gets cancelled or expires the function
+// stops sending item and returns immediately.
+type WaitWithContextFunc func(ctx context.Context) <-chan struct{}
+
// WaitFor continually checks 'fn' as driven by 'wait'.
//
// WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value
@@ -532,13 +626,35 @@ type WaitFunc func(done <-chan struct{}) <-chan struct{}
// "uniform pseudo-random", the `fn` might still run one or multiple time,
// though eventually `WaitFor` will return.
func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
- stopCh := make(chan struct{})
- defer close(stopCh)
- c := wait(stopCh)
+ ctx, cancel := contextForChannel(done)
+ defer cancel()
+ return WaitForWithContext(ctx, wait.WithContext(), fn.WithContext())
+}
+
+// WaitForWithContext continually checks 'fn' as driven by 'wait'.
+//
+// WaitForWithContext gets a channel from 'wait()'', and then invokes 'fn'
+// once for every value placed on the channel and once more when the
+// channel is closed. If the channel is closed and 'fn'
+// returns false without error, WaitForWithContext returns ErrWaitTimeout.
+//
+// If 'fn' returns an error the loop ends and that error is returned. If
+// 'fn' returns true the loop ends and nil is returned.
+//
+// context.Canceled will be returned if the ctx.Done() channel is closed
+// without fn ever returning true.
+//
+// When the ctx.Done() channel is closed, because the golang `select` statement is
+// "uniform pseudo-random", the `fn` might still run one or multiple times,
+// though eventually `WaitForWithContext` will return.
+func WaitForWithContext(ctx context.Context, wait WaitWithContextFunc, fn ConditionWithContextFunc) error {
+ waitCtx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ c := wait(waitCtx)
for {
select {
case _, open := <-c:
- ok, err := runConditionWithCrashProtection(fn)
+ ok, err := runConditionWithCrashProtectionWithContext(ctx, fn)
if err != nil {
return err
}
@@ -548,7 +664,8 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
if !open {
return ErrWaitTimeout
}
- case <-done:
+ case <-ctx.Done():
+ // returning ctx.Err() will break backward compatibility
return ErrWaitTimeout
}
}
@@ -564,8 +681,8 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
-func poller(interval, timeout time.Duration) WaitFunc {
- return WaitFunc(func(done <-chan struct{}) <-chan struct{} {
+func poller(interval, timeout time.Duration) WaitWithContextFunc {
+ return WaitWithContextFunc(func(ctx context.Context) <-chan struct{} {
ch := make(chan struct{})
go func() {
@@ -595,7 +712,7 @@ func poller(interval, timeout time.Duration) WaitFunc {
}
case <-after:
return
- case <-done:
+ case <-ctx.Done():
return
}
}
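All the new context-aware entry points funnel into the internal poll() above, and the channel-based variants remain as thin wrappers for backward compatibility. A minimal sketch of the context flavour, with a stand-in readiness check:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	start := time.Now()
	// Runs the condition immediately, then once per second, until it returns
	// true or an error, the 10s timeout elapses, or ctx is cancelled.
	err := wait.PollImmediateWithContext(ctx, time.Second, 10*time.Second,
		func(ctx context.Context) (bool, error) {
			return time.Since(start) > 3*time.Second, nil // stand-in for a real readiness check
		})
	if errors.Is(err, wait.ErrWaitTimeout) {
		fmt.Println("condition never became true")
		return
	}
	fmt.Println("done, err =", err)
}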
diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
index 612d63a69..56f4262ac 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go
@@ -291,15 +291,19 @@ func (r *YAMLReader) Read() ([]byte, error) {
if i := bytes.Index(line, []byte(separator)); i == 0 {
// We have a potential document terminator
i += sep
- after := line[i:]
- if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 {
- if buffer.Len() != 0 {
- return buffer.Bytes(), nil
- }
- if err == io.EOF {
- return nil, err
+ trimmed := strings.TrimSpace(string(line[i:]))
+ // We only allow comments and spaces following the yaml doc separator, otherwise we'll return an error
+ if len(trimmed) > 0 && string(trimmed[0]) != "#" {
+ return nil, YAMLSyntaxError{
+ err: fmt.Errorf("invalid Yaml document separator: %s", trimmed),
}
}
+ if buffer.Len() != 0 {
+ return buffer.Bytes(), nil
+ }
+ if err == io.EOF {
+ return nil, err
+ }
}
if err == io.EOF {
if buffer.Len() != 0 {
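With the change above, only whitespace or a comment may follow the "---" separator; anything else is now reported as a YAMLSyntaxError instead of being silently folded into the document. A small sketch using the existing YAMLReader:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"

	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	input := "a: 1\n--- # a comment after the separator is still allowed\nb: 2\n"
	r := utilyaml.NewYAMLReader(bufio.NewReader(strings.NewReader(input)))

	for {
		doc, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err) // e.g. "--- trailing text" now fails here
		}
		fmt.Printf("document: %q\n", string(doc))
	}
}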
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
index 6c1658804..7ae061e3c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/mutatingwebhookconfiguration.go
@@ -48,7 +48,7 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl
// ExtractMutatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
// mutatingWebhookConfiguration. If no managedFields are found in mutatingWebhookConfiguration for fieldManager, a
// MutatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned any fields.
// mutatingWebhookConfiguration must be a unmodified MutatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
+ return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "")
+}
+
+// ExtractMutatingWebhookConfigurationStatus is the same as ExtractMutatingWebhookConfiguration except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
+ return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "status")
+}
+
+func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *apiadmissionregistrationv1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
b := &MutatingWebhookConfigurationApplyConfiguration{}
- err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration"), fieldManager, b)
+ err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
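These generated helpers now pass the subresource through to managedfields.ExtractInto, and each kind gains an Extract<Kind>Status variant. A minimal sketch of the plain variant added above; the clientset is assumed to be configured elsewhere, and the object and manager names are made up:

package webhookextract

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	admissionregistrationv1ac "k8s.io/client-go/applyconfigurations/admissionregistration/v1"
	"k8s.io/client-go/kubernetes"
)

// extractOwnedFields returns the apply configuration holding only the fields
// that fieldManager previously applied to the named MutatingWebhookConfiguration.
func extractOwnedFields(ctx context.Context, cs kubernetes.Interface, name, fieldManager string) (*admissionregistrationv1ac.MutatingWebhookConfigurationApplyConfiguration, error) {
	cfg, err := cs.AdmissionregistrationV1().MutatingWebhookConfigurations().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// Use ExtractMutatingWebhookConfigurationStatus instead to target the status subresource.
	return admissionregistrationv1ac.ExtractMutatingWebhookConfiguration(cfg, fieldManager)
}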
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
index f7802f540..ae19ed81e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1/validatingwebhookconfiguration.go
@@ -48,7 +48,7 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration
// ExtractValidatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
// validatingWebhookConfiguration. If no managedFields are found in validatingWebhookConfiguration for fieldManager, a
// ValidatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for fieldManager because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned any fields.
// validatingWebhookConfiguration must be a unmodified ValidatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
+ return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "")
+}
+
+// ExtractValidatingWebhookConfigurationStatus is the same as ExtractValidatingWebhookConfiguration except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
+ return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "status")
+}
+
+func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *apiadmissionregistrationv1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
b := &ValidatingWebhookConfigurationApplyConfiguration{}
- err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration"), fieldManager, b)
+ err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
index 47ed1e252..178745c23 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/mutatingwebhookconfiguration.go
@@ -48,7 +48,7 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl
// ExtractMutatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
// mutatingWebhookConfiguration. If no managedFields are found in mutatingWebhookConfiguration for fieldManager, a
// MutatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// mutatingWebhookConfiguration must be a unmodified MutatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func MutatingWebhookConfiguration(name string) *MutatingWebhookConfigurationAppl
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
+ return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "")
+}
+
+// ExtractMutatingWebhookConfigurationStatus is the same as ExtractMutatingWebhookConfiguration except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractMutatingWebhookConfigurationStatus(mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, fieldManager string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
+ return extractMutatingWebhookConfiguration(mutatingWebhookConfiguration, fieldManager, "status")
+}
+
+func extractMutatingWebhookConfiguration(mutatingWebhookConfiguration *admissionregistrationv1beta1.MutatingWebhookConfiguration, fieldManager string, subresource string) (*MutatingWebhookConfigurationApplyConfiguration, error) {
b := &MutatingWebhookConfigurationApplyConfiguration{}
- err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration"), fieldManager, b)
+ err := managedfields.ExtractInto(mutatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.MutatingWebhookConfiguration"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
index dac1c27a0..e60d997f8 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1/validatingwebhookconfiguration.go
@@ -48,7 +48,7 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration
// ExtractValidatingWebhookConfiguration extracts the applied configuration owned by fieldManager from
// validatingWebhookConfiguration. If no managedFields are found in validatingWebhookConfiguration for fieldManager, a
// ValidatingWebhookConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// validatingWebhookConfiguration must be a unmodified ValidatingWebhookConfiguration API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func ValidatingWebhookConfiguration(name string) *ValidatingWebhookConfiguration
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
+ return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "")
+}
+
+// ExtractValidatingWebhookConfigurationStatus is the same as ExtractValidatingWebhookConfiguration except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractValidatingWebhookConfigurationStatus(validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, fieldManager string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
+ return extractValidatingWebhookConfiguration(validatingWebhookConfiguration, fieldManager, "status")
+}
+
+func extractValidatingWebhookConfiguration(validatingWebhookConfiguration *admissionregistrationv1beta1.ValidatingWebhookConfiguration, fieldManager string, subresource string) (*ValidatingWebhookConfigurationApplyConfiguration, error) {
b := &ValidatingWebhookConfigurationApplyConfiguration{}
- err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration"), fieldManager, b)
+ err := managedfields.ExtractInto(validatingWebhookConfiguration, internal.Parser().Type("io.k8s.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
index 44d9b05c0..180b77625 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apiserverinternal/v1alpha1/storageversion.go
@@ -49,7 +49,7 @@ func StorageVersion(name string) *StorageVersionApplyConfiguration {
// ExtractStorageVersion extracts the applied configuration owned by fieldManager from
// storageVersion. If no managedFields are found in storageVersion for fieldManager, a
// StorageVersionApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// storageVersion must be a unmodified StorageVersion API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func StorageVersion(name string) *StorageVersionApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) {
+ return extractStorageVersion(storageVersion, fieldManager, "")
+}
+
+// ExtractStorageVersionStatus is the same as ExtractStorageVersion except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractStorageVersionStatus(storageVersion *v1alpha1.StorageVersion, fieldManager string) (*StorageVersionApplyConfiguration, error) {
+ return extractStorageVersion(storageVersion, fieldManager, "status")
+}
+
+func extractStorageVersion(storageVersion *v1alpha1.StorageVersion, fieldManager string, subresource string) (*StorageVersionApplyConfiguration, error) {
b := &StorageVersionApplyConfiguration{}
- err := managedfields.ExtractInto(storageVersion, internal.Parser().Type("io.k8s.api.apiserverinternal.v1alpha1.StorageVersion"), fieldManager, b)
+ err := managedfields.ExtractInto(storageVersion, internal.Parser().Type("io.k8s.api.apiserverinternal.v1alpha1.StorageVersion"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
index d01f77a5e..28a0c582b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/controllerrevision.go
@@ -51,7 +51,7 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur
// ExtractControllerRevision extracts the applied configuration owned by fieldManager from
// controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a
// ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// controllerRevision must be a unmodified ControllerRevision API object that was retrieved from the Kubernetes API.
@@ -60,8 +60,19 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractControllerRevision(controllerRevision *appsv1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+ return extractControllerRevision(controllerRevision, fieldManager, "")
+}
+
+// ExtractControllerRevisionStatus is the same as ExtractControllerRevision except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractControllerRevisionStatus(controllerRevision *appsv1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+ return extractControllerRevision(controllerRevision, fieldManager, "status")
+}
+
+func extractControllerRevision(controllerRevision *appsv1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
b := &ControllerRevisionApplyConfiguration{}
- err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1.ControllerRevision"), fieldManager, b)
+ err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1.ControllerRevision"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
index bcfe7a4a6..6dd8c6e88 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/daemonset.go
@@ -50,7 +50,7 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
// ExtractDaemonSet extracts the applied configuration owned by fieldManager from
// daemonSet. If no managedFields are found in daemonSet for fieldManager, a
// DaemonSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// daemonSet must be a unmodified DaemonSet API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
+ return extractDaemonSet(daemonSet, fieldManager, "")
+}
+
+// ExtractDaemonSetStatus is the same as ExtractDaemonSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractDaemonSetStatus(daemonSet *apiappsv1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
+ return extractDaemonSet(daemonSet, fieldManager, "status")
+}
+
+func extractDaemonSet(daemonSet *apiappsv1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) {
b := &DaemonSetApplyConfiguration{}
- err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1.DaemonSet"), fieldManager, b)
+ err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1.DaemonSet"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
index 37ef1896a..d33321c52 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/deployment.go
@@ -50,7 +50,7 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// ExtractDeployment extracts the applied configuration owned by fieldManager from
// deployment. If no managedFields are found in deployment for fieldManager, a
// DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// deployment must be a unmodified Deployment API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractDeployment(deployment *apiappsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return extractDeployment(deployment, fieldManager, "")
+}
+
+// ExtractDeploymentStatus is the same as ExtractDeployment except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractDeploymentStatus(deployment *apiappsv1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return extractDeployment(deployment, fieldManager, "status")
+}
+
+func extractDeployment(deployment *apiappsv1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
b := &DeploymentApplyConfiguration{}
- err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1.Deployment"), fieldManager, b)
+ err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1.Deployment"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
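
Since Deployment does have a status subresource, it is a natural place to show what the new ExtractDeploymentStatus helper pairs with. A minimal sketch, assuming an illustrative clientset, a Deployment "example" in namespace "default", and a field manager "example-controller":

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
	"k8s.io/client-go/kubernetes"
)

// patchDeploymentStatus extracts only the status fields owned by this manager
// and applies an updated observedGeneration through the status subresource.
func patchDeploymentStatus(ctx context.Context, cs kubernetes.Interface, observed int64) error {
	live, err := cs.AppsV1().Deployments("default").Get(ctx, "example", metav1.GetOptions{})
	if err != nil {
		return err
	}
	ac, err := appsv1ac.ExtractDeploymentStatus(live, "example-controller")
	if err != nil {
		return err
	}
	ac.WithStatus(appsv1ac.DeploymentStatus().WithObservedGeneration(observed))
	_, err = cs.AppsV1().Deployments("default").
		ApplyStatus(ctx, ac, metav1.ApplyOptions{FieldManager: "example-controller", Force: true})
	return err
}
```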
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
index fc7a468e4..0affbf82f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/replicaset.go
@@ -50,7 +50,7 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
// ExtractReplicaSet extracts the applied configuration owned by fieldManager from
// replicaSet. If no managedFields are found in replicaSet for fieldManager, a
// ReplicaSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// replicaSet must be a unmodified ReplicaSet API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+ return extractReplicaSet(replicaSet, fieldManager, "")
+}
+
+// ExtractReplicaSetStatus is the same as ExtractReplicaSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractReplicaSetStatus(replicaSet *apiappsv1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+ return extractReplicaSet(replicaSet, fieldManager, "status")
+}
+
+func extractReplicaSet(replicaSet *apiappsv1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) {
b := &ReplicaSetApplyConfiguration{}
- err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1.ReplicaSet"), fieldManager, b)
+ err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1.ReplicaSet"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
index ff0d50862..7cb5ec12c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulset.go
@@ -50,7 +50,7 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
// ExtractStatefulSet extracts the applied configuration owned by fieldManager from
// statefulSet. If no managedFields are found in statefulSet for fieldManager, a
// StatefulSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// statefulSet must be a unmodified StatefulSet API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+ return extractStatefulSet(statefulSet, fieldManager, "")
+}
+
+// ExtractStatefulSetStatus is the same as ExtractStatefulSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractStatefulSetStatus(statefulSet *apiappsv1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+ return extractStatefulSet(statefulSet, fieldManager, "status")
+}
+
+func extractStatefulSet(statefulSet *apiappsv1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) {
b := &StatefulSetApplyConfiguration{}
- err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1.StatefulSet"), fieldManager, b)
+ err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1.StatefulSet"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
index 502f00ba4..ade14180b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetspec.go
@@ -35,6 +35,7 @@ type StatefulSetSpecApplyConfiguration struct {
PodManagementPolicy *appsv1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
}
// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with
@@ -111,3 +112,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithRevisionHistoryLimit(value int32
b.RevisionHistoryLimit = &value
return b
}
+
+// WithMinReadySeconds sets the MinReadySeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinReadySeconds field is set to the value of the last call.
+func (b *StatefulSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *StatefulSetSpecApplyConfiguration {
+ b.MinReadySeconds = &value
+ return b
+}
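
On the spec side, this bump also surfaces the StatefulSet minReadySeconds field (alpha in Kubernetes 1.22 behind the StatefulSetMinReadySeconds feature gate). A minimal sketch of where the new setter slots into a server-side-apply configuration; all names here are illustrative:

```go
package example

import (
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

// exampleStatefulSet builds an apply configuration that uses the newly
// generated WithMinReadySeconds setter.
func exampleStatefulSet() *appsv1ac.StatefulSetApplyConfiguration {
	return appsv1ac.StatefulSet("example", "default").
		WithSpec(appsv1ac.StatefulSetSpec().
			WithServiceName("example").
			WithReplicas(3).
			WithMinReadySeconds(10). // only honored when the feature gate is enabled
			WithSelector(metav1ac.LabelSelector().
				WithMatchLabels(map[string]string{"app": "example"})))
}
```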
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go
index 58c07475e..d88881b65 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1/statefulsetstatus.go
@@ -30,6 +30,7 @@ type StatefulSetStatusApplyConfiguration struct {
UpdateRevision *string `json:"updateRevision,omitempty"`
CollisionCount *int32 `json:"collisionCount,omitempty"`
Conditions []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
}
// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with
@@ -114,3 +115,11 @@ func (b *StatefulSetStatusApplyConfiguration) WithConditions(values ...*Stateful
}
return b
}
+
+// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AvailableReplicas field is set to the value of the last call.
+func (b *StatefulSetStatusApplyConfiguration) WithAvailableReplicas(value int32) *StatefulSetStatusApplyConfiguration {
+ b.AvailableReplicas = &value
+ return b
+}
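
The matching status-side addition, availableReplicas, is what a controller owning the StatefulSet status subresource would report back. A minimal sketch under the same illustrative assumptions as the Deployment example above:

```go
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1ac "k8s.io/client-go/applyconfigurations/apps/v1"
	"k8s.io/client-go/kubernetes"
)

// reportAvailableReplicas publishes the new availableReplicas status field via
// the status subresource, touching only fields owned by this manager.
func reportAvailableReplicas(ctx context.Context, cs kubernetes.Interface, available int32) error {
	live, err := cs.AppsV1().StatefulSets("default").Get(ctx, "example", metav1.GetOptions{})
	if err != nil {
		return err
	}
	ac, err := appsv1ac.ExtractStatefulSetStatus(live, "example-controller")
	if err != nil {
		return err
	}
	ac.WithStatus(appsv1ac.StatefulSetStatus().WithAvailableReplicas(available))
	_, err = cs.AppsV1().StatefulSets("default").
		ApplyStatus(ctx, ac, metav1.ApplyOptions{FieldManager: "example-controller", Force: true})
	return err
}
```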
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
index 4497fd69d..bcdfa44b2 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/controllerrevision.go
@@ -51,7 +51,7 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur
// ExtractControllerRevision extracts the applied configuration owned by fieldManager from
// controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a
// ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// controllerRevision must be a unmodified ControllerRevision API object that was retrieved from the Kubernetes API.
@@ -60,8 +60,19 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractControllerRevision(controllerRevision *v1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+ return extractControllerRevision(controllerRevision, fieldManager, "")
+}
+
+// ExtractControllerRevisionStatus is the same as ExtractControllerRevision except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractControllerRevisionStatus(controllerRevision *v1beta1.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+ return extractControllerRevision(controllerRevision, fieldManager, "status")
+}
+
+func extractControllerRevision(controllerRevision *v1beta1.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
b := &ControllerRevisionApplyConfiguration{}
- err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta1.ControllerRevision"), fieldManager, b)
+ err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta1.ControllerRevision"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
index c79a614d3..eddab1789 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/deployment.go
@@ -50,7 +50,7 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// ExtractDeployment extracts the applied configuration owned by fieldManager from
// deployment. If no managedFields are found in deployment for fieldManager, a
// DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// deployment must be a unmodified Deployment API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractDeployment(deployment *appsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return extractDeployment(deployment, fieldManager, "")
+}
+
+// ExtractDeploymentStatus is the same as ExtractDeployment except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractDeploymentStatus(deployment *appsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return extractDeployment(deployment, fieldManager, "status")
+}
+
+func extractDeployment(deployment *appsv1beta1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
b := &DeploymentApplyConfiguration{}
- err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta1.Deployment"), fieldManager, b)
+ err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta1.Deployment"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
index 909ee91f8..a4f64cd85 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulset.go
@@ -50,7 +50,7 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
// ExtractStatefulSet extracts the applied configuration owned by fieldManager from
// statefulSet. If no managedFields are found in statefulSet for fieldManager, a
// StatefulSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// statefulSet must be a unmodified StatefulSet API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractStatefulSet(statefulSet *appsv1beta1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+ return extractStatefulSet(statefulSet, fieldManager, "")
+}
+
+// ExtractStatefulSetStatus is the same as ExtractStatefulSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractStatefulSetStatus(statefulSet *appsv1beta1.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+ return extractStatefulSet(statefulSet, fieldManager, "status")
+}
+
+func extractStatefulSet(statefulSet *appsv1beta1.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) {
b := &StatefulSetApplyConfiguration{}
- err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta1.StatefulSet"), fieldManager, b)
+ err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta1.StatefulSet"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
index 61b86a9d8..befd1f7e0 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetspec.go
@@ -35,6 +35,7 @@ type StatefulSetSpecApplyConfiguration struct {
PodManagementPolicy *v1beta1.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
}
// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with
@@ -111,3 +112,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithRevisionHistoryLimit(value int32
b.RevisionHistoryLimit = &value
return b
}
+
+// WithMinReadySeconds sets the MinReadySeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinReadySeconds field is set to the value of the last call.
+func (b *StatefulSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *StatefulSetSpecApplyConfiguration {
+ b.MinReadySeconds = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go
index b716352d2..f31066b6f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta1/statefulsetstatus.go
@@ -30,6 +30,7 @@ type StatefulSetStatusApplyConfiguration struct {
UpdateRevision *string `json:"updateRevision,omitempty"`
CollisionCount *int32 `json:"collisionCount,omitempty"`
Conditions []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
}
// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with
@@ -114,3 +115,11 @@ func (b *StatefulSetStatusApplyConfiguration) WithConditions(values ...*Stateful
}
return b
}
+
+// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AvailableReplicas field is set to the value of the last call.
+func (b *StatefulSetStatusApplyConfiguration) WithAvailableReplicas(value int32) *StatefulSetStatusApplyConfiguration {
+ b.AvailableReplicas = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
index 7c36cd82c..f5d906162 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/controllerrevision.go
@@ -51,7 +51,7 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur
// ExtractControllerRevision extracts the applied configuration owned by fieldManager from
// controllerRevision. If no managedFields are found in controllerRevision for fieldManager, a
// ControllerRevisionApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// controllerRevision must be a unmodified ControllerRevision API object that was retrieved from the Kubernetes API.
@@ -60,8 +60,19 @@ func ControllerRevision(name, namespace string) *ControllerRevisionApplyConfigur
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractControllerRevision(controllerRevision *v1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+ return extractControllerRevision(controllerRevision, fieldManager, "")
+}
+
+// ExtractControllerRevisionStatus is the same as ExtractControllerRevision except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractControllerRevisionStatus(controllerRevision *v1beta2.ControllerRevision, fieldManager string) (*ControllerRevisionApplyConfiguration, error) {
+ return extractControllerRevision(controllerRevision, fieldManager, "status")
+}
+
+func extractControllerRevision(controllerRevision *v1beta2.ControllerRevision, fieldManager string, subresource string) (*ControllerRevisionApplyConfiguration, error) {
b := &ControllerRevisionApplyConfiguration{}
- err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta2.ControllerRevision"), fieldManager, b)
+ err := managedfields.ExtractInto(controllerRevision, internal.Parser().Type("io.k8s.api.apps.v1beta2.ControllerRevision"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
index 174ee8a19..92708f0a8 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/daemonset.go
@@ -50,7 +50,7 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
// ExtractDaemonSet extracts the applied configuration owned by fieldManager from
// daemonSet. If no managedFields are found in daemonSet for fieldManager, a
// DaemonSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// daemonSet must be a unmodified DaemonSet API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractDaemonSet(daemonSet *appsv1beta2.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
+ return extractDaemonSet(daemonSet, fieldManager, "")
+}
+
+// ExtractDaemonSetStatus is the same as ExtractDaemonSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractDaemonSetStatus(daemonSet *appsv1beta2.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
+ return extractDaemonSet(daemonSet, fieldManager, "status")
+}
+
+func extractDaemonSet(daemonSet *appsv1beta2.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) {
b := &DaemonSetApplyConfiguration{}
- err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.DaemonSet"), fieldManager, b)
+ err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.DaemonSet"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
index c59ae290f..ad0c509db 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/deployment.go
@@ -50,7 +50,7 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// ExtractDeployment extracts the applied configuration owned by fieldManager from
// deployment. If no managedFields are found in deployment for fieldManager, a
// DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// deployment must be a unmodified Deployment API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractDeployment(deployment *appsv1beta2.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return extractDeployment(deployment, fieldManager, "")
+}
+
+// ExtractDeploymentStatus is the same as ExtractDeployment except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractDeploymentStatus(deployment *appsv1beta2.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return extractDeployment(deployment, fieldManager, "status")
+}
+
+func extractDeployment(deployment *appsv1beta2.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
b := &DeploymentApplyConfiguration{}
- err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta2.Deployment"), fieldManager, b)
+ err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.apps.v1beta2.Deployment"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
index 881c503ae..e2998f2c3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/replicaset.go
@@ -50,7 +50,7 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
// ExtractReplicaSet extracts the applied configuration owned by fieldManager from
// replicaSet. If no managedFields are found in replicaSet for fieldManager, a
// ReplicaSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// replicaSet must be a unmodified ReplicaSet API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractReplicaSet(replicaSet *appsv1beta2.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+ return extractReplicaSet(replicaSet, fieldManager, "")
+}
+
+// ExtractReplicaSetStatus is the same as ExtractReplicaSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractReplicaSetStatus(replicaSet *appsv1beta2.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+ return extractReplicaSet(replicaSet, fieldManager, "status")
+}
+
+func extractReplicaSet(replicaSet *appsv1beta2.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) {
b := &ReplicaSetApplyConfiguration{}
- err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.ReplicaSet"), fieldManager, b)
+ err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.ReplicaSet"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go
new file mode 100644
index 000000000..d4901edf9
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/scale.go
@@ -0,0 +1,233 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ v1beta2 "k8s.io/api/apps/v1beta2"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use
+// with apply.
+type ScaleApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *v1beta2.ScaleSpec `json:"spec,omitempty"`
+ Status *v1beta2.ScaleStatus `json:"status,omitempty"`
+}
+
+// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with
+// apply.
+func Scale() *ScaleApplyConfiguration {
+ return &ScaleApplyConfiguration{}
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithSelfLink sets the SelfLink field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SelfLink field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithSelfLink(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.SelfLink = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+// WithClusterName sets the ClusterName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterName field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithClusterName(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ClusterName = &value
+ return b
+}
+
+func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithSpec(value v1beta2.ScaleSpec) *ScaleApplyConfiguration {
+ b.Spec = &value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithStatus(value v1beta2.ScaleStatus) *ScaleApplyConfiguration {
+ b.Status = &value
+ return b
+}
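
scale.go is new in this release: apps/v1beta2, like the other group versions with scalable resources, now gets a generated Scale apply configuration, which the typed clients' new ApplyScale helpers consume. A minimal sketch of constructing one; the name, namespace, and replica count are illustrative assumptions:

```go
package example

import (
	appsv1beta2 "k8s.io/api/apps/v1beta2"
	appsv1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"
)

// exampleScale builds the new Scale apply configuration; Spec is the raw API
// type because ScaleSpec has no generated apply configuration of its own.
func exampleScale(replicas int32) *appsv1beta2ac.ScaleApplyConfiguration {
	return appsv1beta2ac.Scale().
		WithName("example").
		WithNamespace("default").
		WithSpec(appsv1beta2.ScaleSpec{Replicas: replicas})
}
```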
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
index 26a187947..0a242310c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulset.go
@@ -50,7 +50,7 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
// ExtractStatefulSet extracts the applied configuration owned by fieldManager from
// statefulSet. If no managedFields are found in statefulSet for fieldManager, a
// StatefulSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// statefulSet must be a unmodified StatefulSet API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func StatefulSet(name, namespace string) *StatefulSetApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractStatefulSet(statefulSet *appsv1beta2.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+ return extractStatefulSet(statefulSet, fieldManager, "")
+}
+
+// ExtractStatefulSetStatus is the same as ExtractStatefulSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractStatefulSetStatus(statefulSet *appsv1beta2.StatefulSet, fieldManager string) (*StatefulSetApplyConfiguration, error) {
+ return extractStatefulSet(statefulSet, fieldManager, "status")
+}
+
+func extractStatefulSet(statefulSet *appsv1beta2.StatefulSet, fieldManager string, subresource string) (*StatefulSetApplyConfiguration, error) {
b := &StatefulSetApplyConfiguration{}
- err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.StatefulSet"), fieldManager, b)
+ err := managedfields.ExtractInto(statefulSet, internal.Parser().Type("io.k8s.api.apps.v1beta2.StatefulSet"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
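
The Extract* functions are now split into an exported pair that shares an unexported helper, threading a subresource argument through to managedfields.ExtractInto. A minimal sketch of calling the new status variant, assuming a typed clientset; the namespace, object name, and field manager are illustrative:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"
	"k8s.io/client-go/kubernetes"
)

func extractStatusOwnedFields(ctx context.Context, cs kubernetes.Interface) error {
	// Extract* requires an unmodified object fetched from the API server.
	sts, err := cs.AppsV1beta2().StatefulSets("default").Get(ctx, "web", metav1.GetOptions{})
	if err != nil {
		return err
	}
	// New in this bump: extract only the fields the manager owns through the
	// status subresource, instead of the main resource.
	cfg, err := appsv1beta2ac.ExtractStatefulSetStatus(sts, "example-manager")
	if err != nil {
		return err
	}
	fmt.Printf("status-owned fields: %+v\n", cfg)
	return nil
}
```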
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
index eb2c8555b..44044be26 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetspec.go
@@ -35,6 +35,7 @@ type StatefulSetSpecApplyConfiguration struct {
PodManagementPolicy *v1beta2.PodManagementPolicyType `json:"podManagementPolicy,omitempty"`
UpdateStrategy *StatefulSetUpdateStrategyApplyConfiguration `json:"updateStrategy,omitempty"`
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty"`
+ MinReadySeconds *int32 `json:"minReadySeconds,omitempty"`
}
// StatefulSetSpecApplyConfiguration constructs an declarative configuration of the StatefulSetSpec type for use with
@@ -111,3 +112,11 @@ func (b *StatefulSetSpecApplyConfiguration) WithRevisionHistoryLimit(value int32
b.RevisionHistoryLimit = &value
return b
}
+
+// WithMinReadySeconds sets the MinReadySeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinReadySeconds field is set to the value of the last call.
+func (b *StatefulSetSpecApplyConfiguration) WithMinReadySeconds(value int32) *StatefulSetSpecApplyConfiguration {
+ b.MinReadySeconds = &value
+ return b
+}
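
The spec apply configuration gains a minReadySeconds field and setter. A minimal sketch of using it in a server-side apply, assuming the typed StatefulSet client's Apply method in this client-go revision; names, values, and the field manager are illustrative:

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsv1beta2ac "k8s.io/client-go/applyconfigurations/apps/v1beta2"
	"k8s.io/client-go/kubernetes"
)

func applyStatefulSetSpec(ctx context.Context, cs kubernetes.Interface) error {
	sts := appsv1beta2ac.StatefulSet("web", "default").
		WithSpec(appsv1beta2ac.StatefulSetSpec().
			WithReplicas(3).
			WithMinReadySeconds(10)) // setter added in this revision
	_, err := cs.AppsV1beta2().StatefulSets("default").Apply(ctx, sts, metav1.ApplyOptions{
		FieldManager: "example-manager",
		Force:        true,
	})
	return err
}
```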
diff --git a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
index 73f7c0b3f..63835904c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/apps/v1beta2/statefulsetstatus.go
@@ -30,6 +30,7 @@ type StatefulSetStatusApplyConfiguration struct {
UpdateRevision *string `json:"updateRevision,omitempty"`
CollisionCount *int32 `json:"collisionCount,omitempty"`
Conditions []StatefulSetConditionApplyConfiguration `json:"conditions,omitempty"`
+ AvailableReplicas *int32 `json:"availableReplicas,omitempty"`
}
// StatefulSetStatusApplyConfiguration constructs an declarative configuration of the StatefulSetStatus type for use with
@@ -114,3 +115,11 @@ func (b *StatefulSetStatusApplyConfiguration) WithConditions(values ...*Stateful
}
return b
}
+
+// WithAvailableReplicas sets the AvailableReplicas field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AvailableReplicas field is set to the value of the last call.
+func (b *StatefulSetStatusApplyConfiguration) WithAvailableReplicas(value int32) *StatefulSetStatusApplyConfiguration {
+ b.AvailableReplicas = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
index 6736bfb17..bf04b0131 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/horizontalpodautoscaler.go
@@ -50,7 +50,7 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
// ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from
// horizontalPodAutoscaler. If no managedFields are found in horizontalPodAutoscaler for fieldManager, a
// HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// horizontalPodAutoscaler must be a unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+ return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "")
+}
+
+// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+ return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status")
+}
+
+func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *apiautoscalingv1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
b := &HorizontalPodAutoscalerApplyConfiguration{}
- err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler"), fieldManager, b)
+ err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go
new file mode 100644
index 000000000..2d2cfeb97
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scale.go
@@ -0,0 +1,232 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use
+// with apply.
+type ScaleApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *ScaleSpecApplyConfiguration `json:"spec,omitempty"`
+ Status *ScaleStatusApplyConfiguration `json:"status,omitempty"`
+}
+
+// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with
+// apply.
+func Scale() *ScaleApplyConfiguration {
+ return &ScaleApplyConfiguration{}
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithSelfLink sets the SelfLink field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SelfLink field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithSelfLink(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.SelfLink = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+// WithClusterName sets the ClusterName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterName field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithClusterName(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ClusterName = &value
+ return b
+}
+
+func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithSpec(value *ScaleSpecApplyConfiguration) *ScaleApplyConfiguration {
+ b.Spec = value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithStatus(value *ScaleStatusApplyConfiguration) *ScaleApplyConfiguration {
+ b.Status = value
+ return b
+}
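
scale.go, scalespec.go, and scalestatus.go are new in autoscaling/v1; this type is what the scale-subresource apply helpers consume. A minimal sketch, assuming an ApplyScale method on the typed apps/v1 Deployment client in this client-go revision; the deployment name, namespace, replica count, and field manager are illustrative:

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscalingv1ac "k8s.io/client-go/applyconfigurations/autoscaling/v1"
	"k8s.io/client-go/kubernetes"
)

func applyDeploymentScale(ctx context.Context, cs kubernetes.Interface) error {
	// Build the desired scale with the new generated constructors.
	scale := autoscalingv1ac.Scale().
		WithSpec(autoscalingv1ac.ScaleSpec().WithReplicas(5))
	// Assumed: ApplyScale(ctx, name, scale, opts) on the apps/v1 Deployment client.
	_, err := cs.AppsV1().Deployments("default").ApplyScale(ctx, "web", scale, metav1.ApplyOptions{
		FieldManager: "example-manager",
		Force:        true,
	})
	return err
}
```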
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go
new file mode 100644
index 000000000..2339a8fef
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalespec.go
@@ -0,0 +1,39 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ScaleSpecApplyConfiguration represents an declarative configuration of the ScaleSpec type for use
+// with apply.
+type ScaleSpecApplyConfiguration struct {
+ Replicas *int32 `json:"replicas,omitempty"`
+}
+
+// ScaleSpecApplyConfiguration constructs an declarative configuration of the ScaleSpec type for use with
+// apply.
+func ScaleSpec() *ScaleSpecApplyConfiguration {
+ return &ScaleSpecApplyConfiguration{}
+}
+
+// WithReplicas sets the Replicas field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Replicas field is set to the value of the last call.
+func (b *ScaleSpecApplyConfiguration) WithReplicas(value int32) *ScaleSpecApplyConfiguration {
+ b.Replicas = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go
new file mode 100644
index 000000000..81c8d1b30
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v1/scalestatus.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// ScaleStatusApplyConfiguration represents an declarative configuration of the ScaleStatus type for use
+// with apply.
+type ScaleStatusApplyConfiguration struct {
+ Replicas *int32 `json:"replicas,omitempty"`
+ Selector *string `json:"selector,omitempty"`
+}
+
+// ScaleStatusApplyConfiguration constructs an declarative configuration of the ScaleStatus type for use with
+// apply.
+func ScaleStatus() *ScaleStatusApplyConfiguration {
+ return &ScaleStatusApplyConfiguration{}
+}
+
+// WithReplicas sets the Replicas field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Replicas field is set to the value of the last call.
+func (b *ScaleStatusApplyConfiguration) WithReplicas(value int32) *ScaleStatusApplyConfiguration {
+ b.Replicas = &value
+ return b
+}
+
+// WithSelector sets the Selector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Selector field is set to the value of the last call.
+func (b *ScaleStatusApplyConfiguration) WithSelector(value string) *ScaleStatusApplyConfiguration {
+ b.Selector = &value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
index 280ae0509..e2c24646b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta1/horizontalpodautoscaler.go
@@ -50,7 +50,7 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
// ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from
// horizontalPodAutoscaler. If no managedFields are found in horizontalPodAutoscaler for fieldManager, a
// HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// horizontalPodAutoscaler must be a unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+ return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "")
+}
+
+// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+ return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status")
+}
+
+func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta1.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
b := &HorizontalPodAutoscalerApplyConfiguration{}
- err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler"), fieldManager, b)
+ err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta1.HorizontalPodAutoscaler"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
index 0ae4a1953..381925b23 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/autoscaling/v2beta2/horizontalpodautoscaler.go
@@ -50,7 +50,7 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
// ExtractHorizontalPodAutoscaler extracts the applied configuration owned by fieldManager from
// horizontalPodAutoscaler. If no managedFields are found in horizontalPodAutoscaler for fieldManager, a
// HorizontalPodAutoscalerApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// horizontalPodAutoscaler must be a unmodified HorizontalPodAutoscaler API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func HorizontalPodAutoscaler(name, namespace string) *HorizontalPodAutoscalerApp
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+ return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "")
+}
+
+// ExtractHorizontalPodAutoscalerStatus is the same as ExtractHorizontalPodAutoscaler except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractHorizontalPodAutoscalerStatus(horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, fieldManager string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
+ return extractHorizontalPodAutoscaler(horizontalPodAutoscaler, fieldManager, "status")
+}
+
+func extractHorizontalPodAutoscaler(horizontalPodAutoscaler *autoscalingv2beta2.HorizontalPodAutoscaler, fieldManager string, subresource string) (*HorizontalPodAutoscalerApplyConfiguration, error) {
b := &HorizontalPodAutoscalerApplyConfiguration{}
- err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler"), fieldManager, b)
+ err := managedfields.ExtractInto(horizontalPodAutoscaler, internal.Parser().Type("io.k8s.api.autoscaling.v2beta2.HorizontalPodAutoscaler"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
index af38708a0..749163cc3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/cronjob.go
@@ -50,7 +50,7 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration {
// ExtractCronJob extracts the applied configuration owned by fieldManager from
// cronJob. If no managedFields are found in cronJob for fieldManager, a
// CronJobApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// cronJob must be a unmodified CronJob API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractCronJob(cronJob *apibatchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
+ return extractCronJob(cronJob, fieldManager, "")
+}
+
+// ExtractCronJobStatus is the same as ExtractCronJob except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractCronJobStatus(cronJob *apibatchv1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
+ return extractCronJob(cronJob, fieldManager, "status")
+}
+
+func extractCronJob(cronJob *apibatchv1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) {
b := &CronJobApplyConfiguration{}
- err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1.CronJob"), fieldManager, b)
+ err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1.CronJob"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
index 0c9bcdb75..bb84a58b0 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/job.go
@@ -50,7 +50,7 @@ func Job(name, namespace string) *JobApplyConfiguration {
// ExtractJob extracts the applied configuration owned by fieldManager from
// job. If no managedFields are found in job for fieldManager, a
// JobApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// job must be a unmodified Job API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func Job(name, namespace string) *JobApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractJob(job *apibatchv1.Job, fieldManager string) (*JobApplyConfiguration, error) {
+ return extractJob(job, fieldManager, "")
+}
+
+// ExtractJobStatus is the same as ExtractJob except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractJobStatus(job *apibatchv1.Job, fieldManager string) (*JobApplyConfiguration, error) {
+ return extractJob(job, fieldManager, "status")
+}
+
+func extractJob(job *apibatchv1.Job, fieldManager string, subresource string) (*JobApplyConfiguration, error) {
b := &JobApplyConfiguration{}
- err := managedfields.ExtractInto(job, internal.Parser().Type("io.k8s.api.batch.v1.Job"), fieldManager, b)
+ err := managedfields.ExtractInto(job, internal.Parser().Type("io.k8s.api.batch.v1.Job"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
index e59d49cf1..ba7e27e08 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/jobstatus.go
@@ -25,13 +25,14 @@ import (
// JobStatusApplyConfiguration represents an declarative configuration of the JobStatus type for use
// with apply.
type JobStatusApplyConfiguration struct {
- Conditions []JobConditionApplyConfiguration `json:"conditions,omitempty"`
- StartTime *metav1.Time `json:"startTime,omitempty"`
- CompletionTime *metav1.Time `json:"completionTime,omitempty"`
- Active *int32 `json:"active,omitempty"`
- Succeeded *int32 `json:"succeeded,omitempty"`
- Failed *int32 `json:"failed,omitempty"`
- CompletedIndexes *string `json:"completedIndexes,omitempty"`
+ Conditions []JobConditionApplyConfiguration `json:"conditions,omitempty"`
+ StartTime *metav1.Time `json:"startTime,omitempty"`
+ CompletionTime *metav1.Time `json:"completionTime,omitempty"`
+ Active *int32 `json:"active,omitempty"`
+ Succeeded *int32 `json:"succeeded,omitempty"`
+ Failed *int32 `json:"failed,omitempty"`
+ CompletedIndexes *string `json:"completedIndexes,omitempty"`
+ UncountedTerminatedPods *UncountedTerminatedPodsApplyConfiguration `json:"uncountedTerminatedPods,omitempty"`
}
// JobStatusApplyConfiguration constructs an declarative configuration of the JobStatus type for use with
@@ -100,3 +101,11 @@ func (b *JobStatusApplyConfiguration) WithCompletedIndexes(value string) *JobSta
b.CompletedIndexes = &value
return b
}
+
+// WithUncountedTerminatedPods sets the UncountedTerminatedPods field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UncountedTerminatedPods field is set to the value of the last call.
+func (b *JobStatusApplyConfiguration) WithUncountedTerminatedPods(value *UncountedTerminatedPodsApplyConfiguration) *JobStatusApplyConfiguration {
+ b.UncountedTerminatedPods = value
+ return b
+}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go
new file mode 100644
index 000000000..1409303ff
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1/uncountedterminatedpods.go
@@ -0,0 +1,56 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ types "k8s.io/apimachinery/pkg/types"
+)
+
+// UncountedTerminatedPodsApplyConfiguration represents an declarative configuration of the UncountedTerminatedPods type for use
+// with apply.
+type UncountedTerminatedPodsApplyConfiguration struct {
+ Succeeded []types.UID `json:"succeeded,omitempty"`
+ Failed []types.UID `json:"failed,omitempty"`
+}
+
+// UncountedTerminatedPodsApplyConfiguration constructs an declarative configuration of the UncountedTerminatedPods type for use with
+// apply.
+func UncountedTerminatedPods() *UncountedTerminatedPodsApplyConfiguration {
+ return &UncountedTerminatedPodsApplyConfiguration{}
+}
+
+// WithSucceeded adds the given value to the Succeeded field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Succeeded field.
+func (b *UncountedTerminatedPodsApplyConfiguration) WithSucceeded(values ...types.UID) *UncountedTerminatedPodsApplyConfiguration {
+ for i := range values {
+ b.Succeeded = append(b.Succeeded, values[i])
+ }
+ return b
+}
+
+// WithFailed adds the given value to the Failed field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Failed field.
+func (b *UncountedTerminatedPodsApplyConfiguration) WithFailed(values ...types.UID) *UncountedTerminatedPodsApplyConfiguration {
+ for i := range values {
+ b.Failed = append(b.Failed, values[i])
+ }
+ return b
+}
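
uncountedterminatedpods.go is new, added upstream alongside Job tracking with finalizers: it records pod UIDs that have terminated but are not yet reflected in the Job's counters. A minimal sketch of wiring the new builder into a JobStatus apply configuration; the UIDs are placeholders, and the JobStatus() constructor is assumed from the same generated package:

```go
package main

import (
	"k8s.io/apimachinery/pkg/types"
	batchv1ac "k8s.io/client-go/applyconfigurations/batch/v1"
)

// jobStatusWithUncounted builds a JobStatus apply configuration that carries
// the not-yet-counted terminated pod UIDs.
func jobStatusWithUncounted(succeeded, failed types.UID) *batchv1ac.JobStatusApplyConfiguration {
	uncounted := batchv1ac.UncountedTerminatedPods().
		WithSucceeded(succeeded).
		WithFailed(failed)
	return batchv1ac.JobStatus().
		WithActive(0).
		WithUncountedTerminatedPods(uncounted)
}
```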
diff --git a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
index ddcef76fd..8b16bc55c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/batch/v1beta1/cronjob.go
@@ -50,7 +50,7 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration {
// ExtractCronJob extracts the applied configuration owned by fieldManager from
// cronJob. If no managedFields are found in cronJob for fieldManager, a
// CronJobApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// cronJob must be a unmodified CronJob API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func CronJob(name, namespace string) *CronJobApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractCronJob(cronJob *batchv1beta1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
+ return extractCronJob(cronJob, fieldManager, "")
+}
+
+// ExtractCronJobStatus is the same as ExtractCronJob except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractCronJobStatus(cronJob *batchv1beta1.CronJob, fieldManager string) (*CronJobApplyConfiguration, error) {
+ return extractCronJob(cronJob, fieldManager, "status")
+}
+
+func extractCronJob(cronJob *batchv1beta1.CronJob, fieldManager string, subresource string) (*CronJobApplyConfiguration, error) {
b := &CronJobApplyConfiguration{}
- err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1beta1.CronJob"), fieldManager, b)
+ err := managedfields.ExtractInto(cronJob, internal.Parser().Type("io.k8s.api.batch.v1beta1.CronJob"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
index 99c710a0f..9d46541b7 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequest.go
@@ -49,7 +49,7 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi
// ExtractCertificateSigningRequest extracts the applied configuration owned by fieldManager from
// certificateSigningRequest. If no managedFields are found in certificateSigningRequest for fieldManager, a
// CertificateSigningRequestApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// certificateSigningRequest must be a unmodified CertificateSigningRequest API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractCertificateSigningRequest(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
+ return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "")
+}
+
+// ExtractCertificateSigningRequestStatus is the same as ExtractCertificateSigningRequest except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractCertificateSigningRequestStatus(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
+ return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "status")
+}
+
+func extractCertificateSigningRequest(certificateSigningRequest *apicertificatesv1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) {
b := &CertificateSigningRequestApplyConfiguration{}
- err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1.CertificateSigningRequest"), fieldManager, b)
+ err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1.CertificateSigningRequest"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
index 7c4d2c98e..81ca214a9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1/certificatesigningrequestspec.go
@@ -25,13 +25,14 @@ import (
// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use
// with apply.
type CertificateSigningRequestSpecApplyConfiguration struct {
- Request []byte `json:"request,omitempty"`
- SignerName *string `json:"signerName,omitempty"`
- Usages []v1.KeyUsage `json:"usages,omitempty"`
- Username *string `json:"username,omitempty"`
- UID *string `json:"uid,omitempty"`
- Groups []string `json:"groups,omitempty"`
- Extra map[string]v1.ExtraValue `json:"extra,omitempty"`
+ Request []byte `json:"request,omitempty"`
+ SignerName *string `json:"signerName,omitempty"`
+ ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"`
+ Usages []v1.KeyUsage `json:"usages,omitempty"`
+ Username *string `json:"username,omitempty"`
+ UID *string `json:"uid,omitempty"`
+ Groups []string `json:"groups,omitempty"`
+ Extra map[string]v1.ExtraValue `json:"extra,omitempty"`
}
// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with
@@ -58,6 +59,14 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithSignerName(value s
return b
}
+// WithExpirationSeconds sets the ExpirationSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ExpirationSeconds field is set to the value of the last call.
+func (b *CertificateSigningRequestSpecApplyConfiguration) WithExpirationSeconds(value int32) *CertificateSigningRequestSpecApplyConfiguration {
+ b.ExpirationSeconds = &value
+ return b
+}
+
// WithUsages adds the given value to the Usages field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Usages field.
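
The certificates/v1 spec gains expirationSeconds, letting the requester ask for a bounded certificate lifetime. A minimal sketch of the new setter in a CSR apply configuration; the CSR name, signer, and duration are illustrative, and the CertificateSigningRequestSpec() constructor is assumed from the same generated package:

```go
package main

import (
	certificatesv1ac "k8s.io/client-go/applyconfigurations/certificates/v1"
)

// csrWithExpiration builds a CertificateSigningRequest apply configuration that
// requests a one-hour certificate via the new expirationSeconds field.
func csrWithExpiration(csrPEM []byte) *certificatesv1ac.CertificateSigningRequestApplyConfiguration {
	return certificatesv1ac.CertificateSigningRequest("example-csr").
		WithSpec(certificatesv1ac.CertificateSigningRequestSpec().
			WithRequest(csrPEM...).
			WithSignerName("kubernetes.io/kube-apiserver-client").
			WithExpirationSeconds(3600))
}
```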
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
index 920b5319a..907b81983 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequest.go
@@ -49,7 +49,7 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi
// ExtractCertificateSigningRequest extracts the applied configuration owned by fieldManager from
// certificateSigningRequest. If no managedFields are found in certificateSigningRequest for fieldManager, a
// CertificateSigningRequestApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// certificateSigningRequest must be a unmodified CertificateSigningRequest API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func CertificateSigningRequest(name string) *CertificateSigningRequestApplyConfi
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractCertificateSigningRequest(certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
+ return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "")
+}
+
+// ExtractCertificateSigningRequestStatus is the same as ExtractCertificateSigningRequest except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractCertificateSigningRequestStatus(certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, fieldManager string) (*CertificateSigningRequestApplyConfiguration, error) {
+ return extractCertificateSigningRequest(certificateSigningRequest, fieldManager, "status")
+}
+
+func extractCertificateSigningRequest(certificateSigningRequest *certificatesv1beta1.CertificateSigningRequest, fieldManager string, subresource string) (*CertificateSigningRequestApplyConfiguration, error) {
b := &CertificateSigningRequestApplyConfiguration{}
- err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1beta1.CertificateSigningRequest"), fieldManager, b)
+ err := managedfields.ExtractInto(certificateSigningRequest, internal.Parser().Type("io.k8s.api.certificates.v1beta1.CertificateSigningRequest"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
index 73ea58e5e..9554b1f40 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/certificates/v1beta1/certificatesigningrequestspec.go
@@ -25,13 +25,14 @@ import (
// CertificateSigningRequestSpecApplyConfiguration represents an declarative configuration of the CertificateSigningRequestSpec type for use
// with apply.
type CertificateSigningRequestSpecApplyConfiguration struct {
- Request []byte `json:"request,omitempty"`
- SignerName *string `json:"signerName,omitempty"`
- Usages []v1beta1.KeyUsage `json:"usages,omitempty"`
- Username *string `json:"username,omitempty"`
- UID *string `json:"uid,omitempty"`
- Groups []string `json:"groups,omitempty"`
- Extra map[string]v1beta1.ExtraValue `json:"extra,omitempty"`
+ Request []byte `json:"request,omitempty"`
+ SignerName *string `json:"signerName,omitempty"`
+ ExpirationSeconds *int32 `json:"expirationSeconds,omitempty"`
+ Usages []v1beta1.KeyUsage `json:"usages,omitempty"`
+ Username *string `json:"username,omitempty"`
+ UID *string `json:"uid,omitempty"`
+ Groups []string `json:"groups,omitempty"`
+ Extra map[string]v1beta1.ExtraValue `json:"extra,omitempty"`
}
// CertificateSigningRequestSpecApplyConfiguration constructs an declarative configuration of the CertificateSigningRequestSpec type for use with
@@ -58,6 +59,14 @@ func (b *CertificateSigningRequestSpecApplyConfiguration) WithSignerName(value s
return b
}
+// WithExpirationSeconds sets the ExpirationSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ExpirationSeconds field is set to the value of the last call.
+func (b *CertificateSigningRequestSpecApplyConfiguration) WithExpirationSeconds(value int32) *CertificateSigningRequestSpecApplyConfiguration {
+ b.ExpirationSeconds = &value
+ return b
+}
+
// WithUsages adds the given value to the Usages field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Usages field.
diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
index ad552f2a5..fcaddb663 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1/lease.go
@@ -49,7 +49,7 @@ func Lease(name, namespace string) *LeaseApplyConfiguration {
// ExtractLease extracts the applied configuration owned by fieldManager from
// lease. If no managedFields are found in lease for fieldManager, a
// LeaseApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// lease must be a unmodified Lease API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func Lease(name, namespace string) *LeaseApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractLease(lease *apicoordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
+ return extractLease(lease, fieldManager, "")
+}
+
+// ExtractLeaseStatus is the same as ExtractLease except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractLeaseStatus(lease *apicoordinationv1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
+ return extractLease(lease, fieldManager, "status")
+}
+
+func extractLease(lease *apicoordinationv1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) {
b := &LeaseApplyConfiguration{}
- err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1.Lease"), fieldManager, b)
+ err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1.Lease"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
index 9093cfc54..f63ddc29e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/coordination/v1beta1/lease.go
@@ -49,7 +49,7 @@ func Lease(name, namespace string) *LeaseApplyConfiguration {
// ExtractLease extracts the applied configuration owned by fieldManager from
// lease. If no managedFields are found in lease for fieldManager, a
// LeaseApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// lease must be a unmodified Lease API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func Lease(name, namespace string) *LeaseApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractLease(lease *coordinationv1beta1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
+ return extractLease(lease, fieldManager, "")
+}
+
+// ExtractLeaseStatus is the same as ExtractLease except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractLeaseStatus(lease *coordinationv1beta1.Lease, fieldManager string) (*LeaseApplyConfiguration, error) {
+ return extractLease(lease, fieldManager, "status")
+}
+
+func extractLease(lease *coordinationv1beta1.Lease, fieldManager string, subresource string) (*LeaseApplyConfiguration, error) {
b := &LeaseApplyConfiguration{}
- err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1beta1.Lease"), fieldManager, b)
+ err := managedfields.ExtractInto(lease, internal.Parser().Type("io.k8s.api.coordination.v1beta1.Lease"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
index 9328bdd76..6983a689b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/componentstatus.go
@@ -48,7 +48,7 @@ func ComponentStatus(name string) *ComponentStatusApplyConfiguration {
// ExtractComponentStatus extracts the applied configuration owned by fieldManager from
// componentStatus. If no managedFields are found in componentStatus for fieldManager, a
// ComponentStatusApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// componentStatus must be a unmodified ComponentStatus API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func ComponentStatus(name string) *ComponentStatusApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) {
+ return extractComponentStatus(componentStatus, fieldManager, "")
+}
+
+// ExtractComponentStatusStatus is the same as ExtractComponentStatus except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractComponentStatusStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string) (*ComponentStatusApplyConfiguration, error) {
+ return extractComponentStatus(componentStatus, fieldManager, "status")
+}
+
+func extractComponentStatus(componentStatus *apicorev1.ComponentStatus, fieldManager string, subresource string) (*ComponentStatusApplyConfiguration, error) {
b := &ComponentStatusApplyConfiguration{}
- err := managedfields.ExtractInto(componentStatus, internal.Parser().Type("io.k8s.api.core.v1.ComponentStatus"), fieldManager, b)
+ err := managedfields.ExtractInto(componentStatus, internal.Parser().Type("io.k8s.api.core.v1.ComponentStatus"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
index b60f981b0..0664c1849 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/configmap.go
@@ -51,7 +51,7 @@ func ConfigMap(name, namespace string) *ConfigMapApplyConfiguration {
// ExtractConfigMap extracts the applied configuration owned by fieldManager from
// configMap. If no managedFields are found in configMap for fieldManager, a
// ConfigMapApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// configMap must be a unmodified ConfigMap API object that was retrieved from the Kubernetes API.
@@ -60,8 +60,19 @@ func ConfigMap(name, namespace string) *ConfigMapApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractConfigMap(configMap *corev1.ConfigMap, fieldManager string) (*ConfigMapApplyConfiguration, error) {
+ return extractConfigMap(configMap, fieldManager, "")
+}
+
+// ExtractConfigMapStatus is the same as ExtractConfigMap except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractConfigMapStatus(configMap *corev1.ConfigMap, fieldManager string) (*ConfigMapApplyConfiguration, error) {
+ return extractConfigMap(configMap, fieldManager, "status")
+}
+
+func extractConfigMap(configMap *corev1.ConfigMap, fieldManager string, subresource string) (*ConfigMapApplyConfiguration, error) {
b := &ConfigMapApplyConfiguration{}
- err := managedfields.ExtractInto(configMap, internal.Parser().Type("io.k8s.api.core.v1.ConfigMap"), fieldManager, b)
+ err := managedfields.ExtractInto(configMap, internal.Parser().Type("io.k8s.api.core.v1.ConfigMap"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
index 31a541c75..b3b302fe2 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/endpoints.go
@@ -49,7 +49,7 @@ func Endpoints(name, namespace string) *EndpointsApplyConfiguration {
// ExtractEndpoints extracts the applied configuration owned by fieldManager from
// endpoints. If no managedFields are found in endpoints for fieldManager, a
// EndpointsApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// endpoints must be a unmodified Endpoints API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func Endpoints(name, namespace string) *EndpointsApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) {
+ return extractEndpoints(endpoints, fieldManager, "")
+}
+
+// ExtractEndpointsStatus is the same as ExtractEndpoints except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractEndpointsStatus(endpoints *apicorev1.Endpoints, fieldManager string) (*EndpointsApplyConfiguration, error) {
+ return extractEndpoints(endpoints, fieldManager, "status")
+}
+
+func extractEndpoints(endpoints *apicorev1.Endpoints, fieldManager string, subresource string) (*EndpointsApplyConfiguration, error) {
b := &EndpointsApplyConfiguration{}
- err := managedfields.ExtractInto(endpoints, internal.Parser().Type("io.k8s.api.core.v1.Endpoints"), fieldManager, b)
+ err := managedfields.ExtractInto(endpoints, internal.Parser().Type("io.k8s.api.core.v1.Endpoints"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
index bd39a0fb0..3a0c53694 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/event.go
@@ -62,7 +62,7 @@ func Event(name, namespace string) *EventApplyConfiguration {
// ExtractEvent extracts the applied configuration owned by fieldManager from
// event. If no managedFields are found in event for fieldManager, a
// EventApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// event must be a unmodified Event API object that was retrieved from the Kubernetes API.
@@ -71,8 +71,19 @@ func Event(name, namespace string) *EventApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractEvent(event *apicorev1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+ return extractEvent(event, fieldManager, "")
+}
+
+// ExtractEventStatus is the same as ExtractEvent except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractEventStatus(event *apicorev1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+ return extractEvent(event, fieldManager, "status")
+}
+
+func extractEvent(event *apicorev1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
b := &EventApplyConfiguration{}
- err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.core.v1.Event"), fieldManager, b)
+ err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.core.v1.Event"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
index 920b527f5..03207b8ec 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/limitrange.go
@@ -49,7 +49,7 @@ func LimitRange(name, namespace string) *LimitRangeApplyConfiguration {
// ExtractLimitRange extracts the applied configuration owned by fieldManager from
// limitRange. If no managedFields are found in limitRange for fieldManager, a
// LimitRangeApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// limitRange must be a unmodified LimitRange API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func LimitRange(name, namespace string) *LimitRangeApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) {
+ return extractLimitRange(limitRange, fieldManager, "")
+}
+
+// ExtractLimitRangeStatus is the same as ExtractLimitRange except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractLimitRangeStatus(limitRange *apicorev1.LimitRange, fieldManager string) (*LimitRangeApplyConfiguration, error) {
+ return extractLimitRange(limitRange, fieldManager, "status")
+}
+
+func extractLimitRange(limitRange *apicorev1.LimitRange, fieldManager string, subresource string) (*LimitRangeApplyConfiguration, error) {
b := &LimitRangeApplyConfiguration{}
- err := managedfields.ExtractInto(limitRange, internal.Parser().Type("io.k8s.api.core.v1.LimitRange"), fieldManager, b)
+ err := managedfields.ExtractInto(limitRange, internal.Parser().Type("io.k8s.api.core.v1.LimitRange"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
index 4a829f404..ec29bcfd9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/namespace.go
@@ -49,7 +49,7 @@ func Namespace(name string) *NamespaceApplyConfiguration {
// ExtractNamespace extracts the applied configuration owned by fieldManager from
// namespace. If no managedFields are found in namespace for fieldManager, a
// NamespaceApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// namespace must be a unmodified Namespace API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func Namespace(name string) *NamespaceApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractNamespace(namespace *apicorev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) {
+ return extractNamespace(namespace, fieldManager, "")
+}
+
+// ExtractNamespaceStatus is the same as ExtractNamespace except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractNamespaceStatus(namespace *apicorev1.Namespace, fieldManager string) (*NamespaceApplyConfiguration, error) {
+ return extractNamespace(namespace, fieldManager, "status")
+}
+
+func extractNamespace(namespace *apicorev1.Namespace, fieldManager string, subresource string) (*NamespaceApplyConfiguration, error) {
b := &NamespaceApplyConfiguration{}
- err := managedfields.ExtractInto(namespace, internal.Parser().Type("io.k8s.api.core.v1.Namespace"), fieldManager, b)
+ err := managedfields.ExtractInto(namespace, internal.Parser().Type("io.k8s.api.core.v1.Namespace"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
index 4f90da59f..b26e9f499 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/node.go
@@ -49,7 +49,7 @@ func Node(name string) *NodeApplyConfiguration {
// ExtractNode extracts the applied configuration owned by fieldManager from
// node. If no managedFields are found in node for fieldManager, a
// NodeApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// node must be a unmodified Node API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func Node(name string) *NodeApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractNode(node *apicorev1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
+ return extractNode(node, fieldManager, "")
+}
+
+// ExtractNodeStatus is the same as ExtractNode except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractNodeStatus(node *apicorev1.Node, fieldManager string) (*NodeApplyConfiguration, error) {
+ return extractNode(node, fieldManager, "status")
+}
+
+func extractNode(node *apicorev1.Node, fieldManager string, subresource string) (*NodeApplyConfiguration, error) {
b := &NodeApplyConfiguration{}
- err := managedfields.ExtractInto(node, internal.Parser().Type("io.k8s.api.core.v1.Node"), fieldManager, b)
+ err := managedfields.ExtractInto(node, internal.Parser().Type("io.k8s.api.core.v1.Node"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
index a5df345e1..dcef6020f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolume.go
@@ -49,7 +49,7 @@ func PersistentVolume(name string) *PersistentVolumeApplyConfiguration {
// ExtractPersistentVolume extracts the applied configuration owned by fieldManager from
// persistentVolume. If no managedFields are found in persistentVolume for fieldManager, a
// PersistentVolumeApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// persistentVolume must be a unmodified PersistentVolume API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func PersistentVolume(name string) *PersistentVolumeApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) {
+ return extractPersistentVolume(persistentVolume, fieldManager, "")
+}
+
+// ExtractPersistentVolumeStatus is the same as ExtractPersistentVolume except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPersistentVolumeStatus(persistentVolume *apicorev1.PersistentVolume, fieldManager string) (*PersistentVolumeApplyConfiguration, error) {
+ return extractPersistentVolume(persistentVolume, fieldManager, "status")
+}
+
+func extractPersistentVolume(persistentVolume *apicorev1.PersistentVolume, fieldManager string, subresource string) (*PersistentVolumeApplyConfiguration, error) {
b := &PersistentVolumeApplyConfiguration{}
- err := managedfields.ExtractInto(persistentVolume, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolume"), fieldManager, b)
+ err := managedfields.ExtractInto(persistentVolume, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolume"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
index 229b5d481..8ed20fa29 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaim.go
@@ -50,7 +50,7 @@ func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyCo
// ExtractPersistentVolumeClaim extracts the applied configuration owned by fieldManager from
// persistentVolumeClaim. If no managedFields are found in persistentVolumeClaim for fieldManager, a
// PersistentVolumeClaimApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// persistentVolumeClaim must be a unmodified PersistentVolumeClaim API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func PersistentVolumeClaim(name, namespace string) *PersistentVolumeClaimApplyCo
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) {
+ return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "")
+}
+
+// ExtractPersistentVolumeClaimStatus is the same as ExtractPersistentVolumeClaim except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPersistentVolumeClaimStatus(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string) (*PersistentVolumeClaimApplyConfiguration, error) {
+ return extractPersistentVolumeClaim(persistentVolumeClaim, fieldManager, "status")
+}
+
+func extractPersistentVolumeClaim(persistentVolumeClaim *apicorev1.PersistentVolumeClaim, fieldManager string, subresource string) (*PersistentVolumeClaimApplyConfiguration, error) {
b := &PersistentVolumeClaimApplyConfiguration{}
- err := managedfields.ExtractInto(persistentVolumeClaim, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolumeClaim"), fieldManager, b)
+ err := managedfields.ExtractInto(persistentVolumeClaim, internal.Parser().Type("io.k8s.api.core.v1.PersistentVolumeClaim"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
index ac4d64c71..e22d04b0d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/persistentvolumeclaimspec.go
@@ -33,6 +33,7 @@ type PersistentVolumeClaimSpecApplyConfiguration struct {
StorageClassName *string `json:"storageClassName,omitempty"`
VolumeMode *v1.PersistentVolumeMode `json:"volumeMode,omitempty"`
DataSource *TypedLocalObjectReferenceApplyConfiguration `json:"dataSource,omitempty"`
+ DataSourceRef *TypedLocalObjectReferenceApplyConfiguration `json:"dataSourceRef,omitempty"`
}
// PersistentVolumeClaimSpecApplyConfiguration constructs an declarative configuration of the PersistentVolumeClaimSpec type for use with
@@ -98,3 +99,11 @@ func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSource(value *Type
b.DataSource = value
return b
}
+
+// WithDataSourceRef sets the DataSourceRef field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DataSourceRef field is set to the value of the last call.
+func (b *PersistentVolumeClaimSpecApplyConfiguration) WithDataSourceRef(value *TypedLocalObjectReferenceApplyConfiguration) *PersistentVolumeClaimSpecApplyConfiguration {
+ b.DataSourceRef = value
+ return b
+}
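
The new DataSourceRef field mirrors DataSource and gets the usual pointer-setter builder. Below is a small sketch of how such a builder might be chained when declaring a claim populated from a VolumeSnapshot; the snapshot name, storage size and helper function are illustrative assumptions, not code from this repository.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// pvcFromSnapshot sketches the new WithDataSourceRef builder: it declares a
// claim whose contents should come from an existing VolumeSnapshot.
func pvcFromSnapshot(name, namespace, snapshot string) *corev1ac.PersistentVolumeClaimApplyConfiguration {
	return corev1ac.PersistentVolumeClaim(name, namespace).
		WithSpec(corev1ac.PersistentVolumeClaimSpec().
			WithAccessModes(corev1.ReadWriteOnce).
			WithResources(corev1ac.ResourceRequirements().
				WithRequests(corev1.ResourceList{corev1.ResourceStorage: resource.MustParse("10Gi")})).
			WithDataSourceRef(corev1ac.TypedLocalObjectReference().
				WithAPIGroup("snapshot.storage.k8s.io").
				WithKind("VolumeSnapshot").
				WithName(snapshot)))
}
```

The resulting configuration would typically be handed to PersistentVolumeClaims(namespace).Apply with a FieldManager set, like any other apply configuration.
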
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
index 478371392..c3649829a 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/pod.go
@@ -50,7 +50,7 @@ func Pod(name, namespace string) *PodApplyConfiguration {
// ExtractPod extracts the applied configuration owned by fieldManager from
// pod. If no managedFields are found in pod for fieldManager, a
// PodApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// pod must be a unmodified Pod API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func Pod(name, namespace string) *PodApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPod(pod *apicorev1.Pod, fieldManager string) (*PodApplyConfiguration, error) {
+ return extractPod(pod, fieldManager, "")
+}
+
+// ExtractPodStatus is the same as ExtractPod except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPodStatus(pod *apicorev1.Pod, fieldManager string) (*PodApplyConfiguration, error) {
+ return extractPod(pod, fieldManager, "status")
+}
+
+func extractPod(pod *apicorev1.Pod, fieldManager string, subresource string) (*PodApplyConfiguration, error) {
b := &PodApplyConfiguration{}
- err := managedfields.ExtractInto(pod, internal.Parser().Type("io.k8s.api.core.v1.Pod"), fieldManager, b)
+ err := managedfields.ExtractInto(pod, internal.Parser().Type("io.k8s.api.core.v1.Pod"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
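
For kinds that do have a status subresource, the new Status extract variant pairs naturally with the typed client's ApplyStatus. The sketch below shows that combination under stated assumptions: the manager name, the custom condition type and the helper itself are illustrative only and not part of this change.

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
	"k8s.io/client-go/kubernetes"
)

// markPodChecked sketches a status-only server-side apply: inspect which
// status fields this manager owns, then assert a single custom condition
// through the status subresource.
func markPodChecked(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
	const manager = "example-status-manager"

	pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// owned holds only the status fields currently owned by manager.
	owned, err := corev1ac.ExtractPodStatus(pod, manager)
	if err != nil {
		return err
	}
	_ = owned // e.g. log it or diff it against the desired state

	cfg := corev1ac.Pod(name, ns).WithStatus(corev1ac.PodStatus().WithConditions(
		corev1ac.PodCondition().
			WithType(corev1.PodConditionType("example.com/checked")).
			WithStatus(corev1.ConditionTrue),
	))
	_, err = cs.CoreV1().Pods(ns).ApplyStatus(ctx, cfg,
		metav1.ApplyOptions{FieldManager: manager, Force: true})
	return err
}
```
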
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
index 64263882a..1460977c0 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/podtemplate.go
@@ -49,7 +49,7 @@ func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration {
// ExtractPodTemplate extracts the applied configuration owned by fieldManager from
// podTemplate. If no managedFields are found in podTemplate for fieldManager, a
// PodTemplateApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// podTemplate must be a unmodified PodTemplate API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func PodTemplate(name, namespace string) *PodTemplateApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) {
+ return extractPodTemplate(podTemplate, fieldManager, "")
+}
+
+// ExtractPodTemplateStatus is the same as ExtractPodTemplate except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPodTemplateStatus(podTemplate *apicorev1.PodTemplate, fieldManager string) (*PodTemplateApplyConfiguration, error) {
+ return extractPodTemplate(podTemplate, fieldManager, "status")
+}
+
+func extractPodTemplate(podTemplate *apicorev1.PodTemplate, fieldManager string, subresource string) (*PodTemplateApplyConfiguration, error) {
b := &PodTemplateApplyConfiguration{}
- err := managedfields.ExtractInto(podTemplate, internal.Parser().Type("io.k8s.api.core.v1.PodTemplate"), fieldManager, b)
+ err := managedfields.ExtractInto(podTemplate, internal.Parser().Type("io.k8s.api.core.v1.PodTemplate"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
index f9b243a65..6dd6ae267 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/replicationcontroller.go
@@ -50,7 +50,7 @@ func ReplicationController(name, namespace string) *ReplicationControllerApplyCo
// ExtractReplicationController extracts the applied configuration owned by fieldManager from
// replicationController. If no managedFields are found in replicationController for fieldManager, a
// ReplicationControllerApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// replicationController must be a unmodified ReplicationController API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func ReplicationController(name, namespace string) *ReplicationControllerApplyCo
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractReplicationController(replicationController *apicorev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) {
+ return extractReplicationController(replicationController, fieldManager, "")
+}
+
+// ExtractReplicationControllerStatus is the same as ExtractReplicationController except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractReplicationControllerStatus(replicationController *apicorev1.ReplicationController, fieldManager string) (*ReplicationControllerApplyConfiguration, error) {
+ return extractReplicationController(replicationController, fieldManager, "status")
+}
+
+func extractReplicationController(replicationController *apicorev1.ReplicationController, fieldManager string, subresource string) (*ReplicationControllerApplyConfiguration, error) {
b := &ReplicationControllerApplyConfiguration{}
- err := managedfields.ExtractInto(replicationController, internal.Parser().Type("io.k8s.api.core.v1.ReplicationController"), fieldManager, b)
+ err := managedfields.ExtractInto(replicationController, internal.Parser().Type("io.k8s.api.core.v1.ReplicationController"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
index a0c4f0c0a..5cfb1988b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/resourcequota.go
@@ -50,7 +50,7 @@ func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration {
// ExtractResourceQuota extracts the applied configuration owned by fieldManager from
// resourceQuota. If no managedFields are found in resourceQuota for fieldManager, a
// ResourceQuotaApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// resourceQuota must be a unmodified ResourceQuota API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func ResourceQuota(name, namespace string) *ResourceQuotaApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) {
+ return extractResourceQuota(resourceQuota, fieldManager, "")
+}
+
+// ExtractResourceQuotaStatus is the same as ExtractResourceQuota except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractResourceQuotaStatus(resourceQuota *apicorev1.ResourceQuota, fieldManager string) (*ResourceQuotaApplyConfiguration, error) {
+ return extractResourceQuota(resourceQuota, fieldManager, "status")
+}
+
+func extractResourceQuota(resourceQuota *apicorev1.ResourceQuota, fieldManager string, subresource string) (*ResourceQuotaApplyConfiguration, error) {
b := &ResourceQuotaApplyConfiguration{}
- err := managedfields.ExtractInto(resourceQuota, internal.Parser().Type("io.k8s.api.core.v1.ResourceQuota"), fieldManager, b)
+ err := managedfields.ExtractInto(resourceQuota, internal.Parser().Type("io.k8s.api.core.v1.ResourceQuota"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
index 7fd2aa936..00d789f41 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/secret.go
@@ -52,7 +52,7 @@ func Secret(name, namespace string) *SecretApplyConfiguration {
// ExtractSecret extracts the applied configuration owned by fieldManager from
// secret. If no managedFields are found in secret for fieldManager, a
// SecretApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// secret must be a unmodified Secret API object that was retrieved from the Kubernetes API.
@@ -61,8 +61,19 @@ func Secret(name, namespace string) *SecretApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractSecret(secret *corev1.Secret, fieldManager string) (*SecretApplyConfiguration, error) {
+ return extractSecret(secret, fieldManager, "")
+}
+
+// ExtractSecretStatus is the same as ExtractSecret except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractSecretStatus(secret *corev1.Secret, fieldManager string) (*SecretApplyConfiguration, error) {
+ return extractSecret(secret, fieldManager, "status")
+}
+
+func extractSecret(secret *corev1.Secret, fieldManager string, subresource string) (*SecretApplyConfiguration, error) {
b := &SecretApplyConfiguration{}
- err := managedfields.ExtractInto(secret, internal.Parser().Type("io.k8s.api.core.v1.Secret"), fieldManager, b)
+ err := managedfields.ExtractInto(secret, internal.Parser().Type("io.k8s.api.core.v1.Secret"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
index 38c439559..31b47311f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/service.go
@@ -50,7 +50,7 @@ func Service(name, namespace string) *ServiceApplyConfiguration {
// ExtractService extracts the applied configuration owned by fieldManager from
// service. If no managedFields are found in service for fieldManager, a
// ServiceApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// service must be a unmodified Service API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func Service(name, namespace string) *ServiceApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractService(service *apicorev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) {
+ return extractService(service, fieldManager, "")
+}
+
+// ExtractServiceStatus is the same as ExtractService except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractServiceStatus(service *apicorev1.Service, fieldManager string) (*ServiceApplyConfiguration, error) {
+ return extractService(service, fieldManager, "status")
+}
+
+func extractService(service *apicorev1.Service, fieldManager string, subresource string) (*ServiceApplyConfiguration, error) {
b := &ServiceApplyConfiguration{}
- err := managedfields.ExtractInto(service, internal.Parser().Type("io.k8s.api.core.v1.Service"), fieldManager, b)
+ err := managedfields.ExtractInto(service, internal.Parser().Type("io.k8s.api.core.v1.Service"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
index 46983e4a7..459d025eb 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/serviceaccount.go
@@ -51,7 +51,7 @@ func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration {
// ExtractServiceAccount extracts the applied configuration owned by fieldManager from
// serviceAccount. If no managedFields are found in serviceAccount for fieldManager, a
// ServiceAccountApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// serviceAccount must be a unmodified ServiceAccount API object that was retrieved from the Kubernetes API.
@@ -60,8 +60,19 @@ func ServiceAccount(name, namespace string) *ServiceAccountApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) {
+ return extractServiceAccount(serviceAccount, fieldManager, "")
+}
+
+// ExtractServiceAccountStatus is the same as ExtractServiceAccount except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractServiceAccountStatus(serviceAccount *apicorev1.ServiceAccount, fieldManager string) (*ServiceAccountApplyConfiguration, error) {
+ return extractServiceAccount(serviceAccount, fieldManager, "status")
+}
+
+func extractServiceAccount(serviceAccount *apicorev1.ServiceAccount, fieldManager string, subresource string) (*ServiceAccountApplyConfiguration, error) {
b := &ServiceAccountApplyConfiguration{}
- err := managedfields.ExtractInto(serviceAccount, internal.Parser().Type("io.k8s.api.core.v1.ServiceAccount"), fieldManager, b)
+ err := managedfields.ExtractInto(serviceAccount, internal.Parser().Type("io.k8s.api.core.v1.ServiceAccount"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go
index 99361cecf..856cd4f9d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/servicespec.go
@@ -39,7 +39,6 @@ type ServiceSpecApplyConfiguration struct {
HealthCheckNodePort *int32 `json:"healthCheckNodePort,omitempty"`
PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"`
SessionAffinityConfig *SessionAffinityConfigApplyConfiguration `json:"sessionAffinityConfig,omitempty"`
- TopologyKeys []string `json:"topologyKeys,omitempty"`
IPFamilies []corev1.IPFamily `json:"ipFamilies,omitempty"`
IPFamilyPolicy *corev1.IPFamilyPolicyType `json:"ipFamilyPolicy,omitempty"`
AllocateLoadBalancerNodePorts *bool `json:"allocateLoadBalancerNodePorts,omitempty"`
@@ -182,16 +181,6 @@ func (b *ServiceSpecApplyConfiguration) WithSessionAffinityConfig(value *Session
return b
}
-// WithTopologyKeys adds the given value to the TopologyKeys field in the declarative configuration
-// and returns the receiver, so that objects can be build by chaining "With" function invocations.
-// If called multiple times, values provided by each call will be appended to the TopologyKeys field.
-func (b *ServiceSpecApplyConfiguration) WithTopologyKeys(values ...string) *ServiceSpecApplyConfiguration {
- for i := range values {
- b.TopologyKeys = append(b.TopologyKeys, values[i])
- }
- return b
-}
-
// WithIPFamilies adds the given value to the IPFamilies field in the declarative configuration
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the IPFamilies field.
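
Along with the struct field, the WithTopologyKeys builder disappears here, since the topologyKeys field was removed from ServiceSpec upstream in the 1.22 API. Any caller that chained WithTopologyKeys has to drop that call; the rest of the builder chain is unchanged, as in this hedged sketch (service name, selector and port are illustrative).

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// dualStackService shows the ServiceSpec builders that remain after the
// topologyKeys removal, configuring a dual-stack ClusterIP service.
func dualStackService(name, namespace string) *corev1ac.ServiceApplyConfiguration {
	return corev1ac.Service(name, namespace).
		WithSpec(corev1ac.ServiceSpec().
			WithSelector(map[string]string{"app": name}).
			WithPorts(corev1ac.ServicePort().WithPort(80)).
			WithIPFamilyPolicy(corev1.IPFamilyPolicyPreferDualStack).
			WithIPFamilies(corev1.IPv4Protocol, corev1.IPv6Protocol))
}
```
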
diff --git a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go
index 2442063c4..20692e014 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/core/v1/windowssecuritycontextoptions.go
@@ -24,6 +24,7 @@ type WindowsSecurityContextOptionsApplyConfiguration struct {
GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty"`
GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty"`
RunAsUserName *string `json:"runAsUserName,omitempty"`
+ HostProcess *bool `json:"hostProcess,omitempty"`
}
// WindowsSecurityContextOptionsApplyConfiguration constructs an declarative configuration of the WindowsSecurityContextOptions type for use with
@@ -55,3 +56,11 @@ func (b *WindowsSecurityContextOptionsApplyConfiguration) WithRunAsUserName(valu
b.RunAsUserName = &value
return b
}
+
+// WithHostProcess sets the HostProcess field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the HostProcess field is set to the value of the last call.
+func (b *WindowsSecurityContextOptionsApplyConfiguration) WithHostProcess(value bool) *WindowsSecurityContextOptionsApplyConfiguration {
+ b.HostProcess = &value
+ return b
+}
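
The new HostProcess field gets the standard bool setter. A minimal sketch of chaining it with the existing Windows options follows; the container image and RunAsUserName value are illustrative, and whether a cluster accepts HostProcess containers depends on the (then alpha) WindowsHostProcessContainers feature gate.

```go
package example

import (
	corev1ac "k8s.io/client-go/applyconfigurations/core/v1"
)

// hostProcessContainer sketches the new WithHostProcess setter chained with
// the existing Windows security-context builders.
func hostProcessContainer(name, image string) *corev1ac.ContainerApplyConfiguration {
	return corev1ac.Container().
		WithName(name).
		WithImage(image).
		WithSecurityContext(corev1ac.SecurityContext().
			WithWindowsOptions(corev1ac.WindowsSecurityContextOptions().
				WithHostProcess(true).
				WithRunAsUserName(`NT AUTHORITY\SYSTEM`)))
}
```
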
diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
index 681013f04..6feaab25d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1/endpointslice.go
@@ -51,7 +51,7 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration {
// ExtractEndpointSlice extracts the applied configuration owned by fieldManager from
// endpointSlice. If no managedFields are found in endpointSlice for fieldManager, a
// EndpointSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// endpointSlice must be a unmodified EndpointSlice API object that was retrieved from the Kubernetes API.
@@ -60,8 +60,19 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
+ return extractEndpointSlice(endpointSlice, fieldManager, "")
+}
+
+// ExtractEndpointSliceStatus is the same as ExtractEndpointSlice except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractEndpointSliceStatus(endpointSlice *discoveryv1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
+ return extractEndpointSlice(endpointSlice, fieldManager, "status")
+}
+
+func extractEndpointSlice(endpointSlice *discoveryv1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) {
b := &EndpointSliceApplyConfiguration{}
- err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1.EndpointSlice"), fieldManager, b)
+ err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1.EndpointSlice"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
index a96708225..bacc1134d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/discovery/v1beta1/endpointslice.go
@@ -51,7 +51,7 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration {
// ExtractEndpointSlice extracts the applied configuration owned by fieldManager from
// endpointSlice. If no managedFields are found in endpointSlice for fieldManager, a
// EndpointSliceApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// endpointSlice must be a unmodified EndpointSlice API object that was retrieved from the Kubernetes API.
@@ -60,8 +60,19 @@ func EndpointSlice(name, namespace string) *EndpointSliceApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
+ return extractEndpointSlice(endpointSlice, fieldManager, "")
+}
+
+// ExtractEndpointSliceStatus is the same as ExtractEndpointSlice except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractEndpointSliceStatus(endpointSlice *v1beta1.EndpointSlice, fieldManager string) (*EndpointSliceApplyConfiguration, error) {
+ return extractEndpointSlice(endpointSlice, fieldManager, "status")
+}
+
+func extractEndpointSlice(endpointSlice *v1beta1.EndpointSlice, fieldManager string, subresource string) (*EndpointSliceApplyConfiguration, error) {
b := &EndpointSliceApplyConfiguration{}
- err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1beta1.EndpointSlice"), fieldManager, b)
+ err := managedfields.ExtractInto(endpointSlice, internal.Parser().Type("io.k8s.api.discovery.v1beta1.EndpointSlice"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
index 860fff586..19cc9e0ad 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1/event.go
@@ -63,7 +63,7 @@ func Event(name, namespace string) *EventApplyConfiguration {
// ExtractEvent extracts the applied configuration owned by fieldManager from
// event. If no managedFields are found in event for fieldManager, a
// EventApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// event must be a unmodified Event API object that was retrieved from the Kubernetes API.
@@ -72,8 +72,19 @@ func Event(name, namespace string) *EventApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractEvent(event *apieventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+ return extractEvent(event, fieldManager, "")
+}
+
+// ExtractEventStatus is the same as ExtractEvent except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractEventStatus(event *apieventsv1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+ return extractEvent(event, fieldManager, "status")
+}
+
+func extractEvent(event *apieventsv1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
b := &EventApplyConfiguration{}
- err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1.Event"), fieldManager, b)
+ err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1.Event"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
index 65057f957..f02bdd2b9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/events/v1beta1/event.go
@@ -63,7 +63,7 @@ func Event(name, namespace string) *EventApplyConfiguration {
// ExtractEvent extracts the applied configuration owned by fieldManager from
// event. If no managedFields are found in event for fieldManager, a
// EventApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// event must be a unmodified Event API object that was retrieved from the Kubernetes API.
@@ -72,8 +72,19 @@ func Event(name, namespace string) *EventApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractEvent(event *eventsv1beta1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+ return extractEvent(event, fieldManager, "")
+}
+
+// ExtractEventStatus is the same as ExtractEvent except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractEventStatus(event *eventsv1beta1.Event, fieldManager string) (*EventApplyConfiguration, error) {
+ return extractEvent(event, fieldManager, "status")
+}
+
+func extractEvent(event *eventsv1beta1.Event, fieldManager string, subresource string) (*EventApplyConfiguration, error) {
b := &EventApplyConfiguration{}
- err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1beta1.Event"), fieldManager, b)
+ err := managedfields.ExtractInto(event, internal.Parser().Type("io.k8s.api.events.v1beta1.Event"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
index 09777e434..145587325 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/daemonset.go
@@ -50,7 +50,7 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
// ExtractDaemonSet extracts the applied configuration owned by fieldManager from
// daemonSet. If no managedFields are found in daemonSet for fieldManager, a
// DaemonSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// daemonSet must be a unmodified DaemonSet API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func DaemonSet(name, namespace string) *DaemonSetApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractDaemonSet(daemonSet *extensionsv1beta1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
+ return extractDaemonSet(daemonSet, fieldManager, "")
+}
+
+// ExtractDaemonSetStatus is the same as ExtractDaemonSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractDaemonSetStatus(daemonSet *extensionsv1beta1.DaemonSet, fieldManager string) (*DaemonSetApplyConfiguration, error) {
+ return extractDaemonSet(daemonSet, fieldManager, "status")
+}
+
+func extractDaemonSet(daemonSet *extensionsv1beta1.DaemonSet, fieldManager string, subresource string) (*DaemonSetApplyConfiguration, error) {
b := &DaemonSetApplyConfiguration{}
- err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.DaemonSet"), fieldManager, b)
+ err := managedfields.ExtractInto(daemonSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.DaemonSet"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
index cc9d8fdc3..e64e4ca38 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/deployment.go
@@ -50,7 +50,7 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// ExtractDeployment extracts the applied configuration owned by fieldManager from
// deployment. If no managedFields are found in deployment for fieldManager, a
// DeploymentApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// deployment must be a unmodified Deployment API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func Deployment(name, namespace string) *DeploymentApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractDeployment(deployment *extensionsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return extractDeployment(deployment, fieldManager, "")
+}
+
+// ExtractDeploymentStatus is the same as ExtractDeployment except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractDeploymentStatus(deployment *extensionsv1beta1.Deployment, fieldManager string) (*DeploymentApplyConfiguration, error) {
+ return extractDeployment(deployment, fieldManager, "status")
+}
+
+func extractDeployment(deployment *extensionsv1beta1.Deployment, fieldManager string, subresource string) (*DeploymentApplyConfiguration, error) {
b := &DeploymentApplyConfiguration{}
- err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Deployment"), fieldManager, b)
+ err := managedfields.ExtractInto(deployment, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Deployment"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
index ac3010666..df4fbc2cd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/ingress.go
@@ -50,7 +50,7 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
// ExtractIngress extracts the applied configuration owned by fieldManager from
// ingress. If no managedFields are found in ingress for fieldManager, a
// IngressApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// ingress must be a unmodified Ingress API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractIngress(ingress *extensionsv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
+ return extractIngress(ingress, fieldManager, "")
+}
+
+// ExtractIngressStatus is the same as ExtractIngress except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractIngressStatus(ingress *extensionsv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
+ return extractIngress(ingress, fieldManager, "status")
+}
+
+func extractIngress(ingress *extensionsv1beta1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
b := &IngressApplyConfiguration{}
- err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Ingress"), fieldManager, b)
+ err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.extensions.v1beta1.Ingress"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
index 0b25c9c96..965292516 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/networkpolicy.go
@@ -49,7 +49,7 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration {
// ExtractNetworkPolicy extracts the applied configuration owned by fieldManager from
// networkPolicy. If no managedFields are found in networkPolicy for fieldManager, a
// NetworkPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// networkPolicy must be a unmodified NetworkPolicy API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractNetworkPolicy(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
+ return extractNetworkPolicy(networkPolicy, fieldManager, "")
+}
+
+// ExtractNetworkPolicyStatus is the same as ExtractNetworkPolicy except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractNetworkPolicyStatus(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
+ return extractNetworkPolicy(networkPolicy, fieldManager, "status")
+}
+
+func extractNetworkPolicy(networkPolicy *extensionsv1beta1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) {
b := &NetworkPolicyApplyConfiguration{}
- err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.NetworkPolicy"), fieldManager, b)
+ err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.NetworkPolicy"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go
index e2c8d8f81..cceec69f9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/podsecuritypolicy.go
@@ -48,7 +48,7 @@ func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration {
// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from
// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a
// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) {
+ return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "")
+}
+
+// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPodSecurityPolicyStatus(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) {
+ return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status")
+}
+
+func extractPodSecurityPolicy(podSecurityPolicy *extensionsv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) {
b := &PodSecurityPolicyApplyConfiguration{}
- err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.PodSecurityPolicy"), fieldManager, b)
+ err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.extensions.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
index dc7e7da78..b57cefc9d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/replicaset.go
@@ -50,7 +50,7 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
// ExtractReplicaSet extracts the applied configuration owned by fieldManager from
// replicaSet. If no managedFields are found in replicaSet for fieldManager, a
// ReplicaSetApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// replicaSet must be a unmodified ReplicaSet API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func ReplicaSet(name, namespace string) *ReplicaSetApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractReplicaSet(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+ return extractReplicaSet(replicaSet, fieldManager, "")
+}
+
+// ExtractReplicaSetStatus is the same as ExtractReplicaSet except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractReplicaSetStatus(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string) (*ReplicaSetApplyConfiguration, error) {
+ return extractReplicaSet(replicaSet, fieldManager, "status")
+}
+
+func extractReplicaSet(replicaSet *extensionsv1beta1.ReplicaSet, fieldManager string, subresource string) (*ReplicaSetApplyConfiguration, error) {
b := &ReplicaSetApplyConfiguration{}
- err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.ReplicaSet"), fieldManager, b)
+ err := managedfields.ExtractInto(replicaSet, internal.Parser().Type("io.k8s.api.extensions.v1beta1.ReplicaSet"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go
new file mode 100644
index 000000000..701145825
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/extensions/v1beta1/scale.go
@@ -0,0 +1,233 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ v1beta1 "k8s.io/api/extensions/v1beta1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// ScaleApplyConfiguration represents an declarative configuration of the Scale type for use
+// with apply.
+type ScaleApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ Spec *v1beta1.ScaleSpec `json:"spec,omitempty"`
+ Status *v1beta1.ScaleStatus `json:"status,omitempty"`
+}
+
+// ScaleApplyConfiguration constructs an declarative configuration of the Scale type for use with
+// apply.
+func Scale() *ScaleApplyConfiguration {
+ return &ScaleApplyConfiguration{}
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithKind(value string) *ScaleApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithAPIVersion(value string) *ScaleApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithName(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithGenerateName(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithNamespace(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithSelfLink sets the SelfLink field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SelfLink field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithSelfLink(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.SelfLink = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithUID(value types.UID) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithResourceVersion(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithGeneration(value int64) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *ScaleApplyConfiguration) WithLabels(entries map[string]string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *ScaleApplyConfiguration) WithAnnotations(entries map[string]string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *ScaleApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *ScaleApplyConfiguration) WithFinalizers(values ...string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+// WithClusterName sets the ClusterName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterName field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithClusterName(value string) *ScaleApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ClusterName = &value
+ return b
+}
+
+func (b *ScaleApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithSpec sets the Spec field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Spec field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithSpec(value v1beta1.ScaleSpec) *ScaleApplyConfiguration {
+ b.Spec = &value
+ return b
+}
+
+// WithStatus sets the Status field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Status field is set to the value of the last call.
+func (b *ScaleApplyConfiguration) WithStatus(value v1beta1.ScaleStatus) *ScaleApplyConfiguration {
+ b.Status = &value
+ return b
+}
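
The newly vendored scale.go follows the standard applyconfiguration-gen builder shape: a `Scale()` constructor plus chainable `With*` setters over TypeMeta, ObjectMeta, Spec, and Status. A small sketch of building such a configuration, assuming arbitrary values for the name, namespace, and replica count; how a typed client ultimately consumes a `ScaleApplyConfiguration` is outside this diff.

```go
package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	scaleac "k8s.io/client-go/applyconfigurations/extensions/v1beta1"
)

func main() {
	// Chain the generated With* setters; each one records intent to own that field.
	sc := scaleac.Scale().
		WithAPIVersion("extensions/v1beta1").
		WithKind("Scale").
		WithName("example").
		WithNamespace("default").
		WithSpec(extensionsv1beta1.ScaleSpec{Replicas: 3})

	fmt.Printf("scale %s/%s -> %d replicas\n", *sc.Namespace, *sc.Name, sc.Spec.Replicas)
}
```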
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go
index 76107d2d5..2a76cf32e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/flowschema.go
@@ -49,7 +49,7 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
// ExtractFlowSchema extracts the applied configuration owned by fieldManager from
// flowSchema. If no managedFields are found in flowSchema for fieldManager, a
// FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// flowSchema must be a unmodified FlowSchema API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
+ return extractFlowSchema(flowSchema, fieldManager, "")
+}
+
+// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
+ return extractFlowSchema(flowSchema, fieldManager, "status")
+}
+
+func extractFlowSchema(flowSchema *flowcontrolv1alpha1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
b := &FlowSchemaApplyConfiguration{}
- err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.FlowSchema"), fieldManager, b)
+ err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.FlowSchema"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go
index 5f497ac78..4f36afe53 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1alpha1/prioritylevelconfiguration.go
@@ -49,7 +49,7 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
// ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from
// priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a
// PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// priorityLevelConfiguration must be a unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+ return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "")
+}
+
+// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+ return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status")
+}
+
+func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1alpha1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
b := &PriorityLevelConfigurationApplyConfiguration{}
- err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration"), fieldManager, b)
+ err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1alpha1.PriorityLevelConfiguration"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
index 2c23ff949..794ff25a7 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/flowschema.go
@@ -49,7 +49,7 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
// ExtractFlowSchema extracts the applied configuration owned by fieldManager from
// flowSchema. If no managedFields are found in flowSchema for fieldManager, a
// FlowSchemaApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// flowSchema must be a unmodified FlowSchema API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func FlowSchema(name string) *FlowSchemaApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractFlowSchema(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
+ return extractFlowSchema(flowSchema, fieldManager, "")
+}
+
+// ExtractFlowSchemaStatus is the same as ExtractFlowSchema except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractFlowSchemaStatus(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager string) (*FlowSchemaApplyConfiguration, error) {
+ return extractFlowSchema(flowSchema, fieldManager, "status")
+}
+
+func extractFlowSchema(flowSchema *flowcontrolv1beta1.FlowSchema, fieldManager string, subresource string) (*FlowSchemaApplyConfiguration, error) {
b := &FlowSchemaApplyConfiguration{}
- err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.FlowSchema"), fieldManager, b)
+ err := managedfields.ExtractInto(flowSchema, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.FlowSchema"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
index 57f118ad8..57d1cd397 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/flowcontrol/v1beta1/prioritylevelconfiguration.go
@@ -49,7 +49,7 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
// ExtractPriorityLevelConfiguration extracts the applied configuration owned by fieldManager from
// priorityLevelConfiguration. If no managedFields are found in priorityLevelConfiguration for fieldManager, a
// PriorityLevelConfigurationApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// priorityLevelConfiguration must be a unmodified PriorityLevelConfiguration API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func PriorityLevelConfiguration(name string) *PriorityLevelConfigurationApplyCon
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+ return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "")
+}
+
+// ExtractPriorityLevelConfigurationStatus is the same as ExtractPriorityLevelConfiguration except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPriorityLevelConfigurationStatus(priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, fieldManager string) (*PriorityLevelConfigurationApplyConfiguration, error) {
+ return extractPriorityLevelConfiguration(priorityLevelConfiguration, fieldManager, "status")
+}
+
+func extractPriorityLevelConfiguration(priorityLevelConfiguration *flowcontrolv1beta1.PriorityLevelConfiguration, fieldManager string, subresource string) (*PriorityLevelConfigurationApplyConfiguration, error) {
b := &PriorityLevelConfigurationApplyConfiguration{}
- err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration"), fieldManager, b)
+ err := managedfields.ExtractInto(priorityLevelConfiguration, internal.Parser().Type("io.k8s.api.flowcontrol.v1beta1.PriorityLevelConfiguration"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
index abe43c0bf..5960666fd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/internal/internal.go
@@ -910,6 +910,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.apps.v1.StatefulSetSpec
map:
fields:
+ - name: minReadySeconds
+ type:
+ scalar: numeric
- name: podManagementPolicy
type:
scalar: string
@@ -943,6 +946,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.apps.v1.StatefulSetStatus
map:
fields:
+ - name: availableReplicas
+ type:
+ scalar: numeric
- name: collisionCount
type:
scalar: numeric
@@ -1191,6 +1197,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.apps.v1beta1.StatefulSetSpec
map:
fields:
+ - name: minReadySeconds
+ type:
+ scalar: numeric
- name: podManagementPolicy
type:
scalar: string
@@ -1224,6 +1233,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.apps.v1beta1.StatefulSetStatus
map:
fields:
+ - name: availableReplicas
+ type:
+ scalar: numeric
- name: collisionCount
type:
scalar: numeric
@@ -1670,6 +1682,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.apps.v1beta2.StatefulSetSpec
map:
fields:
+ - name: minReadySeconds
+ type:
+ scalar: numeric
- name: podManagementPolicy
type:
scalar: string
@@ -1703,6 +1718,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.apps.v1beta2.StatefulSetStatus
map:
fields:
+ - name: availableReplicas
+ type:
+ scalar: numeric
- name: collisionCount
type:
scalar: numeric
@@ -1759,6 +1777,7 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+ elementRelationship: atomic
- name: io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler
map:
fields:
@@ -2655,6 +2674,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: succeeded
type:
scalar: numeric
+ - name: uncountedTerminatedPods
+ type:
+ namedType: io.k8s.api.batch.v1.UncountedTerminatedPods
- name: io.k8s.api.batch.v1.JobTemplateSpec
map:
fields:
@@ -2666,6 +2688,21 @@ var schemaYAML = typed.YAMLObject(`types:
type:
namedType: io.k8s.api.batch.v1.JobSpec
default: {}
+- name: io.k8s.api.batch.v1.UncountedTerminatedPods
+ map:
+ fields:
+ - name: failed
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
+ - name: succeeded
+ type:
+ list:
+ elementType:
+ scalar: string
+ elementRelationship: associative
- name: io.k8s.api.batch.v1beta1.CronJob
map:
fields:
@@ -2788,6 +2825,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.certificates.v1.CertificateSigningRequestSpec
map:
fields:
+ - name: expirationSeconds
+ type:
+ scalar: numeric
- name: extra
type:
map:
@@ -2884,6 +2924,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: io.k8s.api.certificates.v1beta1.CertificateSigningRequestSpec
map:
fields:
+ - name: expirationSeconds
+ type:
+ scalar: numeric
- name: extra
type:
map:
@@ -3335,6 +3378,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: optional
type:
scalar: boolean
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.ConfigMapNodeConfigSource
map:
fields:
@@ -3683,6 +3727,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: targetRef
type:
namedType: io.k8s.api.core.v1.ObjectReference
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.EndpointPort
map:
fields:
@@ -3699,6 +3744,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: protocol
type:
scalar: string
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.EndpointSubset
map:
fields:
@@ -4373,6 +4419,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: name
type:
scalar: string
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.LocalVolumeSource
map:
fields:
@@ -4568,6 +4615,7 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
namedType: io.k8s.api.core.v1.NodeSelectorTerm
elementRelationship: atomic
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.NodeSelectorRequirement
map:
fields:
@@ -4600,6 +4648,7 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
namedType: io.k8s.api.core.v1.NodeSelectorRequirement
elementRelationship: atomic
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.NodeSpec
map:
fields:
@@ -4744,6 +4793,7 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.ObjectReference
map:
fields:
@@ -4768,6 +4818,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: uid
type:
scalar: string
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.PersistentVolume
map:
fields:
@@ -4847,6 +4898,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: dataSource
type:
namedType: io.k8s.api.core.v1.TypedLocalObjectReference
+ - name: dataSourceRef
+ type:
+ namedType: io.k8s.api.core.v1.TypedLocalObjectReference
- name: resources
type:
namedType: io.k8s.api.core.v1.ResourceRequirements
@@ -5290,6 +5344,7 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
+ elementRelationship: atomic
- name: overhead
type:
map:
@@ -5673,6 +5728,7 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
+ elementRelationship: atomic
- name: template
type:
namedType: io.k8s.api.core.v1.PodTemplateSpec
@@ -5717,6 +5773,7 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.ResourceQuota
map:
fields:
@@ -5875,6 +5932,7 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
namedType: io.k8s.api.core.v1.ScopedResourceSelectorRequirement
elementRelationship: atomic
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.ScopedResourceSelectorRequirement
map:
fields:
@@ -5958,6 +6016,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: optional
type:
scalar: boolean
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.SecretProjection
map:
fields:
@@ -5982,6 +6041,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: namespace
type:
scalar: string
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.SecretVolumeSource
map:
fields:
@@ -6195,18 +6255,13 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
+ elementRelationship: atomic
- name: sessionAffinity
type:
scalar: string
- name: sessionAffinityConfig
type:
namedType: io.k8s.api.core.v1.SessionAffinityConfig
- - name: topologyKeys
- type:
- list:
- elementType:
- scalar: string
- elementRelationship: atomic
- name: type
type:
scalar: string
@@ -6345,6 +6400,7 @@ var schemaYAML = typed.YAMLObject(`types:
elementType:
namedType: io.k8s.api.core.v1.TopologySelectorLabelRequirement
elementRelationship: atomic
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.TopologySpreadConstraint
map:
fields:
@@ -6377,6 +6433,7 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+ elementRelationship: atomic
- name: io.k8s.api.core.v1.Volume
map:
fields:
@@ -6562,6 +6619,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: gmsaCredentialSpecName
type:
scalar: string
+ - name: hostProcess
+ type:
+ scalar: boolean
- name: runAsUserName
type:
scalar: string
@@ -6634,6 +6694,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: protocol
type:
scalar: string
+ elementRelationship: atomic
- name: io.k8s.api.discovery.v1.EndpointSlice
map:
fields:
@@ -8859,6 +8920,7 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
+ elementRelationship: atomic
- name: tolerations
type:
list:
@@ -8911,6 +8973,7 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
+ elementRelationship: atomic
- name: tolerations
type:
list:
@@ -8956,12 +9019,29 @@ var schemaYAML = typed.YAMLObject(`types:
map:
elementType:
scalar: string
+ elementRelationship: atomic
- name: tolerations
type:
list:
elementType:
namedType: io.k8s.api.core.v1.Toleration
elementRelationship: atomic
+- name: io.k8s.api.policy.v1.Eviction
+ map:
+ fields:
+ - name: apiVersion
+ type:
+ scalar: string
+ - name: deleteOptions
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions
+ - name: kind
+ type:
+ scalar: string
+ - name: metadata
+ type:
+ namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
+ default: {}
- name: io.k8s.api.policy.v1.PodDisruptionBudget
map:
fields:
@@ -9504,6 +9584,7 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+ elementRelationship: atomic
- name: io.k8s.api.rbac.v1.Subject
map:
fields:
@@ -9521,6 +9602,7 @@ var schemaYAML = typed.YAMLObject(`types:
- name: namespace
type:
scalar: string
+ elementRelationship: atomic
- name: io.k8s.api.rbac.v1alpha1.AggregationRule
map:
fields:
@@ -10617,6 +10699,9 @@ var schemaYAML = typed.YAMLObject(`types:
- name: operation
type:
scalar: string
+ - name: subresource
+ type:
+ scalar: string
- name: time
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time
@@ -10714,6 +10799,7 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
+ elementRelationship: atomic
- name: io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions
map:
fields:
diff --git a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
index 919ad9f12..f4d7e2681 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/meta/v1/managedfieldsentry.go
@@ -25,12 +25,13 @@ import (
// ManagedFieldsEntryApplyConfiguration represents an declarative configuration of the ManagedFieldsEntry type for use
// with apply.
type ManagedFieldsEntryApplyConfiguration struct {
- Manager *string `json:"manager,omitempty"`
- Operation *v1.ManagedFieldsOperationType `json:"operation,omitempty"`
- APIVersion *string `json:"apiVersion,omitempty"`
- Time *v1.Time `json:"time,omitempty"`
- FieldsType *string `json:"fieldsType,omitempty"`
- FieldsV1 *v1.FieldsV1 `json:"fieldsV1,omitempty"`
+ Manager *string `json:"manager,omitempty"`
+ Operation *v1.ManagedFieldsOperationType `json:"operation,omitempty"`
+ APIVersion *string `json:"apiVersion,omitempty"`
+ Time *v1.Time `json:"time,omitempty"`
+ FieldsType *string `json:"fieldsType,omitempty"`
+ FieldsV1 *v1.FieldsV1 `json:"fieldsV1,omitempty"`
+ Subresource *string `json:"subresource,omitempty"`
}
// ManagedFieldsEntryApplyConfiguration constructs an declarative configuration of the ManagedFieldsEntry type for use with
@@ -86,3 +87,11 @@ func (b *ManagedFieldsEntryApplyConfiguration) WithFieldsV1(value v1.FieldsV1) *
b.FieldsV1 = &value
return b
}
+
+// WithSubresource sets the Subresource field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Subresource field is set to the value of the last call.
+func (b *ManagedFieldsEntryApplyConfiguration) WithSubresource(value string) *ManagedFieldsEntryApplyConfiguration {
+ b.Subresource = &value
+ return b
+}
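
The ManagedFieldsEntry apply configuration gains a `Subresource` field with a matching `WithSubresource` setter, mirroring the `subresource` field added to the managed-fields schema in internal.go above. A hedged sketch of constructing such an entry by hand, purely for illustration: managed fields are normally written by the API server, the `ManagedFieldsEntry()` constructor is the usual generated one rather than something shown in this hunk, and the manager name and versions below are assumptions.

```go
package main

import (
	"fmt"

	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

func main() {
	// Record that "example-controller" (assumed name) applied fields to the
	// status subresource; WithSubresource is the setter added in this bump.
	entry := metav1ac.ManagedFieldsEntry().
		WithManager("example-controller").
		WithOperation("Apply").
		WithAPIVersion("extensions/v1beta1").
		WithFieldsType("FieldsV1").
		WithSubresource("status")

	fmt.Println(*entry.Manager, "owns subresource:", *entry.Subresource)
}
```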
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
index 08cbec904..74c0bb273 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingress.go
@@ -50,7 +50,7 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
// ExtractIngress extracts the applied configuration owned by fieldManager from
// ingress. If no managedFields are found in ingress for fieldManager, a
// IngressApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// ingress must be a unmodified Ingress API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractIngress(ingress *apinetworkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
+ return extractIngress(ingress, fieldManager, "")
+}
+
+// ExtractIngressStatus is the same as ExtractIngress except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractIngressStatus(ingress *apinetworkingv1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
+ return extractIngress(ingress, fieldManager, "status")
+}
+
+func extractIngress(ingress *apinetworkingv1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
b := &IngressApplyConfiguration{}
- err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1.Ingress"), fieldManager, b)
+ err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1.Ingress"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
index 105dc7d71..5b36992e4 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/ingressclass.go
@@ -48,7 +48,7 @@ func IngressClass(name string) *IngressClassApplyConfiguration {
// ExtractIngressClass extracts the applied configuration owned by fieldManager from
// ingressClass. If no managedFields are found in ingressClass for fieldManager, a
// IngressClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// ingressClass must be a unmodified IngressClass API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func IngressClass(name string) *IngressClassApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
+ return extractIngressClass(ingressClass, fieldManager, "")
+}
+
+// ExtractIngressClassStatus is the same as ExtractIngressClass except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractIngressClassStatus(ingressClass *apinetworkingv1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
+ return extractIngressClass(ingressClass, fieldManager, "status")
+}
+
+func extractIngressClass(ingressClass *apinetworkingv1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) {
b := &IngressClassApplyConfiguration{}
- err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1.IngressClass"), fieldManager, b)
+ err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1.IngressClass"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
index 061111183..7091d7cfd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1/networkpolicy.go
@@ -49,7 +49,7 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration {
// ExtractNetworkPolicy extracts the applied configuration owned by fieldManager from
// networkPolicy. If no managedFields are found in networkPolicy for fieldManager, a
// NetworkPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// networkPolicy must be a unmodified NetworkPolicy API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func NetworkPolicy(name, namespace string) *NetworkPolicyApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
+ return extractNetworkPolicy(networkPolicy, fieldManager, "")
+}
+
+// ExtractNetworkPolicyStatus is the same as ExtractNetworkPolicy except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractNetworkPolicyStatus(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string) (*NetworkPolicyApplyConfiguration, error) {
+ return extractNetworkPolicy(networkPolicy, fieldManager, "status")
+}
+
+func extractNetworkPolicy(networkPolicy *apinetworkingv1.NetworkPolicy, fieldManager string, subresource string) (*NetworkPolicyApplyConfiguration, error) {
b := &NetworkPolicyApplyConfiguration{}
- err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.networking.v1.NetworkPolicy"), fieldManager, b)
+ err := managedfields.ExtractInto(networkPolicy, internal.Parser().Type("io.k8s.api.networking.v1.NetworkPolicy"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
index 3d15cf127..6b87d1ff3 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingress.go
@@ -50,7 +50,7 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
// ExtractIngress extracts the applied configuration owned by fieldManager from
// ingress. If no managedFields are found in ingress for fieldManager, a
// IngressApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// ingress must be a unmodified Ingress API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func Ingress(name, namespace string) *IngressApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractIngress(ingress *networkingv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
+ return extractIngress(ingress, fieldManager, "")
+}
+
+// ExtractIngressStatus is the same as ExtractIngress except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractIngressStatus(ingress *networkingv1beta1.Ingress, fieldManager string) (*IngressApplyConfiguration, error) {
+ return extractIngress(ingress, fieldManager, "status")
+}
+
+func extractIngress(ingress *networkingv1beta1.Ingress, fieldManager string, subresource string) (*IngressApplyConfiguration, error) {
b := &IngressApplyConfiguration{}
- err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1beta1.Ingress"), fieldManager, b)
+ err := managedfields.ExtractInto(ingress, internal.Parser().Type("io.k8s.api.networking.v1beta1.Ingress"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
index f80fae107..3a13cd083 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/networking/v1beta1/ingressclass.go
@@ -48,7 +48,7 @@ func IngressClass(name string) *IngressClassApplyConfiguration {
// ExtractIngressClass extracts the applied configuration owned by fieldManager from
// ingressClass. If no managedFields are found in ingressClass for fieldManager, a
// IngressClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// ingressClass must be a unmodified IngressClass API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func IngressClass(name string) *IngressClassApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractIngressClass(ingressClass *networkingv1beta1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
+ return extractIngressClass(ingressClass, fieldManager, "")
+}
+
+// ExtractIngressClassStatus is the same as ExtractIngressClass except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractIngressClassStatus(ingressClass *networkingv1beta1.IngressClass, fieldManager string) (*IngressClassApplyConfiguration, error) {
+ return extractIngressClass(ingressClass, fieldManager, "status")
+}
+
+func extractIngressClass(ingressClass *networkingv1beta1.IngressClass, fieldManager string, subresource string) (*IngressClassApplyConfiguration, error) {
b := &IngressClassApplyConfiguration{}
- err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1beta1.IngressClass"), fieldManager, b)
+ err := managedfields.ExtractInto(ingressClass, internal.Parser().Type("io.k8s.api.networking.v1beta1.IngressClass"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
index 9f6cbef59..1521f2cef 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1/runtimeclass.go
@@ -50,7 +50,7 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
// ExtractRuntimeClass extracts the applied configuration owned by fieldManager from
// runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a
// RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// runtimeClass must be a unmodified RuntimeClass API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
+ return extractRuntimeClass(runtimeClass, fieldManager, "")
+}
+
+// ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractRuntimeClassStatus(runtimeClass *apinodev1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
+ return extractRuntimeClass(runtimeClass, fieldManager, "status")
+}
+
+func extractRuntimeClass(runtimeClass *apinodev1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) {
b := &RuntimeClassApplyConfiguration{}
- err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1.RuntimeClass"), fieldManager, b)
+ err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1.RuntimeClass"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
index d40ba68b9..295e763d7 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1alpha1/runtimeclass.go
@@ -48,7 +48,7 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
// ExtractRuntimeClass extracts the applied configuration owned by fieldManager from
// runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a
// RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// runtimeClass must be a unmodified RuntimeClass API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractRuntimeClass(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
+ return extractRuntimeClass(runtimeClass, fieldManager, "")
+}
+
+// ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractRuntimeClassStatus(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
+ return extractRuntimeClass(runtimeClass, fieldManager, "status")
+}
+
+func extractRuntimeClass(runtimeClass *nodev1alpha1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) {
b := &RuntimeClassApplyConfiguration{}
- err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1alpha1.RuntimeClass"), fieldManager, b)
+ err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1alpha1.RuntimeClass"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
index b56bc0ce7..2424e205e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/node/v1beta1/runtimeclass.go
@@ -50,7 +50,7 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
// ExtractRuntimeClass extracts the applied configuration owned by fieldManager from
// runtimeClass. If no managedFields are found in runtimeClass for fieldManager, a
// RuntimeClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// runtimeClass must be a unmodified RuntimeClass API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func RuntimeClass(name string) *RuntimeClassApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
+ return extractRuntimeClass(runtimeClass, fieldManager, "")
+}
+
+// ExtractRuntimeClassStatus is the same as ExtractRuntimeClass except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractRuntimeClassStatus(runtimeClass *nodev1beta1.RuntimeClass, fieldManager string) (*RuntimeClassApplyConfiguration, error) {
+ return extractRuntimeClass(runtimeClass, fieldManager, "status")
+}
+
+func extractRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass, fieldManager string, subresource string) (*RuntimeClassApplyConfiguration, error) {
b := &RuntimeClassApplyConfiguration{}
- err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1beta1.RuntimeClass"), fieldManager, b)
+ err := managedfields.ExtractInto(runtimeClass, internal.Parser().Type("io.k8s.api.node.v1beta1.RuntimeClass"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go
new file mode 100644
index 000000000..07bda7467
--- /dev/null
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/eviction.go
@@ -0,0 +1,267 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ policyv1 "k8s.io/api/policy/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ managedfields "k8s.io/apimachinery/pkg/util/managedfields"
+ internal "k8s.io/client-go/applyconfigurations/internal"
+ v1 "k8s.io/client-go/applyconfigurations/meta/v1"
+)
+
+// EvictionApplyConfiguration represents an declarative configuration of the Eviction type for use
+// with apply.
+type EvictionApplyConfiguration struct {
+ v1.TypeMetaApplyConfiguration `json:",inline"`
+ *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+ DeleteOptions *v1.DeleteOptionsApplyConfiguration `json:"deleteOptions,omitempty"`
+}
+
+// Eviction constructs an declarative configuration of the Eviction type for use with
+// apply.
+func Eviction(name, namespace string) *EvictionApplyConfiguration {
+ b := &EvictionApplyConfiguration{}
+ b.WithName(name)
+ b.WithNamespace(namespace)
+ b.WithKind("Eviction")
+ b.WithAPIVersion("policy/v1")
+ return b
+}
+
+// ExtractEviction extracts the applied configuration owned by fieldManager from
+// eviction. If no managedFields are found in eviction for fieldManager, a
+// EvictionApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned fields any fields.
+// eviction must be a unmodified Eviction API object that was retrieved from the Kubernetes API.
+// ExtractEviction provides a way to perform a extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractEviction(eviction *policyv1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
+ return extractEviction(eviction, fieldManager, "")
+}
+
+// ExtractEvictionStatus is the same as ExtractEviction except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractEvictionStatus(eviction *policyv1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
+ return extractEviction(eviction, fieldManager, "status")
+}
+
+func extractEviction(eviction *policyv1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) {
+ b := &EvictionApplyConfiguration{}
+ err := managedfields.ExtractInto(eviction, internal.Parser().Type("io.k8s.api.policy.v1.Eviction"), fieldManager, b, subresource)
+ if err != nil {
+ return nil, err
+ }
+ b.WithName(eviction.Name)
+ b.WithNamespace(eviction.Namespace)
+
+ b.WithKind("Eviction")
+ b.WithAPIVersion("policy/v1")
+ return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithKind(value string) *EvictionApplyConfiguration {
+ b.Kind = &value
+ return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithAPIVersion(value string) *EvictionApplyConfiguration {
+ b.APIVersion = &value
+ return b
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithName(value string) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Name = &value
+ return b
+}
+
+// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenerateName field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithGenerateName(value string) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.GenerateName = &value
+ return b
+}
+
+// WithNamespace sets the Namespace field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Namespace field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithNamespace(value string) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Namespace = &value
+ return b
+}
+
+// WithSelfLink sets the SelfLink field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SelfLink field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithSelfLink(value string) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.SelfLink = &value
+ return b
+}
+
+// WithUID sets the UID field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the UID field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithUID(value types.UID) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.UID = &value
+ return b
+}
+
+// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ResourceVersion field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithResourceVersion(value string) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ResourceVersion = &value
+ return b
+}
+
+// WithGeneration sets the Generation field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Generation field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithGeneration(value int64) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.Generation = &value
+ return b
+}
+
+// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CreationTimestamp field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithCreationTimestamp(value metav1.Time) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.CreationTimestamp = &value
+ return b
+}
+
+// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionTimestamp = &value
+ return b
+}
+
+// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.DeletionGracePeriodSeconds = &value
+ return b
+}
+
+// WithLabels puts the entries into the Labels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Labels field,
+// overwriting an existing map entries in Labels field with the same key.
+func (b *EvictionApplyConfiguration) WithLabels(entries map[string]string) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Labels == nil && len(entries) > 0 {
+ b.Labels = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Labels[k] = v
+ }
+ return b
+}
+
+// WithAnnotations puts the entries into the Annotations field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, the entries provided by each call will be put on the Annotations field,
+// overwriting an existing map entries in Annotations field with the same key.
+func (b *EvictionApplyConfiguration) WithAnnotations(entries map[string]string) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ if b.Annotations == nil && len(entries) > 0 {
+ b.Annotations = make(map[string]string, len(entries))
+ }
+ for k, v := range entries {
+ b.Annotations[k] = v
+ }
+ return b
+}
+
+// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
+func (b *EvictionApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ if values[i] == nil {
+ panic("nil value passed to WithOwnerReferences")
+ }
+ b.OwnerReferences = append(b.OwnerReferences, *values[i])
+ }
+ return b
+}
+
+// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Finalizers field.
+func (b *EvictionApplyConfiguration) WithFinalizers(values ...string) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ for i := range values {
+ b.Finalizers = append(b.Finalizers, values[i])
+ }
+ return b
+}
+
+// WithClusterName sets the ClusterName field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ClusterName field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithClusterName(value string) *EvictionApplyConfiguration {
+ b.ensureObjectMetaApplyConfigurationExists()
+ b.ClusterName = &value
+ return b
+}
+
+func (b *EvictionApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
+ if b.ObjectMetaApplyConfiguration == nil {
+ b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
+ }
+}
+
+// WithDeleteOptions sets the DeleteOptions field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DeleteOptions field is set to the value of the last call.
+func (b *EvictionApplyConfiguration) WithDeleteOptions(value *v1.DeleteOptionsApplyConfiguration) *EvictionApplyConfiguration {
+ b.DeleteOptions = value
+ return b
+}
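
The new policy/v1 Eviction apply configuration mirrors the existing v1beta1 builder. As a quick illustration of how the chained With* helpers above compose, here is a minimal sketch (pod name, namespace, labels, and grace period are illustrative assumptions, not taken from this change) that builds an Eviction declaratively:

```go
package main

import (
	"fmt"

	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
	policyv1ac "k8s.io/client-go/applyconfigurations/policy/v1"
)

func main() {
	// Eviction(name, namespace) pre-populates Kind and APIVersion ("policy/v1");
	// the remaining metadata and DeleteOptions are layered on via With* calls.
	ev := policyv1ac.Eviction("web-0", "default").
		WithLabels(map[string]string{"app": "web"}).
		WithDeleteOptions(metav1ac.DeleteOptions().WithGracePeriodSeconds(30))

	fmt.Printf("evict %s/%s with grace period %ds\n",
		*ev.Namespace, *ev.Name, *ev.DeleteOptions.GracePeriodSeconds)
}
```

The ExtractEviction/ExtractEvictionStatus pair in this file follows the same subresource plumbing introduced for every other apply configuration in this change: both delegate to the private helper, differing only in the subresource argument ("" versus "status").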
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
index 3233f5386..888c20f60 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1/poddisruptionbudget.go
@@ -50,7 +50,7 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig
// ExtractPodDisruptionBudget extracts the applied configuration owned by fieldManager from
// podDisruptionBudget. If no managedFields are found in podDisruptionBudget for fieldManager, a
// PodDisruptionBudgetApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// podDisruptionBudget must be a unmodified PodDisruptionBudget API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
+ return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "")
+}
+
+// ExtractPodDisruptionBudgetStatus is the same as ExtractPodDisruptionBudget except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
+ return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "status")
+}
+
+func extractPodDisruptionBudget(podDisruptionBudget *apipolicyv1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) {
b := &PodDisruptionBudgetApplyConfiguration{}
- err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1.PodDisruptionBudget"), fieldManager, b)
+ err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1.PodDisruptionBudget"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
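
PodDisruptionBudget has a status subresource, so the new ExtractPodDisruptionBudgetStatus wrapper is one place this plumbing becomes directly useful. Below is a minimal sketch of the extract/modify-in-place/apply round trip the doc comment describes (namespace, object name, and field manager are assumptions; ApplyStatus is the standard typed-client method for status apply, not something added by this diff):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	policyv1ac "k8s.io/client-go/applyconfigurations/policy/v1"
	"k8s.io/client-go/kubernetes"
)

// syncPDBStatus extracts the status fields owned by `manager` from a live
// PodDisruptionBudget and re-applies them to the status subresource.
func syncPDBStatus(ctx context.Context, cs kubernetes.Interface, ns, name, manager string) error {
	// Extract* requires an unmodified object retrieved from the API server.
	live, err := cs.PolicyV1().PodDisruptionBudgets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Recover only the status fields this field manager owns.
	applyCfg, err := policyv1ac.ExtractPodDisruptionBudgetStatus(live, manager)
	if err != nil {
		return err
	}
	fmt.Printf("status fields owned by %q: %+v\n", manager, applyCfg.Status)
	// Re-apply against the status subresource so ownership stays with this manager.
	_, err = cs.PolicyV1().PodDisruptionBudgets(ns).ApplyStatus(ctx, applyCfg,
		metav1.ApplyOptions{FieldManager: manager, Force: true})
	return err
}
```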
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
index 1db2695f7..e1f0f137e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/eviction.go
@@ -49,7 +49,7 @@ func Eviction(name, namespace string) *EvictionApplyConfiguration {
// ExtractEviction extracts the applied configuration owned by fieldManager from
// eviction. If no managedFields are found in eviction for fieldManager, a
// EvictionApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// eviction must be a unmodified Eviction API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func Eviction(name, namespace string) *EvictionApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractEviction(eviction *v1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
+ return extractEviction(eviction, fieldManager, "")
+}
+
+// ExtractEvictionStatus is the same as ExtractEviction except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractEvictionStatus(eviction *v1beta1.Eviction, fieldManager string) (*EvictionApplyConfiguration, error) {
+ return extractEviction(eviction, fieldManager, "status")
+}
+
+func extractEviction(eviction *v1beta1.Eviction, fieldManager string, subresource string) (*EvictionApplyConfiguration, error) {
b := &EvictionApplyConfiguration{}
- err := managedfields.ExtractInto(eviction, internal.Parser().Type("io.k8s.api.policy.v1beta1.Eviction"), fieldManager, b)
+ err := managedfields.ExtractInto(eviction, internal.Parser().Type("io.k8s.api.policy.v1beta1.Eviction"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
index 36d039445..fc28026f5 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/poddisruptionbudget.go
@@ -50,7 +50,7 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig
// ExtractPodDisruptionBudget extracts the applied configuration owned by fieldManager from
// podDisruptionBudget. If no managedFields are found in podDisruptionBudget for fieldManager, a
// PodDisruptionBudgetApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// podDisruptionBudget must be a unmodified PodDisruptionBudget API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func PodDisruptionBudget(name, namespace string) *PodDisruptionBudgetApplyConfig
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPodDisruptionBudget(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
+ return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "")
+}
+
+// ExtractPodDisruptionBudgetStatus is the same as ExtractPodDisruptionBudget except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPodDisruptionBudgetStatus(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, fieldManager string) (*PodDisruptionBudgetApplyConfiguration, error) {
+ return extractPodDisruptionBudget(podDisruptionBudget, fieldManager, "status")
+}
+
+func extractPodDisruptionBudget(podDisruptionBudget *policyv1beta1.PodDisruptionBudget, fieldManager string, subresource string) (*PodDisruptionBudgetApplyConfiguration, error) {
b := &PodDisruptionBudgetApplyConfiguration{}
- err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodDisruptionBudget"), fieldManager, b)
+ err := managedfields.ExtractInto(podDisruptionBudget, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodDisruptionBudget"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go
index 3ff73633c..0500824c9 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/policy/v1beta1/podsecuritypolicy.go
@@ -48,7 +48,7 @@ func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration {
// ExtractPodSecurityPolicy extracts the applied configuration owned by fieldManager from
// podSecurityPolicy. If no managedFields are found in podSecurityPolicy for fieldManager, a
// PodSecurityPolicyApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// podSecurityPolicy must be a unmodified PodSecurityPolicy API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func PodSecurityPolicy(name string) *PodSecurityPolicyApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) {
+ return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "")
+}
+
+// ExtractPodSecurityPolicyStatus is the same as ExtractPodSecurityPolicy except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPodSecurityPolicyStatus(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string) (*PodSecurityPolicyApplyConfiguration, error) {
+ return extractPodSecurityPolicy(podSecurityPolicy, fieldManager, "status")
+}
+
+func extractPodSecurityPolicy(podSecurityPolicy *policyv1beta1.PodSecurityPolicy, fieldManager string, subresource string) (*PodSecurityPolicyApplyConfiguration, error) {
b := &PodSecurityPolicyApplyConfiguration{}
- err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodSecurityPolicy"), fieldManager, b)
+ err := managedfields.ExtractInto(podSecurityPolicy, internal.Parser().Type("io.k8s.api.policy.v1beta1.PodSecurityPolicy"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
index 92ade083e..1b4b51596 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrole.go
@@ -49,7 +49,7 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration {
// ExtractClusterRole extracts the applied configuration owned by fieldManager from
// clusterRole. If no managedFields are found in clusterRole for fieldManager, a
// ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// clusterRole must be a unmodified ClusterRole API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+ return extractClusterRole(clusterRole, fieldManager, "")
+}
+
+// ExtractClusterRoleStatus is the same as ExtractClusterRole except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterRoleStatus(clusterRole *apirbacv1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+ return extractClusterRole(clusterRole, fieldManager, "status")
+}
+
+func extractClusterRole(clusterRole *apirbacv1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) {
b := &ClusterRoleApplyConfiguration{}
- err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRole"), fieldManager, b)
+ err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRole"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
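
The RBAC builders keep the same public ExtractClusterRole entry point while delegating to the new three-argument helper. For reference, a hedged sketch of the extract/modify-in-place/apply workflow the comment above refers to, against the spec side of a ClusterRole (the role name, rule contents, and field manager are illustrative assumptions):

```go
package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	rbacv1ac "k8s.io/client-go/applyconfigurations/rbac/v1"
	"k8s.io/client-go/kubernetes"
)

// grantConfigMapRead re-applies the fields owned by `manager` on a ClusterRole,
// after appending a read-only rule for ConfigMaps.
func grantConfigMapRead(ctx context.Context, cs kubernetes.Interface, manager string) error {
	live, err := cs.RbacV1().ClusterRoles().Get(ctx, "configmap-reader", metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Recover only the fields this manager owns from the live object.
	cr, err := rbacv1ac.ExtractClusterRole(live, manager)
	if err != nil {
		return err
	}
	// Append a rule; WithRules accumulates values across calls.
	cr.WithRules(rbacv1ac.PolicyRule().
		WithAPIGroups("").
		WithResources("configmaps").
		WithVerbs("get", "list", "watch"))
	_, err = cs.RbacV1().ClusterRoles().Apply(ctx, cr,
		metav1.ApplyOptions{FieldManager: manager, Force: true})
	return err
}
```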
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
index 7bbbdaec9..d17fc6e55 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/clusterrolebinding.go
@@ -49,7 +49,7 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
// ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from
// clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a
// ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// clusterRoleBinding must be a unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+ return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "")
+}
+
+// ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterRoleBindingStatus(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+ return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status")
+}
+
+func extractClusterRoleBinding(clusterRoleBinding *apirbacv1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
b := &ClusterRoleBindingApplyConfiguration{}
- err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRoleBinding"), fieldManager, b)
+ err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.ClusterRoleBinding"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
index 772122f45..854441bbd 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/role.go
@@ -49,7 +49,7 @@ func Role(name, namespace string) *RoleApplyConfiguration {
// ExtractRole extracts the applied configuration owned by fieldManager from
// role. If no managedFields are found in role for fieldManager, a
// RoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// role must be a unmodified Role API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func Role(name, namespace string) *RoleApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractRole(role *apirbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
+ return extractRole(role, fieldManager, "")
+}
+
+// ExtractRoleStatus is the same as ExtractRole except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractRoleStatus(role *apirbacv1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
+ return extractRole(role, fieldManager, "status")
+}
+
+func extractRole(role *apirbacv1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) {
b := &RoleApplyConfiguration{}
- err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1.Role"), fieldManager, b)
+ err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1.Role"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
index 85dc476cd..88075a70d 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1/rolebinding.go
@@ -50,7 +50,7 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
// ExtractRoleBinding extracts the applied configuration owned by fieldManager from
// roleBinding. If no managedFields are found in roleBinding for fieldManager, a
// RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// roleBinding must be a unmodified RoleBinding API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
+ return extractRoleBinding(roleBinding, fieldManager, "")
+}
+
+// ExtractRoleBindingStatus is the same as ExtractRoleBinding except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractRoleBindingStatus(roleBinding *apirbacv1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
+ return extractRoleBinding(roleBinding, fieldManager, "status")
+}
+
+func extractRoleBinding(roleBinding *apirbacv1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) {
b := &RoleBindingApplyConfiguration{}
- err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.RoleBinding"), fieldManager, b)
+ err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1.RoleBinding"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
index 4e2d4a63c..ae9cfc474 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrole.go
@@ -49,7 +49,7 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration {
// ExtractClusterRole extracts the applied configuration owned by fieldManager from
// clusterRole. If no managedFields are found in clusterRole for fieldManager, a
// ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// clusterRole must be a unmodified ClusterRole API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractClusterRole(clusterRole *rbacv1alpha1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+ return extractClusterRole(clusterRole, fieldManager, "")
+}
+
+// ExtractClusterRoleStatus is the same as ExtractClusterRole except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterRoleStatus(clusterRole *rbacv1alpha1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+ return extractClusterRole(clusterRole, fieldManager, "status")
+}
+
+func extractClusterRole(clusterRole *rbacv1alpha1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) {
b := &ClusterRoleApplyConfiguration{}
- err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.ClusterRole"), fieldManager, b)
+ err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.ClusterRole"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
index 10c93e5a5..f5a2c03bb 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/clusterrolebinding.go
@@ -49,7 +49,7 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
// ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from
// clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a
// ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// clusterRoleBinding must be a unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+ return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "")
+}
+
+// ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterRoleBindingStatus(clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+ return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status")
+}
+
+func extractClusterRoleBinding(clusterRoleBinding *rbacv1alpha1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
b := &ClusterRoleBindingApplyConfiguration{}
- err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.ClusterRoleBinding"), fieldManager, b)
+ err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.ClusterRoleBinding"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
index d9bf15b4c..ec5bebcec 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/role.go
@@ -49,7 +49,7 @@ func Role(name, namespace string) *RoleApplyConfiguration {
// ExtractRole extracts the applied configuration owned by fieldManager from
// role. If no managedFields are found in role for fieldManager, a
// RoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// role must be a unmodified Role API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func Role(name, namespace string) *RoleApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractRole(role *rbacv1alpha1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
+ return extractRole(role, fieldManager, "")
+}
+
+// ExtractRoleStatus is the same as ExtractRole except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractRoleStatus(role *rbacv1alpha1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
+ return extractRole(role, fieldManager, "status")
+}
+
+func extractRole(role *rbacv1alpha1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) {
b := &RoleApplyConfiguration{}
- err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.Role"), fieldManager, b)
+ err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.Role"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
index ca6f563a3..930f9489f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1alpha1/rolebinding.go
@@ -50,7 +50,7 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
// ExtractRoleBinding extracts the applied configuration owned by fieldManager from
// roleBinding. If no managedFields are found in roleBinding for fieldManager, a
// RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// roleBinding must be a unmodified RoleBinding API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractRoleBinding(roleBinding *rbacv1alpha1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
+ return extractRoleBinding(roleBinding, fieldManager, "")
+}
+
+// ExtractRoleBindingStatus is the same as ExtractRoleBinding except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractRoleBindingStatus(roleBinding *rbacv1alpha1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
+ return extractRoleBinding(roleBinding, fieldManager, "status")
+}
+
+func extractRoleBinding(roleBinding *rbacv1alpha1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) {
b := &RoleBindingApplyConfiguration{}
- err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.RoleBinding"), fieldManager, b)
+ err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1alpha1.RoleBinding"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
index 4f5f28170..1574f8f66 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrole.go
@@ -49,7 +49,7 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration {
// ExtractClusterRole extracts the applied configuration owned by fieldManager from
// clusterRole. If no managedFields are found in clusterRole for fieldManager, a
// ClusterRoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// clusterRole must be a unmodified ClusterRole API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func ClusterRole(name string) *ClusterRoleApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractClusterRole(clusterRole *rbacv1beta1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+ return extractClusterRole(clusterRole, fieldManager, "")
+}
+
+// ExtractClusterRoleStatus is the same as ExtractClusterRole except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterRoleStatus(clusterRole *rbacv1beta1.ClusterRole, fieldManager string) (*ClusterRoleApplyConfiguration, error) {
+ return extractClusterRole(clusterRole, fieldManager, "status")
+}
+
+func extractClusterRole(clusterRole *rbacv1beta1.ClusterRole, fieldManager string, subresource string) (*ClusterRoleApplyConfiguration, error) {
b := &ClusterRoleApplyConfiguration{}
- err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1beta1.ClusterRole"), fieldManager, b)
+ err := managedfields.ExtractInto(clusterRole, internal.Parser().Type("io.k8s.api.rbac.v1beta1.ClusterRole"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
index 6fbe0aa98..152a23b49 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/clusterrolebinding.go
@@ -49,7 +49,7 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
// ExtractClusterRoleBinding extracts the applied configuration owned by fieldManager from
// clusterRoleBinding. If no managedFields are found in clusterRoleBinding for fieldManager, a
// ClusterRoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// clusterRoleBinding must be a unmodified ClusterRoleBinding API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func ClusterRoleBinding(name string) *ClusterRoleBindingApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractClusterRoleBinding(clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+ return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "")
+}
+
+// ExtractClusterRoleBindingStatus is the same as ExtractClusterRoleBinding except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractClusterRoleBindingStatus(clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, fieldManager string) (*ClusterRoleBindingApplyConfiguration, error) {
+ return extractClusterRoleBinding(clusterRoleBinding, fieldManager, "status")
+}
+
+func extractClusterRoleBinding(clusterRoleBinding *rbacv1beta1.ClusterRoleBinding, fieldManager string, subresource string) (*ClusterRoleBindingApplyConfiguration, error) {
b := &ClusterRoleBindingApplyConfiguration{}
- err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1beta1.ClusterRoleBinding"), fieldManager, b)
+ err := managedfields.ExtractInto(clusterRoleBinding, internal.Parser().Type("io.k8s.api.rbac.v1beta1.ClusterRoleBinding"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
index 7e3744afb..dc6ff2a40 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/role.go
@@ -49,7 +49,7 @@ func Role(name, namespace string) *RoleApplyConfiguration {
// ExtractRole extracts the applied configuration owned by fieldManager from
// role. If no managedFields are found in role for fieldManager, a
// RoleApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// role must be a unmodified Role API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func Role(name, namespace string) *RoleApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractRole(role *rbacv1beta1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
+ return extractRole(role, fieldManager, "")
+}
+
+// ExtractRoleStatus is the same as ExtractRole except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractRoleStatus(role *rbacv1beta1.Role, fieldManager string) (*RoleApplyConfiguration, error) {
+ return extractRole(role, fieldManager, "status")
+}
+
+func extractRole(role *rbacv1beta1.Role, fieldManager string, subresource string) (*RoleApplyConfiguration, error) {
b := &RoleApplyConfiguration{}
- err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1beta1.Role"), fieldManager, b)
+ err := managedfields.ExtractInto(role, internal.Parser().Type("io.k8s.api.rbac.v1beta1.Role"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
index ee3e6c55d..aeef6ec8f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/rbac/v1beta1/rolebinding.go
@@ -50,7 +50,7 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
// ExtractRoleBinding extracts the applied configuration owned by fieldManager from
// roleBinding. If no managedFields are found in roleBinding for fieldManager, a
// RoleBindingApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// roleBinding must be a unmodified RoleBinding API object that was retrieved from the Kubernetes API.
@@ -59,8 +59,19 @@ func RoleBinding(name, namespace string) *RoleBindingApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractRoleBinding(roleBinding *rbacv1beta1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
+ return extractRoleBinding(roleBinding, fieldManager, "")
+}
+
+// ExtractRoleBindingStatus is the same as ExtractRoleBinding except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractRoleBindingStatus(roleBinding *rbacv1beta1.RoleBinding, fieldManager string) (*RoleBindingApplyConfiguration, error) {
+ return extractRoleBinding(roleBinding, fieldManager, "status")
+}
+
+func extractRoleBinding(roleBinding *rbacv1beta1.RoleBinding, fieldManager string, subresource string) (*RoleBindingApplyConfiguration, error) {
b := &RoleBindingApplyConfiguration{}
- err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1beta1.RoleBinding"), fieldManager, b)
+ err := managedfields.ExtractInto(roleBinding, internal.Parser().Type("io.k8s.api.rbac.v1beta1.RoleBinding"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
index 6a942ecf1..5d528ea7c 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1/priorityclass.go
@@ -52,7 +52,7 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration {
// ExtractPriorityClass extracts the applied configuration owned by fieldManager from
// priorityClass. If no managedFields are found in priorityClass for fieldManager, a
// PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// priorityClass must be a unmodified PriorityClass API object that was retrieved from the Kubernetes API.
@@ -61,8 +61,19 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPriorityClass(priorityClass *schedulingv1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+ return extractPriorityClass(priorityClass, fieldManager, "")
+}
+
+// ExtractPriorityClassStatus is the same as ExtractPriorityClass except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPriorityClassStatus(priorityClass *schedulingv1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+ return extractPriorityClass(priorityClass, fieldManager, "status")
+}
+
+func extractPriorityClass(priorityClass *schedulingv1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
b := &PriorityClassApplyConfiguration{}
- err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1.PriorityClass"), fieldManager, b)
+ err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1.PriorityClass"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
index 46dc278d9..2b2aac316 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1alpha1/priorityclass.go
@@ -52,7 +52,7 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration {
// ExtractPriorityClass extracts the applied configuration owned by fieldManager from
// priorityClass. If no managedFields are found in priorityClass for fieldManager, a
// PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// priorityClass must be a unmodified PriorityClass API object that was retrieved from the Kubernetes API.
@@ -61,8 +61,19 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+ return extractPriorityClass(priorityClass, fieldManager, "")
+}
+
+// ExtractPriorityClassStatus is the same as ExtractPriorityClass except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPriorityClassStatus(priorityClass *v1alpha1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+ return extractPriorityClass(priorityClass, fieldManager, "status")
+}
+
+func extractPriorityClass(priorityClass *v1alpha1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
b := &PriorityClassApplyConfiguration{}
- err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1alpha1.PriorityClass"), fieldManager, b)
+ err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1alpha1.PriorityClass"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
index 9327b7b61..14b1feea6 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/scheduling/v1beta1/priorityclass.go
@@ -52,7 +52,7 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration {
// ExtractPriorityClass extracts the applied configuration owned by fieldManager from
// priorityClass. If no managedFields are found in priorityClass for fieldManager, a
// PriorityClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// priorityClass must be a unmodified PriorityClass API object that was retrieved from the Kubernetes API.
@@ -61,8 +61,19 @@ func PriorityClass(name string) *PriorityClassApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+ return extractPriorityClass(priorityClass, fieldManager, "")
+}
+
+// ExtractPriorityClassStatus is the same as ExtractPriorityClass except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractPriorityClassStatus(priorityClass *v1beta1.PriorityClass, fieldManager string) (*PriorityClassApplyConfiguration, error) {
+ return extractPriorityClass(priorityClass, fieldManager, "status")
+}
+
+func extractPriorityClass(priorityClass *v1beta1.PriorityClass, fieldManager string, subresource string) (*PriorityClassApplyConfiguration, error) {
b := &PriorityClassApplyConfiguration{}
- err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1beta1.PriorityClass"), fieldManager, b)
+ err := managedfields.ExtractInto(priorityClass, internal.Parser().Type("io.k8s.api.scheduling.v1beta1.PriorityClass"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
index 31b35446e..cf9073a0f 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csidriver.go
@@ -48,7 +48,7 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration {
// ExtractCSIDriver extracts the applied configuration owned by fieldManager from
// cSIDriver. If no managedFields are found in cSIDriver for fieldManager, a
// CSIDriverApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// cSIDriver must be a unmodified CSIDriver API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
+ return extractCSIDriver(cSIDriver, fieldManager, "")
+}
+
+// ExtractCSIDriverStatus is the same as ExtractCSIDriver except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractCSIDriverStatus(cSIDriver *apistoragev1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
+ return extractCSIDriver(cSIDriver, fieldManager, "status")
+}
+
+func extractCSIDriver(cSIDriver *apistoragev1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) {
b := &CSIDriverApplyConfiguration{}
- err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1.CSIDriver"), fieldManager, b)
+ err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1.CSIDriver"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
index 8da150bd5..e65582d89 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/csinode.go
@@ -48,7 +48,7 @@ func CSINode(name string) *CSINodeApplyConfiguration {
// ExtractCSINode extracts the applied configuration owned by fieldManager from
// cSINode. If no managedFields are found in cSINode for fieldManager, a
// CSINodeApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// cSINode must be a unmodified CSINode API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func CSINode(name string) *CSINodeApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractCSINode(cSINode *apistoragev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
+ return extractCSINode(cSINode, fieldManager, "")
+}
+
+// ExtractCSINodeStatus is the same as ExtractCSINode except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractCSINodeStatus(cSINode *apistoragev1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
+ return extractCSINode(cSINode, fieldManager, "status")
+}
+
+func extractCSINode(cSINode *apistoragev1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) {
b := &CSINodeApplyConfiguration{}
- err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1.CSINode"), fieldManager, b)
+ err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1.CSINode"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
index ac5b8ca8d..2df999c24 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/storageclass.go
@@ -56,7 +56,7 @@ func StorageClass(name string) *StorageClassApplyConfiguration {
// ExtractStorageClass extracts the applied configuration owned by fieldManager from
// storageClass. If no managedFields are found in storageClass for fieldManager, a
// StorageClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// storageClass must be a unmodified StorageClass API object that was retrieved from the Kubernetes API.
@@ -65,8 +65,19 @@ func StorageClass(name string) *StorageClassApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractStorageClass(storageClass *storagev1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
+ return extractStorageClass(storageClass, fieldManager, "")
+}
+
+// ExtractStorageClassStatus is the same as ExtractStorageClass except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractStorageClassStatus(storageClass *storagev1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
+ return extractStorageClass(storageClass, fieldManager, "status")
+}
+
+func extractStorageClass(storageClass *storagev1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) {
b := &StorageClassApplyConfiguration{}
- err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1.StorageClass"), fieldManager, b)
+ err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1.StorageClass"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
index 97c37c609..5fd3d4d8e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1/volumeattachment.go
@@ -49,7 +49,7 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
// ExtractVolumeAttachment extracts the applied configuration owned by fieldManager from
// volumeAttachment. If no managedFields are found in volumeAttachment for fieldManager, a
// VolumeAttachmentApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// volumeAttachment must be a unmodified VolumeAttachment API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
+ return extractVolumeAttachment(volumeAttachment, fieldManager, "")
+}
+
+// ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractVolumeAttachmentStatus(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
+ return extractVolumeAttachment(volumeAttachment, fieldManager, "status")
+}
+
+func extractVolumeAttachment(volumeAttachment *apistoragev1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) {
b := &VolumeAttachmentApplyConfiguration{}
- err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1.VolumeAttachment"), fieldManager, b)
+ err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1.VolumeAttachment"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
index 32cf1b9f8..a7ad0717b 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/csistoragecapacity.go
@@ -53,7 +53,7 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur
// ExtractCSIStorageCapacity extracts the applied configuration owned by fieldManager from
// cSIStorageCapacity. If no managedFields are found in cSIStorageCapacity for fieldManager, a
// CSIStorageCapacityApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// cSIStorageCapacity must be a unmodified CSIStorageCapacity API object that was retrieved from the Kubernetes API.
@@ -62,8 +62,19 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
+ return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "")
+}
+
+// ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
+ return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status")
+}
+
+func extractCSIStorageCapacity(cSIStorageCapacity *v1alpha1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
b := &CSIStorageCapacityApplyConfiguration{}
- err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1alpha1.CSIStorageCapacity"), fieldManager, b)
+ err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1alpha1.CSIStorageCapacity"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
index bcc0f7729..7a3091964 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1alpha1/volumeattachment.go
@@ -49,7 +49,7 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
// ExtractVolumeAttachment extracts the applied configuration owned by fieldManager from
// volumeAttachment. If no managedFields are found in volumeAttachment for fieldManager, a
// VolumeAttachmentApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// volumeAttachment must be a unmodified VolumeAttachment API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractVolumeAttachment(volumeAttachment *storagev1alpha1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
+ return extractVolumeAttachment(volumeAttachment, fieldManager, "")
+}
+
+// ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractVolumeAttachmentStatus(volumeAttachment *storagev1alpha1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
+ return extractVolumeAttachment(volumeAttachment, fieldManager, "status")
+}
+
+func extractVolumeAttachment(volumeAttachment *storagev1alpha1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) {
b := &VolumeAttachmentApplyConfiguration{}
- err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttachment"), fieldManager, b)
+ err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1alpha1.VolumeAttachment"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
index aad9ce500..4ff0fcdf1 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csidriver.go
@@ -48,7 +48,7 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration {
// ExtractCSIDriver extracts the applied configuration owned by fieldManager from
// cSIDriver. If no managedFields are found in cSIDriver for fieldManager, a
// CSIDriverApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// cSIDriver must be a unmodified CSIDriver API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func CSIDriver(name string) *CSIDriverApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractCSIDriver(cSIDriver *storagev1beta1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
+ return extractCSIDriver(cSIDriver, fieldManager, "")
+}
+
+// ExtractCSIDriverStatus is the same as ExtractCSIDriver except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractCSIDriverStatus(cSIDriver *storagev1beta1.CSIDriver, fieldManager string) (*CSIDriverApplyConfiguration, error) {
+ return extractCSIDriver(cSIDriver, fieldManager, "status")
+}
+
+func extractCSIDriver(cSIDriver *storagev1beta1.CSIDriver, fieldManager string, subresource string) (*CSIDriverApplyConfiguration, error) {
b := &CSIDriverApplyConfiguration{}
- err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIDriver"), fieldManager, b)
+ err := managedfields.ExtractInto(cSIDriver, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIDriver"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
index 5d151f28e..fce97b456 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csinode.go
@@ -48,7 +48,7 @@ func CSINode(name string) *CSINodeApplyConfiguration {
// ExtractCSINode extracts the applied configuration owned by fieldManager from
// cSINode. If no managedFields are found in cSINode for fieldManager, a
// CSINodeApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// cSINode must be a unmodified CSINode API object that was retrieved from the Kubernetes API.
@@ -57,8 +57,19 @@ func CSINode(name string) *CSINodeApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractCSINode(cSINode *storagev1beta1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
+ return extractCSINode(cSINode, fieldManager, "")
+}
+
+// ExtractCSINodeStatus is the same as ExtractCSINode except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractCSINodeStatus(cSINode *storagev1beta1.CSINode, fieldManager string) (*CSINodeApplyConfiguration, error) {
+ return extractCSINode(cSINode, fieldManager, "status")
+}
+
+func extractCSINode(cSINode *storagev1beta1.CSINode, fieldManager string, subresource string) (*CSINodeApplyConfiguration, error) {
b := &CSINodeApplyConfiguration{}
- err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSINode"), fieldManager, b)
+ err := managedfields.ExtractInto(cSINode, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSINode"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
index e0c6bd635..afd2e2a9e 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/csistoragecapacity.go
@@ -53,7 +53,7 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur
// ExtractCSIStorageCapacity extracts the applied configuration owned by fieldManager from
// cSIStorageCapacity. If no managedFields are found in cSIStorageCapacity for fieldManager, a
// CSIStorageCapacityApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// cSIStorageCapacity must be a unmodified CSIStorageCapacity API object that was retrieved from the Kubernetes API.
@@ -62,8 +62,19 @@ func CSIStorageCapacity(name, namespace string) *CSIStorageCapacityApplyConfigur
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
+ return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "")
+}
+
+// ExtractCSIStorageCapacityStatus is the same as ExtractCSIStorageCapacity except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractCSIStorageCapacityStatus(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string) (*CSIStorageCapacityApplyConfiguration, error) {
+ return extractCSIStorageCapacity(cSIStorageCapacity, fieldManager, "status")
+}
+
+func extractCSIStorageCapacity(cSIStorageCapacity *v1beta1.CSIStorageCapacity, fieldManager string, subresource string) (*CSIStorageCapacityApplyConfiguration, error) {
b := &CSIStorageCapacityApplyConfiguration{}
- err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIStorageCapacity"), fieldManager, b)
+ err := managedfields.ExtractInto(cSIStorageCapacity, internal.Parser().Type("io.k8s.api.storage.v1beta1.CSIStorageCapacity"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
index d88da6a20..a4b924ee8 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/storageclass.go
@@ -56,7 +56,7 @@ func StorageClass(name string) *StorageClassApplyConfiguration {
// ExtractStorageClass extracts the applied configuration owned by fieldManager from
// storageClass. If no managedFields are found in storageClass for fieldManager, a
// StorageClassApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// storageClass must be a unmodified StorageClass API object that was retrieved from the Kubernetes API.
@@ -65,8 +65,19 @@ func StorageClass(name string) *StorageClassApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
+ return extractStorageClass(storageClass, fieldManager, "")
+}
+
+// ExtractStorageClassStatus is the same as ExtractStorageClass except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractStorageClassStatus(storageClass *v1beta1.StorageClass, fieldManager string) (*StorageClassApplyConfiguration, error) {
+ return extractStorageClass(storageClass, fieldManager, "status")
+}
+
+func extractStorageClass(storageClass *v1beta1.StorageClass, fieldManager string, subresource string) (*StorageClassApplyConfiguration, error) {
b := &StorageClassApplyConfiguration{}
- err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.StorageClass"), fieldManager, b)
+ err := managedfields.ExtractInto(storageClass, internal.Parser().Type("io.k8s.api.storage.v1beta1.StorageClass"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
index 3a3ed874f..553bfee98 100644
--- a/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
+++ b/vendor/k8s.io/client-go/applyconfigurations/storage/v1beta1/volumeattachment.go
@@ -49,7 +49,7 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
// ExtractVolumeAttachment extracts the applied configuration owned by fieldManager from
// volumeAttachment. If no managedFields are found in volumeAttachment for fieldManager, a
// VolumeAttachmentApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. Is is possible that no managed fields were found for because other
+// APIVersion and Kind populated. It is possible that no managed fields were found for because other
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
// the fieldManager never owned fields any fields.
// volumeAttachment must be a unmodified VolumeAttachment API object that was retrieved from the Kubernetes API.
@@ -58,8 +58,19 @@ func VolumeAttachment(name string) *VolumeAttachmentApplyConfiguration {
// applied if another fieldManager has updated or force applied any of the previously applied fields.
// Experimental!
func ExtractVolumeAttachment(volumeAttachment *storagev1beta1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
+ return extractVolumeAttachment(volumeAttachment, fieldManager, "")
+}
+
+// ExtractVolumeAttachmentStatus is the same as ExtractVolumeAttachment except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractVolumeAttachmentStatus(volumeAttachment *storagev1beta1.VolumeAttachment, fieldManager string) (*VolumeAttachmentApplyConfiguration, error) {
+ return extractVolumeAttachment(volumeAttachment, fieldManager, "status")
+}
+
+func extractVolumeAttachment(volumeAttachment *storagev1beta1.VolumeAttachment, fieldManager string, subresource string) (*VolumeAttachmentApplyConfiguration, error) {
b := &VolumeAttachmentApplyConfiguration{}
- err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttachment"), fieldManager, b)
+ err := managedfields.ExtractInto(volumeAttachment, internal.Parser().Type("io.k8s.api.storage.v1beta1.VolumeAttachment"), fieldManager, b, subresource)
if err != nil {
return nil, err
}
diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go
index 57404e0b2..87de32970 100644
--- a/vendor/k8s.io/client-go/discovery/discovery_client.go
+++ b/vendor/k8s.io/client-go/discovery/discovery_client.go
@@ -26,6 +26,7 @@ import (
"sync"
"time"
+ //lint:ignore SA1019 Keep using module since it's still being maintained and the api of google.golang.org/protobuf/proto differs
"github.com/golang/protobuf/proto"
openapi_v2 "github.com/googleapis/gnostic/openapiv2"
@@ -41,7 +42,7 @@ import (
)
const (
- // defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. ThirdPartyResources).
+ // defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. CustomResourceDefinitions).
defaultRetries = 2
// protobuf mime type
mimePb = "application/com.github.proto-openapi.spec.v2@v1.0+protobuf"
@@ -95,7 +96,7 @@ type ServerResourcesInterface interface {
//
// Deprecated: use ServerGroupsAndResources instead.
ServerResources() ([]*metav1.APIResourceList, error)
- // ServerResources returns the supported groups and resources for all groups and versions.
+ // ServerGroupsAndResources returns the supported groups and resources for all groups and versions.
//
// The returned group and resource lists might be non-nil with partial results even in the
// case of non-nil error.
diff --git a/vendor/k8s.io/client-go/dynamic/fake/simple.go b/vendor/k8s.io/client-go/dynamic/fake/simple.go
index cdcbd54d1..dee16b245 100644
--- a/vendor/k8s.io/client-go/dynamic/fake/simple.go
+++ b/vendor/k8s.io/client-go/dynamic/fake/simple.go
@@ -111,7 +111,7 @@ func NewSimpleDynamicClientWithCustomListKinds(scheme *runtime.Scheme, gvrToList
}
}
- cs := &FakeDynamicClient{scheme: scheme, gvrToListKind: completeGVRToListKind}
+ cs := &FakeDynamicClient{scheme: scheme, gvrToListKind: completeGVRToListKind, tracker: o}
cs.AddReactor("*", "*", testing.ObjectReaction(o))
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
@@ -133,6 +133,7 @@ type FakeDynamicClient struct {
testing.Fake
scheme *runtime.Scheme
gvrToListKind map[schema.GroupVersionResource]string
+ tracker testing.ObjectTracker
}
type dynamicResourceClient struct {
@@ -142,7 +143,14 @@ type dynamicResourceClient struct {
listKind string
}
-var _ dynamic.Interface = &FakeDynamicClient{}
+var (
+ _ dynamic.Interface = &FakeDynamicClient{}
+ _ testing.FakeClient = &FakeDynamicClient{}
+)
+
+func (c *FakeDynamicClient) Tracker() testing.ObjectTracker {
+ return c.tracker
+}
func (c *FakeDynamicClient) Resource(resource schema.GroupVersionResource) dynamic.NamespaceableResourceInterface {
return &dynamicResourceClient{client: c, resource: resource, listKind: c.gvrToListKind[resource]}
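Note: the fake dynamic client now records its ObjectTracker and satisfies testing.FakeClient, so tests can reach the tracker directly instead of going only through reactors. A rough sketch under the assumption that unstructured test objects are used (names and the seeded ConfigMap are illustrative):

package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	dynamicfake "k8s.io/client-go/dynamic/fake"
)

// newSeededFakeDynamic builds a fake dynamic client and then seeds an extra
// object through the newly exposed Tracker(), out of band of any reactor.
func newSeededFakeDynamic(scheme *runtime.Scheme, objs ...runtime.Object) (*dynamicfake.FakeDynamicClient, error) {
	client := dynamicfake.NewSimpleDynamicClient(scheme, objs...)
	extra := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]interface{}{"name": "seeded", "namespace": "default"},
	}}
	// Tracker() returns the testing.ObjectTracker backing the fake client.
	if err := client.Tracker().Add(extra); err != nil {
		return nil, err
	}
	return client, nil
}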
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
index 51545107d..ccc2049ff 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go
@@ -30,6 +30,7 @@ import (
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
+ applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -55,6 +56,7 @@ type DeploymentInterface interface {
ApplyStatus(ctx context.Context, deployment *appsv1.DeploymentApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Deployment, err error)
GetScale(ctx context.Context, deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
UpdateScale(ctx context.Context, deploymentName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
+ ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error)
DeploymentExpansion
}
@@ -287,3 +289,28 @@ func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, sc
Into(result)
return
}
+
+// ApplyScale takes top resource name and the apply declarative configuration for scale,
+// applies it and returns the applied scale, and an error, if there is any.
+func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) {
+ if scale == nil {
+ return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(scale)
+ if err != nil {
+ return nil, err
+ }
+
+ result = &autoscalingv1.Scale{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Namespace(c.ns).
+ Resource("deployments").
+ Name(deploymentName).
+ SubResource("scale").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
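Note: the same ApplyScale addition appears below for ReplicaSets, StatefulSets, the apps/v1beta2 StatefulSet, and the extensions/v1beta1 Deployment and ReplicaSet clients. A minimal sketch of calling it, assuming the generated Scale()/ScaleSpec() constructors in k8s.io/client-go/applyconfigurations/autoscaling/v1 (the field-manager name and Force setting are illustrative):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	autoscalingv1ac "k8s.io/client-go/applyconfigurations/autoscaling/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleDeployment issues a server-side apply against the scale subresource
// through the new ApplyScale method instead of GetScale/UpdateScale.
func scaleDeployment(ctx context.Context, cs kubernetes.Interface, ns, name string, replicas int32) error {
	scale := autoscalingv1ac.Scale().
		WithAPIVersion("autoscaling/v1").
		WithKind("Scale").
		WithSpec(autoscalingv1ac.ScaleSpec().WithReplicas(replicas))
	_, err := cs.AppsV1().Deployments(ns).ApplyScale(ctx, name, scale,
		metav1.ApplyOptions{FieldManager: "example-scaler", Force: true})
	return err
}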
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
index 7a8f0cd84..917ed521f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go
@@ -30,6 +30,7 @@ import (
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
+ applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -55,6 +56,7 @@ type ReplicaSetInterface interface {
ApplyStatus(ctx context.Context, replicaSet *appsv1.ReplicaSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ReplicaSet, err error)
GetScale(ctx context.Context, replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
UpdateScale(ctx context.Context, replicaSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
+ ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error)
ReplicaSetExpansion
}
@@ -287,3 +289,28 @@ func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, sc
Into(result)
return
}
+
+// ApplyScale takes top resource name and the apply declarative configuration for scale,
+// applies it and returns the applied scale, and an error, if there is any.
+func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) {
+ if scale == nil {
+ return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(scale)
+ if err != nil {
+ return nil, err
+ }
+
+ result = &autoscalingv1.Scale{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Namespace(c.ns).
+ Resource("replicasets").
+ Name(replicaSetName).
+ SubResource("scale").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
index 5626e2baa..d1fbb915d 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go
@@ -30,6 +30,7 @@ import (
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
appsv1 "k8s.io/client-go/applyconfigurations/apps/v1"
+ applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1"
scheme "k8s.io/client-go/kubernetes/scheme"
rest "k8s.io/client-go/rest"
)
@@ -55,6 +56,7 @@ type StatefulSetInterface interface {
ApplyStatus(ctx context.Context, statefulSet *appsv1.StatefulSetApplyConfiguration, opts metav1.ApplyOptions) (result *v1.StatefulSet, err error)
GetScale(ctx context.Context, statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
UpdateScale(ctx context.Context, statefulSetName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
+ ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error)
StatefulSetExpansion
}
@@ -287,3 +289,28 @@ func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string,
Into(result)
return
}
+
+// ApplyScale takes top resource name and the apply declarative configuration for scale,
+// applies it and returns the applied scale, and an error, if there is any.
+func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (result *autoscalingv1.Scale, err error) {
+ if scale == nil {
+ return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(scale)
+ if err != nil {
+ return nil, err
+ }
+
+ result = &autoscalingv1.Scale{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Namespace(c.ns).
+ Resource("statefulsets").
+ Name(statefulSetName).
+ SubResource("scale").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
index 73a12c996..0416675d6 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go
@@ -54,6 +54,7 @@ type StatefulSetInterface interface {
ApplyStatus(ctx context.Context, statefulSet *appsv1beta2.StatefulSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.StatefulSet, err error)
GetScale(ctx context.Context, statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error)
UpdateScale(ctx context.Context, statefulSetName string, scale *v1beta2.Scale, opts v1.UpdateOptions) (*v1beta2.Scale, error)
+ ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta2.Scale, error)
StatefulSetExpansion
}
@@ -286,3 +287,28 @@ func (c *statefulSets) UpdateScale(ctx context.Context, statefulSetName string,
Into(result)
return
}
+
+// ApplyScale takes top resource name and the apply declarative configuration for scale,
+// applies it and returns the applied scale, and an error, if there is any.
+func (c *statefulSets) ApplyScale(ctx context.Context, statefulSetName string, scale *appsv1beta2.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta2.Scale, err error) {
+ if scale == nil {
+ return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(scale)
+ if err != nil {
+ return nil, err
+ }
+
+ result = &v1beta2.Scale{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Namespace(c.ns).
+ Resource("statefulsets").
+ Name(statefulSetName).
+ SubResource("scale").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
index 14b7bd0f0..63122cf3f 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go
@@ -52,8 +52,7 @@ type PodInterface interface {
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Pod, err error)
Apply(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error)
ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Pod, err error)
- GetEphemeralContainers(ctx context.Context, podName string, options metav1.GetOptions) (*v1.EphemeralContainers, error)
- UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers, opts metav1.UpdateOptions) (*v1.EphemeralContainers, error)
+ UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (*v1.Pod, error)
PodExpansion
}
@@ -258,30 +257,16 @@ func (c *pods) ApplyStatus(ctx context.Context, pod *corev1.PodApplyConfiguratio
return
}
-// GetEphemeralContainers takes name of the pod, and returns the corresponding v1.EphemeralContainers object, and an error if there is any.
-func (c *pods) GetEphemeralContainers(ctx context.Context, podName string, options metav1.GetOptions) (result *v1.EphemeralContainers, err error) {
- result = &v1.EphemeralContainers{}
- err = c.client.Get().
- Namespace(c.ns).
- Resource("pods").
- Name(podName).
- SubResource("ephemeralcontainers").
- VersionedParams(&options, scheme.ParameterCodec).
- Do(ctx).
- Into(result)
- return
-}
-
-// UpdateEphemeralContainers takes the top resource name and the representation of a ephemeralContainers and updates it. Returns the server's representation of the ephemeralContainers, and an error, if there is any.
-func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, ephemeralContainers *v1.EphemeralContainers, opts metav1.UpdateOptions) (result *v1.EphemeralContainers, err error) {
- result = &v1.EphemeralContainers{}
+// UpdateEphemeralContainers takes the top resource name and the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any.
+func (c *pods) UpdateEphemeralContainers(ctx context.Context, podName string, pod *v1.Pod, opts metav1.UpdateOptions) (result *v1.Pod, err error) {
+ result = &v1.Pod{}
err = c.client.Put().
Namespace(c.ns).
Resource("pods").
Name(podName).
SubResource("ephemeralcontainers").
VersionedParams(&opts, scheme.ParameterCodec).
- Body(ephemeralContainers).
+ Body(pod).
Do(ctx).
Into(result)
return
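Note: the ephemeralcontainers subresource now accepts and returns a full Pod rather than the removed v1.EphemeralContainers wrapper. A rough sketch of the new call pattern (the container name and image are illustrative):

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// addDebugContainer fetches the Pod, appends an ephemeral container to its spec,
// and sends the whole Pod to the ephemeralcontainers subresource.
func addDebugContainer(ctx context.Context, cs kubernetes.Interface, ns, podName string) error {
	pod, err := cs.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, corev1.EphemeralContainer{
		EphemeralContainerCommon: corev1.EphemeralContainerCommon{
			Name:  "debugger",
			Image: "busybox",
		},
	})
	_, err = cs.CoreV1().Pods(ns).UpdateEphemeralContainers(ctx, podName, pod, metav1.UpdateOptions{})
	return err
}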
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
index 759fe0ff4..8b6e0e932 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go
@@ -20,7 +20,8 @@ import (
"context"
v1 "k8s.io/api/core/v1"
- policy "k8s.io/api/policy/v1beta1"
+ policyv1 "k8s.io/api/policy/v1"
+ policyv1beta1 "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/client-go/kubernetes/scheme"
@@ -30,7 +31,16 @@ import (
// The PodExpansion interface allows manually adding extra methods to the PodInterface.
type PodExpansion interface {
Bind(ctx context.Context, binding *v1.Binding, opts metav1.CreateOptions) error
- Evict(ctx context.Context, eviction *policy.Eviction) error
+ // Evict submits a policy/v1beta1 Eviction request to the pod's eviction subresource.
+ // Equivalent to calling EvictV1beta1.
+ // Deprecated: Use EvictV1() (supported in 1.22+) or EvictV1beta1().
+ Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error
+ // EvictV1 submits a policy/v1 Eviction request to the pod's eviction subresource.
+ // Supported in 1.22+.
+ EvictV1(ctx context.Context, eviction *policyv1.Eviction) error
+ // EvictV1beta1 submits a policy/v1beta1 Eviction request to the pod's eviction subresource.
+ // Supported in 1.22+.
+ EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error
GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request
ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper
}
@@ -40,7 +50,18 @@ func (c *pods) Bind(ctx context.Context, binding *v1.Binding, opts metav1.Create
return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).VersionedParams(&opts, scheme.ParameterCodec).SubResource("binding").Body(binding).Do(ctx).Error()
}
-func (c *pods) Evict(ctx context.Context, eviction *policy.Eviction) error {
+// Evict submits a policy/v1beta1 Eviction request to the pod's eviction subresource.
+// Equivalent to calling EvictV1beta1.
+// Deprecated: Use EvictV1() (supported in 1.22+) or EvictV1beta1().
+func (c *pods) Evict(ctx context.Context, eviction *policyv1beta1.Eviction) error {
+ return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
+}
+
+func (c *pods) EvictV1beta1(ctx context.Context, eviction *policyv1beta1.Eviction) error {
+ return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
+}
+
+func (c *pods) EvictV1(ctx context.Context, eviction *policyv1.Eviction) error {
return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do(ctx).Error()
}
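Note: the pod expansion now offers version-explicit eviction helpers alongside the deprecated Evict. A minimal sketch using the policy/v1 path on a 1.22+ cluster; DeleteOptions, grace periods, and error-code handling are omitted here:

package example

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// evictPod submits a policy/v1 Eviction for the named pod; older clusters
// would use EvictV1beta1 with the policy/v1beta1 type instead.
func evictPod(ctx context.Context, cs kubernetes.Interface, ns, podName string) error {
	eviction := &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: ns},
	}
	return cs.CoreV1().Pods(ns).EvictV1(ctx, eviction)
}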
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
index 45c90aca3..c41d8dbc2 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go
@@ -54,6 +54,7 @@ type DeploymentInterface interface {
ApplyStatus(ctx context.Context, deployment *extensionsv1beta1.DeploymentApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Deployment, err error)
GetScale(ctx context.Context, deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error)
UpdateScale(ctx context.Context, deploymentName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error)
+ ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta1.Scale, error)
DeploymentExpansion
}
@@ -286,3 +287,28 @@ func (c *deployments) UpdateScale(ctx context.Context, deploymentName string, sc
Into(result)
return
}
+
+// ApplyScale takes top resource name and the apply declarative configuration for scale,
+// applies it and returns the applied scale, and an error, if there is any.
+func (c *deployments) ApplyScale(ctx context.Context, deploymentName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) {
+ if scale == nil {
+ return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(scale)
+ if err != nil {
+ return nil, err
+ }
+
+ result = &v1beta1.Scale{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Namespace(c.ns).
+ Resource("deployments").
+ Name(deploymentName).
+ SubResource("scale").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
index ee897f75a..3c907a3a0 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go
@@ -54,6 +54,7 @@ type ReplicaSetInterface interface {
ApplyStatus(ctx context.Context, replicaSet *extensionsv1beta1.ReplicaSetApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.ReplicaSet, err error)
GetScale(ctx context.Context, replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error)
UpdateScale(ctx context.Context, replicaSetName string, scale *v1beta1.Scale, opts v1.UpdateOptions) (*v1beta1.Scale, error)
+ ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (*v1beta1.Scale, error)
ReplicaSetExpansion
}
@@ -286,3 +287,28 @@ func (c *replicaSets) UpdateScale(ctx context.Context, replicaSetName string, sc
Into(result)
return
}
+
+// ApplyScale takes top resource name and the apply declarative configuration for scale,
+// applies it and returns the applied scale, and an error, if there is any.
+func (c *replicaSets) ApplyScale(ctx context.Context, replicaSetName string, scale *extensionsv1beta1.ScaleApplyConfiguration, opts v1.ApplyOptions) (result *v1beta1.Scale, err error) {
+ if scale == nil {
+ return nil, fmt.Errorf("scale provided to ApplyScale must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := json.Marshal(scale)
+ if err != nil {
+ return nil, err
+ }
+
+ result = &v1beta1.Scale{}
+ err = c.client.Patch(types.ApplyPatchType).
+ Namespace(c.ns).
+ Resource("replicasets").
+ Name(replicaSetName).
+ SubResource("scale").
+ VersionedParams(&patchOpts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go
new file mode 100644
index 000000000..cd1aac9c2
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction.go
@@ -0,0 +1,48 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ rest "k8s.io/client-go/rest"
+)
+
+// EvictionsGetter has a method to return a EvictionInterface.
+// A group's client should implement this interface.
+type EvictionsGetter interface {
+ Evictions(namespace string) EvictionInterface
+}
+
+// EvictionInterface has methods to work with Eviction resources.
+type EvictionInterface interface {
+ EvictionExpansion
+}
+
+// evictions implements EvictionInterface
+type evictions struct {
+ client rest.Interface
+ ns string
+}
+
+// newEvictions returns a Evictions
+func newEvictions(c *PolicyV1Client, namespace string) *evictions {
+ return &evictions{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go
new file mode 100644
index 000000000..853187feb
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/eviction_expansion.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ policy "k8s.io/api/policy/v1"
+)
+
+// The EvictionExpansion interface allows manually adding extra methods to the EvictionInterface.
+type EvictionExpansion interface {
+ Evict(ctx context.Context, eviction *policy.Eviction) error
+}
+
+func (c *evictions) Evict(ctx context.Context, eviction *policy.Eviction) error {
+ return c.client.Post().
+ AbsPath("/api/v1").
+ Namespace(eviction.Namespace).
+ Resource("pods").
+ Name(eviction.Name).
+ SubResource("eviction").
+ Body(eviction).
+ Do(ctx).
+ Error()
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go
index bb05a686a..ecd29deb8 100644
--- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go
+++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1/policy_client.go
@@ -26,6 +26,7 @@ import (
type PolicyV1Interface interface {
RESTClient() rest.Interface
+ EvictionsGetter
PodDisruptionBudgetsGetter
}
@@ -34,6 +35,10 @@ type PolicyV1Client struct {
restClient rest.Interface
}
+func (c *PolicyV1Client) Evictions(namespace string) EvictionInterface {
+ return newEvictions(c, namespace)
+}
+
func (c *PolicyV1Client) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface {
return newPodDisruptionBudgets(c, namespace)
}
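
Taken together, the two new files and the client wiring above give policy/v1 the same eviction helper that policy/v1beta1 already had. A minimal sketch of how a caller might use it (the clientset, namespace, and pod name are illustrative):

```go
package main

import (
	"context"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// evictPod asks the API server to evict a pod via the policy/v1 eviction
// subresource, which respects any matching PodDisruptionBudgets.
func evictPod(ctx context.Context, cs kubernetes.Interface, namespace, podName string) error {
	eviction := &policyv1.Eviction{
		ObjectMeta: metav1.ObjectMeta{
			Name:      podName,
			Namespace: namespace,
		},
		// DeleteOptions (grace period, preconditions, ...) can be set here if needed.
	}
	return cs.PolicyV1().Evictions(namespace).Evict(ctx, eviction)
}
```
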
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go
new file mode 100644
index 000000000..9040bb9a4
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/install/install.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package install installs the experimental API group, making it available as
+// an option to all of the API encoding/decoding machinery.
+package install
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ "k8s.io/client-go/pkg/apis/clientauthentication"
+ "k8s.io/client-go/pkg/apis/clientauthentication/v1"
+ "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
+ "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
+)
+
+// Install registers the API group and adds types to a scheme
+func Install(scheme *runtime.Scheme) {
+ utilruntime.Must(clientauthentication.AddToScheme(scheme))
+ utilruntime.Must(v1.AddToScheme(scheme))
+ utilruntime.Must(v1beta1.AddToScheme(scheme))
+ utilruntime.Must(v1alpha1.AddToScheme(scheme))
+}
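
The install package follows the usual pattern for Kubernetes API groups: one call registers the internal version and every external version. A small sketch of how a consumer builds codecs from it, mirroring what the exec credential plugin does further down in this diff:

```go
package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/client-go/pkg/apis/clientauthentication/install"
)

// newClientAuthCodecs returns a scheme and codec factory able to encode and
// decode ExecCredential objects in the internal, v1alpha1, v1beta1, and v1 versions.
func newClientAuthCodecs() (*runtime.Scheme, serializer.CodecFactory) {
	scheme := runtime.NewScheme()
	install.Install(scheme) // registers internal + v1 + v1beta1 + v1alpha1
	return scheme, serializer.NewCodecFactory(scheme)
}
```
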
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
index c10899792..8daaa3f8f 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go
@@ -47,7 +47,7 @@ type ExecCredentialSpec struct {
Response *Response
// Interactive is true when the transport detects the command is being called from an
- // interactive prompt.
+ // interactive prompt, i.e., when stdin has been passed to this exec plugin.
// +optional
Interactive bool
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/conversion.go
new file mode 100644
index 000000000..5c5f70d25
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/conversion.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/conversion"
+ "k8s.io/client-go/pkg/apis/clientauthentication"
+)
+
+func Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+ // This conversion intentionally omits the Response field, which was only
+ // supported in v1alpha1.
+ return autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in, out, s)
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
new file mode 100644
index 000000000..94ca35c2c
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication
+// +k8s:openapi-gen=true
+// +k8s:defaulter-gen=TypeMeta
+
+// +groupName=client.authentication.k8s.io
+
+package v1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1"
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go
new file mode 100644
index 000000000..bfc719a42
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = "client.authentication.k8s.io"
+
+// SchemeGroupVersion is the group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder runtime.SchemeBuilder
+ localSchemeBuilder = &SchemeBuilder
+ AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+ // We only register manually written functions here. The registration of the
+ // generated functions takes place in the generated files. The separation
+ // makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register(addKnownTypes)
+}
+
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &ExecCredential{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go
new file mode 100644
index 000000000..0c4754dd2
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/types.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ExecCredential is used by exec-based plugins to communicate credentials to
+// HTTP transports.
+type ExecCredential struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Spec holds information passed to the plugin by the transport.
+ Spec ExecCredentialSpec `json:"spec,omitempty"`
+
+ // Status is filled in by the plugin and holds the credentials that the transport
+ // should use to contact the API.
+ // +optional
+ Status *ExecCredentialStatus `json:"status,omitempty"`
+}
+
+// ExecCredentialSpec holds request and runtime specific information provided by
+// the transport.
+type ExecCredentialSpec struct {
+ // Cluster contains information to allow an exec plugin to communicate with the
+ // kubernetes cluster being authenticated to. Note that Cluster is non-nil only
+ // when provideClusterInfo is set to true in the exec provider config (i.e.,
+ // ExecConfig.ProvideClusterInfo).
+ // +optional
+ Cluster *Cluster `json:"cluster,omitempty"`
+
+ // Interactive declares whether stdin has been passed to this exec plugin.
+ Interactive bool `json:"interactive"`
+}
+
+// ExecCredentialStatus holds credentials for the transport to use.
+//
+// Token and ClientKeyData are sensitive fields. This data should only be
+// transmitted in-memory between the client and the exec plugin process. The exec
+// plugin itself should at least be protected via file permissions.
+type ExecCredentialStatus struct {
+ // ExpirationTimestamp indicates a time when the provided credentials expire.
+ // +optional
+ ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"`
+ // Token is a bearer token used by the client for request authentication.
+ Token string `json:"token,omitempty" datapolicy:"token"`
+ // PEM-encoded client TLS certificates (including intermediates, if any).
+ ClientCertificateData string `json:"clientCertificateData,omitempty"`
+ // PEM-encoded private key for the above certificate.
+ ClientKeyData string `json:"clientKeyData,omitempty" datapolicy:"security-key"`
+}
+
+// Cluster contains information to allow an exec plugin to communicate
+// with the kubernetes cluster being authenticated to.
+//
+// To ensure that this struct contains everything someone would need to communicate
+// with a kubernetes cluster (just like they would via a kubeconfig), the fields
+// should shadow "k8s.io/client-go/tools/clientcmd/api/v1".Cluster, with the exception
+// of CertificateAuthority, since CA data will always be passed to the plugin as bytes.
+type Cluster struct {
+ // Server is the address of the kubernetes cluster (https://hostname:port).
+ Server string `json:"server"`
+ // TLSServerName is passed to the server for SNI and is used in the client to
+ // check server certificates against. If ServerName is empty, the hostname
+ // used to contact the server is used.
+ // +optional
+ TLSServerName string `json:"tls-server-name,omitempty"`
+ // InsecureSkipTLSVerify skips the validity check for the server's certificate.
+ // This will make your HTTPS connections insecure.
+ // +optional
+ InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"`
+ // CAData contains PEM-encoded certificate authority certificates.
+ // If empty, system roots should be used.
+ // +listType=atomic
+ // +optional
+ CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"`
+ // ProxyURL is the URL to the proxy to be used for all requests to this
+ // cluster.
+ // +optional
+ ProxyURL string `json:"proxy-url,omitempty"`
+ // Config holds additional config data that is specific to the exec
+ // plugin with regards to the cluster being authenticated to.
+ //
+ // This data is sourced from the clientcmd Cluster object's
+ // extensions[client.authentication.k8s.io/exec] field:
+ //
+ // clusters:
+ // - name: my-cluster
+ // cluster:
+ // ...
+ // extensions:
+ // - name: client.authentication.k8s.io/exec # reserved extension name for per cluster exec config
+ // extension:
+ // audience: 06e3fbd18de8 # arbitrary config
+ //
+ // In some environments, the user config may be exactly the same across many clusters
+ // (i.e. call this exec plugin) minus some details that are specific to each cluster
+ // such as the audience. This field allows the per cluster config to be directly
+ // specified with the cluster info. Using this field to store secret data is not
+ // recommended as one of the prime benefits of exec plugins is that no secrets need
+ // to be stored directly in the kubeconfig.
+ // +optional
+ Config runtime.RawExtension `json:"config,omitempty"`
+}
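
To make the shape of the new v1 types concrete, here is a sketch of the producing side: an exec plugin writing an ExecCredential to stdout for client-go to consume. The token and expiry are placeholders.

```go
package main

import (
	"encoding/json"
	"os"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientauthenticationv1 "k8s.io/client-go/pkg/apis/clientauthentication/v1"
)

// main emits a client.authentication.k8s.io/v1 ExecCredential on stdout,
// which is how an exec plugin hands a bearer token back to client-go.
func main() {
	expiry := metav1.NewTime(time.Now().Add(10 * time.Minute))
	cred := clientauthenticationv1.ExecCredential{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "client.authentication.k8s.io/v1",
			Kind:       "ExecCredential",
		},
		Status: &clientauthenticationv1.ExecCredentialStatus{
			ExpirationTimestamp: &expiry,
			Token:               "placeholder-token", // placeholder, never a real credential
		},
	}
	_ = json.NewEncoder(os.Stdout).Encode(cred)
}
```
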
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go
new file mode 100644
index 000000000..39e7ef259
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.conversion.go
@@ -0,0 +1,200 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ unsafe "unsafe"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ conversion "k8s.io/apimachinery/pkg/conversion"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication"
+)
+
+func init() {
+ localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error {
+ if err := s.AddGeneratedConversionFunc((*Cluster)(nil), (*clientauthentication.Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_Cluster_To_clientauthentication_Cluster(a.(*Cluster), b.(*clientauthentication.Cluster), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*clientauthentication.Cluster)(nil), (*Cluster)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_Cluster_To_v1_Cluster(a.(*clientauthentication.Cluster), b.(*Cluster), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_ExecCredential_To_v1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope)
+ }); err != nil {
+ return err
+ }
+ if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
+ return Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope)
+ }); err != nil {
+ return err
+ }
+ return nil
+}
+
+func autoConvert_v1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error {
+ out.Server = in.Server
+ out.TLSServerName = in.TLSServerName
+ out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+ out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
+ out.ProxyURL = in.ProxyURL
+ if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Config, &out.Config, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_v1_Cluster_To_clientauthentication_Cluster is an autogenerated conversion function.
+func Convert_v1_Cluster_To_clientauthentication_Cluster(in *Cluster, out *clientauthentication.Cluster, s conversion.Scope) error {
+ return autoConvert_v1_Cluster_To_clientauthentication_Cluster(in, out, s)
+}
+
+func autoConvert_clientauthentication_Cluster_To_v1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error {
+ out.Server = in.Server
+ out.TLSServerName = in.TLSServerName
+ out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify
+ out.CertificateAuthorityData = *(*[]byte)(unsafe.Pointer(&in.CertificateAuthorityData))
+ out.ProxyURL = in.ProxyURL
+ if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Config, &out.Config, s); err != nil {
+ return err
+ }
+ return nil
+}
+
+// Convert_clientauthentication_Cluster_To_v1_Cluster is an autogenerated conversion function.
+func Convert_clientauthentication_Cluster_To_v1_Cluster(in *clientauthentication.Cluster, out *Cluster, s conversion.Scope) error {
+ return autoConvert_clientauthentication_Cluster_To_v1_Cluster(in, out, s)
+}
+
+func autoConvert_v1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+ if err := Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status))
+ return nil
+}
+
+// Convert_v1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function.
+func Convert_v1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error {
+ return autoConvert_v1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredential_To_v1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+ if err := Convert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil {
+ return err
+ }
+ out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status))
+ return nil
+}
+
+// Convert_clientauthentication_ExecCredential_To_v1_ExecCredential is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredential_To_v1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error {
+ return autoConvert_clientauthentication_ExecCredential_To_v1_ExecCredential(in, out, s)
+}
+
+func autoConvert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+ if in.Cluster != nil {
+ in, out := &in.Cluster, &out.Cluster
+ *out = new(clientauthentication.Cluster)
+ if err := Convert_v1_Cluster_To_clientauthentication_Cluster(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cluster = nil
+ }
+ out.Interactive = in.Interactive
+ return nil
+}
+
+// Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function.
+func Convert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error {
+ return autoConvert_v1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialSpec_To_v1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
+ // WARNING: in.Response requires manual conversion: does not exist in peer-type
+ out.Interactive = in.Interactive
+ if in.Cluster != nil {
+ in, out := &in.Cluster, &out.Cluster
+ *out = new(Cluster)
+ if err := Convert_clientauthentication_Cluster_To_v1_Cluster(*in, *out, s); err != nil {
+ return err
+ }
+ } else {
+ out.Cluster = nil
+ }
+ return nil
+}
+
+func autoConvert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+ out.ExpirationTimestamp = (*metav1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+ out.Token = in.Token
+ out.ClientCertificateData = in.ClientCertificateData
+ out.ClientKeyData = in.ClientKeyData
+ return nil
+}
+
+// Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error {
+ return autoConvert_v1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s)
+}
+
+func autoConvert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+ out.ExpirationTimestamp = (*metav1.Time)(unsafe.Pointer(in.ExpirationTimestamp))
+ out.Token = in.Token
+ out.ClientCertificateData = in.ClientCertificateData
+ out.ClientKeyData = in.ClientKeyData
+ return nil
+}
+
+// Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus is an autogenerated conversion function.
+func Convert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error {
+ return autoConvert_clientauthentication_ExecCredentialStatus_To_v1_ExecCredentialStatus(in, out, s)
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..60ab25d81
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.deepcopy.go
@@ -0,0 +1,119 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Cluster) DeepCopyInto(out *Cluster) {
+ *out = *in
+ if in.CertificateAuthorityData != nil {
+ in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData
+ *out = make([]byte, len(*in))
+ copy(*out, *in)
+ }
+ in.Config.DeepCopyInto(&out.Config)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster.
+func (in *Cluster) DeepCopy() *Cluster {
+ if in == nil {
+ return nil
+ }
+ out := new(Cluster)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredential) DeepCopyInto(out *ExecCredential) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.Spec.DeepCopyInto(&out.Spec)
+ if in.Status != nil {
+ in, out := &in.Status, &out.Status
+ *out = new(ExecCredentialStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential.
+func (in *ExecCredential) DeepCopy() *ExecCredential {
+ if in == nil {
+ return nil
+ }
+ out := new(ExecCredential)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExecCredential) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) {
+ *out = *in
+ if in.Cluster != nil {
+ in, out := &in.Cluster, &out.Cluster
+ *out = new(Cluster)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec.
+func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ExecCredentialSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) {
+ *out = *in
+ if in.ExpirationTimestamp != nil {
+ in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp
+ *out = (*in).DeepCopy()
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus.
+func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ExecCredentialStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go
new file mode 100644
index 000000000..cce2e603a
--- /dev/null
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1/zz_generated.defaults.go
@@ -0,0 +1,32 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulter functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ return nil
+}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go
index 441b7c44b..6741114dd 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go
@@ -22,7 +22,7 @@ import (
)
func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
- // This conversion intentionally omits the Response and Interactive fields, which were only
+ // This conversion intentionally omits the Response field, which was only
// supported in v1alpha1.
return autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in, out, s)
}
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
index fabc6f65e..714b0273a 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go
@@ -46,6 +46,9 @@ type ExecCredentialSpec struct {
// ExecConfig.ProvideClusterInfo).
// +optional
Cluster *Cluster `json:"cluster,omitempty"`
+
+ // Interactive declares whether stdin has been passed to this exec plugin.
+ Interactive bool `json:"interactive"`
}
// ExecCredentialStatus holds credentials for the transport to use.
diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
index 90f7935fe..8daed8052 100644
--- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
+++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go
@@ -149,6 +149,7 @@ func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredenti
} else {
out.Cluster = nil
}
+ out.Interactive = in.Interactive
return nil
}
@@ -159,7 +160,7 @@ func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSp
func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error {
// WARNING: in.Response requires manual conversion: does not exist in peer-type
- // WARNING: in.Interactive requires manual conversion: does not exist in peer-type
+ out.Interactive = in.Interactive
if in.Cluster != nil {
in, out := &in.Cluster, &out.Cluster
*out = new(Cluster)
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
index b23e57dde..7a0984626 100644
--- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
+++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go
@@ -35,15 +35,15 @@ import (
"github.com/davecgh/go-spew/spew"
"golang.org/x/term"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/clock"
- utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/pkg/apis/clientauthentication"
- "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
- "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
+ "k8s.io/client-go/pkg/apis/clientauthentication/install"
+ clientauthenticationv1 "k8s.io/client-go/pkg/apis/clientauthentication/v1"
+ clientauthenticationv1alpha1 "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1"
+ clientauthenticationv1beta1 "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
"k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/tools/metrics"
"k8s.io/client-go/transport"
@@ -63,10 +63,7 @@ var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
func init() {
- v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
- utilruntime.Must(v1alpha1.AddToScheme(scheme))
- utilruntime.Must(v1beta1.AddToScheme(scheme))
- utilruntime.Must(clientauthentication.AddToScheme(scheme))
+ install.Install(scheme)
}
var (
@@ -75,8 +72,9 @@ var (
globalCache = newCache()
// The list of API versions we accept.
apiVersions = map[string]schema.GroupVersion{
- v1alpha1.SchemeGroupVersion.String(): v1alpha1.SchemeGroupVersion,
- v1beta1.SchemeGroupVersion.String(): v1beta1.SchemeGroupVersion,
+ clientauthenticationv1alpha1.SchemeGroupVersion.String(): clientauthenticationv1alpha1.SchemeGroupVersion,
+ clientauthenticationv1beta1.SchemeGroupVersion.String(): clientauthenticationv1beta1.SchemeGroupVersion,
+ clientauthenticationv1.SchemeGroupVersion.String(): clientauthenticationv1.SchemeGroupVersion,
}
)
@@ -162,10 +160,10 @@ func (s *sometimes) Do(f func()) {
// GetAuthenticator returns an exec-based plugin for providing client credentials.
func GetAuthenticator(config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) {
- return newAuthenticator(globalCache, config, cluster)
+ return newAuthenticator(globalCache, term.IsTerminal, config, cluster)
}
-func newAuthenticator(c *cache, config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) {
+func newAuthenticator(c *cache, isTerminalFunc func(int) bool, config *api.ExecConfig, cluster *clientauthentication.Cluster) (*Authenticator, error) {
key := cacheKey(config, cluster)
if a, ok := c.get(key); ok {
return a, nil
@@ -196,11 +194,11 @@ func newAuthenticator(c *cache, config *api.ExecConfig, cluster *clientauthentic
clock: clock.RealClock{},
},
- stdin: os.Stdin,
- stderr: os.Stderr,
- interactive: term.IsTerminal(int(os.Stdin.Fd())),
- now: time.Now,
- environ: os.Environ,
+ stdin: os.Stdin,
+ stderr: os.Stderr,
+ interactiveFunc: func() (bool, error) { return isInteractive(isTerminalFunc, config) },
+ now: time.Now,
+ environ: os.Environ,
defaultDialer: defaultDialer,
connTracker: connTracker,
@@ -213,6 +211,33 @@ func newAuthenticator(c *cache, config *api.ExecConfig, cluster *clientauthentic
return c.put(key, a), nil
}
+func isInteractive(isTerminalFunc func(int) bool, config *api.ExecConfig) (bool, error) {
+ var shouldBeInteractive bool
+ switch config.InteractiveMode {
+ case api.NeverExecInteractiveMode:
+ shouldBeInteractive = false
+ case api.IfAvailableExecInteractiveMode:
+ shouldBeInteractive = !config.StdinUnavailable && isTerminalFunc(int(os.Stdin.Fd()))
+ case api.AlwaysExecInteractiveMode:
+ if !isTerminalFunc(int(os.Stdin.Fd())) {
+ return false, errors.New("standard input is not a terminal")
+ }
+ if config.StdinUnavailable {
+ suffix := ""
+ if len(config.StdinUnavailableMessage) > 0 {
+ // only print extra ": " if the user actually specified a message
+ suffix = fmt.Sprintf(": %s", config.StdinUnavailableMessage)
+ }
+ return false, fmt.Errorf("standard input is unavailable%s", suffix)
+ }
+ shouldBeInteractive = true
+ default:
+ return false, fmt.Errorf("unknown interactiveMode: %q", config.InteractiveMode)
+ }
+
+ return shouldBeInteractive, nil
+}
+
// Authenticator is a client credential provider that rotates credentials by executing a plugin.
// The plugin input and output are defined by the API group client.authentication.k8s.io.
type Authenticator struct {
@@ -231,11 +256,11 @@ type Authenticator struct {
installHint string
// Stubbable for testing
- stdin io.Reader
- stderr io.Writer
- interactive bool
- now func() time.Time
- environ func() []string
+ stdin io.Reader
+ stderr io.Writer
+ interactiveFunc func() (bool, error)
+ now func() time.Time
+ environ func() []string
// defaultDialer is used for clients which don't specify a custom dialer
defaultDialer *connrotation.Dialer
@@ -263,8 +288,9 @@ func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error {
// setting up the transport, as that triggers the exec action if the server is
// also configured to allow client certificates for authentication. For requests
// like "kubectl get --token (token) pods" we should assume the intention is to
- // use the provided token for authentication.
- if c.HasTokenAuth() {
+ // use the provided token for authentication. The same can be said for when the
+ // user specifies basic auth.
+ if c.HasTokenAuth() || c.HasBasicAuth() {
return nil
}
@@ -375,10 +401,15 @@ func (a *Authenticator) maybeRefreshCreds(creds *credentials, r *clientauthentic
// refreshCredsLocked executes the plugin and reads the credentials from
// stdout. It must be called while holding the Authenticator's mutex.
func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) error {
+ interactive, err := a.interactiveFunc()
+ if err != nil {
+ return fmt.Errorf("exec plugin cannot support interactive mode: %w", err)
+ }
+
cred := &clientauthentication.ExecCredential{
Spec: clientauthentication.ExecCredentialSpec{
Response: r,
- Interactive: a.interactive,
+ Interactive: interactive,
},
}
if a.provideClusterInfo {
@@ -397,7 +428,7 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err
cmd.Env = env
cmd.Stderr = a.stderr
cmd.Stdout = stdout
- if a.interactive {
+ if interactive {
cmd.Stdin = a.stdin
}
@@ -461,7 +492,7 @@ func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) err
if oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) {
// Can be nil if the exec auth plugin only returned token auth.
if oldCreds.cert != nil && oldCreds.cert.Leaf != nil {
- metrics.ClientCertRotationAge.Observe(time.Now().Sub(oldCreds.cert.Leaf.NotBefore))
+ metrics.ClientCertRotationAge.Observe(time.Since(oldCreds.cert.Leaf.NotBefore))
}
a.connTracker.CloseAll()
}
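
The net effect of the exec.go changes is that interactivity is now decided per credential refresh from the ExecConfig's InteractiveMode rather than captured once at construction time. A hedged sketch of the caller side, with an illustrative plugin command:

```go
package main

import (
	"k8s.io/client-go/pkg/apis/clientauthentication"
	"k8s.io/client-go/plugin/pkg/client/auth/exec"
	"k8s.io/client-go/tools/clientcmd/api"
)

// newExecAuthenticator builds an exec-based authenticator whose plugin only
// receives stdin when a real terminal is available (IfAvailable mode).
func newExecAuthenticator(cluster *clientauthentication.Cluster) (*exec.Authenticator, error) {
	cfg := &api.ExecConfig{
		Command:         "example-credential-plugin", // illustrative binary name
		APIVersion:      "client.authentication.k8s.io/v1",
		InteractiveMode: api.IfAvailableExecInteractiveMode,
	}
	return exec.GetAuthenticator(cfg, cluster)
}
```
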
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
index 3a2cc251a..51210975a 100644
--- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
+++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/metrics.go
@@ -18,6 +18,7 @@ package exec
import (
"errors"
+ "io/fs"
"os/exec"
"reflect"
"sync"
@@ -92,6 +93,7 @@ func (c *certificateExpirationTracker) set(a *Authenticator, t time.Time) {
func incrementCallsMetric(err error) {
execExitError := &exec.ExitError{}
execError := &exec.Error{}
+ pathError := &fs.PathError{}
switch {
case err == nil: // Binary execution succeeded.
metrics.ExecPluginCalls.Increment(successExitCode, noError)
@@ -99,7 +101,7 @@ func incrementCallsMetric(err error) {
case errors.As(err, &execExitError): // Binary execution failed (see "os/exec".Cmd.Run()).
metrics.ExecPluginCalls.Increment(execExitError.ExitCode(), pluginExecutionError)
- case errors.As(err, &execError): // Binary does not exist (see exec.Error).
+ case errors.As(err, &execError), errors.As(err, &pathError): // Binary does not exist (see exec.Error, fs.PathError).
metrics.ExecPluginCalls.Increment(failureExitCode, pluginNotFoundError)
default: // We don't know about this error type.
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go
index ee1bfdef3..7ec872e83 100644
--- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go
+++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go
@@ -113,7 +113,16 @@ type gcpAuthProvider struct {
persister restclient.AuthProviderConfigPersister
}
+var warnOnce sync.Once
+
func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
+ // deprecated in v1.22, remove in v1.25
+ // this should be updated to use klog.Warningf in v1.24 to more actively warn consumers
+ warnOnce.Do(func() {
+ klog.V(1).Infof(`WARNING: the gcp auth plugin is deprecated in v1.22+, unavailable in v1.25+; use gcloud instead.
+To learn more, consult https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins`)
+ })
+
ts, err := tokenSource(isCmdTokenSource(gcpConfig), gcpConfig)
if err != nil {
return nil, err
diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go
index 0ac0e8eab..e5a8100be 100644
--- a/vendor/k8s.io/client-go/rest/request.go
+++ b/vendor/k8s.io/client-go/rest/request.go
@@ -93,7 +93,6 @@ type Request struct {
rateLimiter flowcontrol.RateLimiter
backoff BackoffManager
timeout time.Duration
- maxRetries int
// generic components accessible via method setters
verb string
@@ -110,8 +109,9 @@ type Request struct {
subresource string
// output
- err error
- body io.Reader
+ err error
+ body io.Reader
+ retry WithRetry
}
// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
@@ -142,7 +142,7 @@ func NewRequest(c *RESTClient) *Request {
backoff: backoff,
timeout: timeout,
pathPrefix: pathPrefix,
- maxRetries: 10,
+ retry: &withRetry{maxRetries: 10},
warningHandler: c.warningHandler,
}
@@ -408,10 +408,7 @@ func (r *Request) Timeout(d time.Duration) *Request {
// function is specifically called with a different value.
// A zero maxRetries prevents it from doing retries and returns an error immediately.
func (r *Request) MaxRetries(maxRetries int) *Request {
- if maxRetries < 0 {
- maxRetries = 0
- }
- r.maxRetries = maxRetries
+ r.retry.SetMaxRetries(maxRetries)
return r
}
@@ -602,7 +599,7 @@ func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) err
if latency > extraLongThrottleLatency {
// If the rate limiter latency is very high, the log message should be printed at a higher log level,
// but we use a throttled logger to prevent spamming.
- globalThrottledLogger.Infof(message)
+ globalThrottledLogger.Infof("%s", message)
}
metrics.RateLimiterLatency.Observe(ctx, r.verb, r.finalURLTemplate(), latency)
@@ -678,43 +675,88 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
return nil, r.err
}
- url := r.URL().String()
- req, err := http.NewRequest(r.verb, url, r.body)
- if err != nil {
- return nil, err
- }
- req = req.WithContext(ctx)
- req.Header = r.headers
client := r.c.Client
if client == nil {
client = http.DefaultClient
}
- r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
- resp, err := client.Do(req)
- updateURLMetrics(ctx, r, resp, err)
- if r.c.base != nil {
- if err != nil {
- r.backoff.UpdateBackoff(r.c.base, err, 0)
- } else {
- r.backoff.UpdateBackoff(r.c.base, err, resp.StatusCode)
- }
- }
- if err != nil {
+
+ isErrRetryableFunc := func(request *http.Request, err error) bool {
// The watch stream mechanism handles many common partial data errors, so closed
// connections can be retried in many cases.
if net.IsProbableEOF(err) || net.IsTimeout(err) {
- return watch.NewEmptyWatch(), nil
+ return true
}
- return nil, err
+ return false
}
- if resp.StatusCode != http.StatusOK {
- defer resp.Body.Close()
- if result := r.transformResponse(resp, req); result.err != nil {
- return nil, result.err
+ var retryAfter *RetryAfter
+ url := r.URL().String()
+ for {
+ req, err := r.newHTTPRequest(ctx)
+ if err != nil {
+ return nil, err
}
- return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode)
- }
+ r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
+ if retryAfter != nil {
+ // We are retrying the request that we already sent to the apiserver
+ // at least once before.
+ // This request should also be throttled with the client-internal rate limiter.
+ if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil {
+ return nil, err
+ }
+ retryAfter = nil
+ }
+
+ resp, err := client.Do(req)
+ updateURLMetrics(ctx, r, resp, err)
+ if r.c.base != nil {
+ if err != nil {
+ r.backoff.UpdateBackoff(r.c.base, err, 0)
+ } else {
+ r.backoff.UpdateBackoff(r.c.base, err, resp.StatusCode)
+ }
+ }
+ if err == nil && resp.StatusCode == http.StatusOK {
+ return r.newStreamWatcher(resp)
+ }
+
+ done, transformErr := func() (bool, error) {
+ defer readAndCloseResponseBody(resp)
+
+ var retry bool
+ retryAfter, retry = r.retry.NextRetry(req, resp, err, isErrRetryableFunc)
+ if retry {
+ err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body)
+ if err == nil {
+ return false, nil
+ }
+ klog.V(4).Infof("Could not retry request - %v", err)
+ }
+
+ if resp == nil {
+ // the server must have sent us an error in 'err'
+ return true, nil
+ }
+ if result := r.transformResponse(resp, req); result.err != nil {
+ return true, result.err
+ }
+ return true, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode)
+ }()
+ if done {
+ if isErrRetryableFunc(req, err) {
+ return watch.NewEmptyWatch(), nil
+ }
+ if err == nil {
+ // if the server sent us an HTTP Response object,
+ // we need to return the error object from that.
+ err = transformErr
+ }
+ return nil, err
+ }
+ }
+}
+
+func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, error) {
contentType := resp.Header.Get("Content-Type")
mediaType, params, err := mime.ParseMediaType(contentType)
if err != nil {
@@ -769,49 +811,75 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
return nil, err
}
- url := r.URL().String()
- req, err := http.NewRequest(r.verb, url, nil)
- if err != nil {
- return nil, err
- }
- if r.body != nil {
- req.Body = ioutil.NopCloser(r.body)
- }
- req = req.WithContext(ctx)
- req.Header = r.headers
client := r.c.Client
if client == nil {
client = http.DefaultClient
}
- r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
- resp, err := client.Do(req)
- updateURLMetrics(ctx, r, resp, err)
- if r.c.base != nil {
+
+ var retryAfter *RetryAfter
+ url := r.URL().String()
+ for {
+ req, err := r.newHTTPRequest(ctx)
if err != nil {
- r.backoff.UpdateBackoff(r.URL(), err, 0)
- } else {
- r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
+ return nil, err
}
- }
- if err != nil {
- return nil, err
- }
-
- switch {
- case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
- handleWarnings(resp.Header, r.warningHandler)
- return resp.Body, nil
-
- default:
- // ensure we close the body before returning the error
- defer resp.Body.Close()
-
- result := r.transformResponse(resp, req)
- err := result.Error()
- if err == nil {
- err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body))
+ if r.body != nil {
+ req.Body = ioutil.NopCloser(r.body)
+ }
+
+ r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
+ if retryAfter != nil {
+ // We are retrying the request that we already sent to the apiserver
+ // at least once before.
+ // This request should also be throttled with the client-internal rate limiter.
+ if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil {
+ return nil, err
+ }
+ retryAfter = nil
+ }
+
+ resp, err := client.Do(req)
+ updateURLMetrics(ctx, r, resp, err)
+ if r.c.base != nil {
+ if err != nil {
+ r.backoff.UpdateBackoff(r.URL(), err, 0)
+ } else {
+ r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
+ }
+ }
+ if err != nil {
+ // we only retry on an HTTP response with 'Retry-After' header
+ return nil, err
+ }
+
+ switch {
+ case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
+ handleWarnings(resp.Header, r.warningHandler)
+ return resp.Body, nil
+
+ default:
+ done, transformErr := func() (bool, error) {
+ defer resp.Body.Close()
+
+ var retry bool
+ retryAfter, retry = r.retry.NextRetry(req, resp, err, neverRetryError)
+ if retry {
+ err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, url, r.body)
+ if err == nil {
+ return false, nil
+ }
+ klog.V(4).Infof("Could not retry request - %v", err)
+ }
+ result := r.transformResponse(resp, req)
+ if err := result.Error(); err != nil {
+ return true, err
+ }
+ return true, fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body))
+ }()
+ if done {
+ return nil, transformErr
+ }
}
- return nil, err
}
}
@@ -842,6 +910,17 @@ func (r *Request) requestPreflightCheck() error {
return nil
}
+func (r *Request) newHTTPRequest(ctx context.Context) (*http.Request, error) {
+ url := r.URL().String()
+ req, err := http.NewRequest(r.verb, url, r.body)
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+ req.Header = r.headers
+ return req, nil
+}
+
// request connects to the server and invokes the provided function when a server response is
// received. It handles retry behavior and up front validation of requests. It will invoke
// fn at most once. It will return an error if a problem occurred prior to connecting to the
@@ -881,27 +960,22 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
}
// Right now we make about ten retry attempts if we get a Retry-After response.
- retries := 0
- var retryInfo string
+ var retryAfter *RetryAfter
for {
-
- url := r.URL().String()
- req, err := http.NewRequest(r.verb, url, r.body)
+ req, err := r.newHTTPRequest(ctx)
if err != nil {
return err
}
- req = req.WithContext(ctx)
- req.Header = r.headers
r.backoff.Sleep(r.backoff.CalculateBackoff(r.URL()))
- if retries > 0 {
+ if retryAfter != nil {
// We are retrying the request that we already sent to the apiserver
// at least once before.
// This request should also be throttled with the client-internal rate limiter.
- if err := r.tryThrottleWithInfo(ctx, retryInfo); err != nil {
+ if err := r.tryThrottleWithInfo(ctx, retryAfter.Reason); err != nil {
return err
}
- retryInfo = ""
+ retryAfter = nil
}
resp, err := client.Do(req)
updateURLMetrics(ctx, r, resp, err)
@@ -910,61 +984,45 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
} else {
r.backoff.UpdateBackoff(r.URL(), err, resp.StatusCode)
}
- if err != nil {
- // "Connection reset by peer" or "apiserver is shutting down" are usually a transient errors.
- // Thus in case of "GET" operations, we simply retry it.
- // We are not automatically retrying "write" operations, as
- // they are not idempotent.
- if r.verb != "GET" {
- return err
- }
- // For connection errors and apiserver shutdown errors retry.
- if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
- // For the purpose of retry, we set the artificial "retry-after" response.
- // TODO: Should we clean the original response if it exists?
- resp = &http.Response{
- StatusCode: http.StatusInternalServerError,
- Header: http.Header{"Retry-After": []string{"1"}},
- Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
- }
- } else {
- return err
- }
- }
done := func() bool {
- // Ensure the response body is fully read and closed
- // before we reconnect, so that we reuse the same TCP
- // connection.
- defer func() {
- const maxBodySlurpSize = 2 << 10
- if resp.ContentLength <= maxBodySlurpSize {
- io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
- }
- resp.Body.Close()
- }()
+ defer readAndCloseResponseBody(resp)
- retries++
- if seconds, wait := checkWait(resp); wait && retries <= r.maxRetries {
- retryInfo = getRetryReason(retries, seconds, resp, err)
- if seeker, ok := r.body.(io.Seeker); ok && r.body != nil {
- _, err := seeker.Seek(0, 0)
- if err != nil {
- klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
- fn(req, resp)
- return true
- }
+ // if the server returns an error in err, the response will be nil.
+ f := func(req *http.Request, resp *http.Response) {
+ if resp == nil {
+ return
}
-
- klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url)
- r.backoff.Sleep(time.Duration(seconds) * time.Second)
- return false
+ fn(req, resp)
}
- fn(req, resp)
+
+ var retry bool
+ retryAfter, retry = r.retry.NextRetry(req, resp, err, func(req *http.Request, err error) bool {
+ // "Connection reset by peer" or "apiserver is shutting down" are usually a transient errors.
+ // Thus in case of "GET" operations, we simply retry it.
+ // We are not automatically retrying "write" operations, as they are not idempotent.
+ if r.verb != "GET" {
+ return false
+ }
+ // For connection errors and apiserver shutdown errors retry.
+ if net.IsConnectionReset(err) || net.IsProbableEOF(err) {
+ return true
+ }
+ return false
+ })
+ if retry {
+ err := r.retry.BeforeNextRetry(ctx, r.backoff, retryAfter, req.URL.String(), r.body)
+ if err == nil {
+ return false
+ }
+ klog.V(4).Infof("Could not retry request - %v", err)
+ }
+
+ f(req, resp)
return true
}()
if done {
- return nil
+ return err
}
}
}
@@ -1019,13 +1077,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
// 3. Apiserver closes connection.
// 4. client-go should catch this and return an error.
klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
- streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err)
+ streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %w", err)
return Result{
err: streamErr,
}
default:
klog.Errorf("Unexpected error when reading response body: %v", err)
- unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %v", err)
+ unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %w", err)
return Result{
err: unexpectedErr,
}
@@ -1196,19 +1254,6 @@ func isTextResponse(resp *http.Response) bool {
return strings.HasPrefix(media, "text/")
}
-// checkWait returns true along with a number of seconds if the server instructed us to wait
-// before retrying.
-func checkWait(resp *http.Response) (int, bool) {
- switch r := resp.StatusCode; {
- // any 500 error code and 429 can trigger a wait
- case r == http.StatusTooManyRequests, r >= 500:
- default:
- return 0, false
- }
- i, ok := retryAfterSeconds(resp)
- return i, ok
-}
-
// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if
// the header was missing or not a valid number.
func retryAfterSeconds(resp *http.Response) (int, bool) {
@@ -1220,26 +1265,6 @@ func retryAfterSeconds(resp *http.Response) (int, bool) {
return 0, false
}
-func getRetryReason(retries, seconds int, resp *http.Response, err error) string {
- // priority and fairness sets the UID of the FlowSchema associated with a request
- // in the following response Header.
- const responseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID"
-
- message := fmt.Sprintf("retries: %d, retry-after: %ds", retries, seconds)
-
- switch {
- case resp.StatusCode == http.StatusTooManyRequests:
- // it is server-side throttling from priority and fairness
- flowSchemaUID := resp.Header.Get(responseHeaderMatchedFlowSchemaUID)
- return fmt.Sprintf("%s - retry-reason: due to server-side throttling, FlowSchema UID: %q", message, flowSchemaUID)
- case err != nil:
- // it's a retriable error
- return fmt.Sprintf("%s - retry-reason: due to retriable error, error: %v", message, err)
- default:
- return fmt.Sprintf("%s - retry-reason: %d", message, resp.StatusCode)
- }
-}
-
// Result contains the result of calling Request.Do().
type Result struct {
body []byte
diff --git a/vendor/k8s.io/client-go/rest/with_retry.go b/vendor/k8s.io/client-go/rest/with_retry.go
new file mode 100644
index 000000000..1b7360b53
--- /dev/null
+++ b/vendor/k8s.io/client-go/rest/with_retry.go
@@ -0,0 +1,232 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package rest
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "k8s.io/klog/v2"
+)
+
+// IsRetryableErrorFunc allows the client to provide its own function
+// that determines whether the specified err from the server is retryable.
+//
+// request: the original request sent to the server
+// err: the server sent this error to us
+//
+// The function returns true if the error is retryable and the request
+// can be retried, otherwise it returns false.
+// We have four modes of communication - 'Stream', 'Watch', 'Do' and 'DoRaw'; this
+// function allows us to customize the retryability aspect of each.
+type IsRetryableErrorFunc func(request *http.Request, err error) bool
+
+func (r IsRetryableErrorFunc) IsErrorRetryable(request *http.Request, err error) bool {
+ return r(request, err)
+}
+
+var neverRetryError = IsRetryableErrorFunc(func(_ *http.Request, _ error) bool {
+ return false
+})
+
+// WithRetry allows the client to retry a request up to a certain number of times
+// Note that WithRetry is not safe for concurrent use by multiple
+// goroutines without additional locking or coordination.
+type WithRetry interface {
+ // SetMaxRetries makes the request use the specified integer as a ceiling
+ // for retries upon receiving a 429 status code and the "Retry-After" header
+ // in the response.
+ // A zero maxRetries prevents any retry and causes the request to return immediately.
+ SetMaxRetries(maxRetries int)
+
+ // NextRetry advances the retry counter appropriately and returns true if the
+ // request should be retried, otherwise it returns false if:
+ // - we have already reached the maximum retry threshold.
+ // - the error does not fall into the retryable category.
+ // - the server has not sent us a 429, or 5xx status code and the
+ // 'Retry-After' response header is not set with a value.
+ //
+ // if retry is set to true, retryAfter will contain the information
+ // regarding the next retry.
+ //
+ // request: the original request sent to the server
+ // resp: the response sent from the server, it is set if err is nil
+ // err: the server sent this error to us, if err is set then resp is nil.
+ // f: a IsRetryableErrorFunc function provided by the client that determines
+ // if the err sent by the server is retryable.
+ NextRetry(req *http.Request, resp *http.Response, err error, f IsRetryableErrorFunc) (*RetryAfter, bool)
+
+ // BeforeNextRetry is responsible for carrying out operations that need
+ // to be completed before the next retry is initiated:
+ // - if the request context is already canceled there is no need to
+ // retry, the function will return ctx.Err().
+ // - we need to seek to the beginning of the request body before we
+ // initiate the next retry, the function should return an error if
+ // it fails to do so.
+ // - we should wait the number of seconds the server has asked us to
+ // in the 'Retry-After' response header.
+ //
+ // If BeforeNextRetry returns an error the client should abort the retry,
+ // otherwise it is safe to initiate the next retry.
+ BeforeNextRetry(ctx context.Context, backoff BackoffManager, retryAfter *RetryAfter, url string, body io.Reader) error
+}
+
+// RetryAfter holds information associated with the next retry.
+type RetryAfter struct {
+ // Wait is the duration the server has asked us to wait before
+ // the next retry is initiated.
+ // This is the value of the 'Retry-After' response header in seconds.
+ Wait time.Duration
+
+ // Attempt is the Nth attempt after which we have received a retryable
+ // error or a 'Retry-After' response header from the server.
+ Attempt int
+
+ // Reason describes why we are retrying the request
+ Reason string
+}
+
+type withRetry struct {
+ maxRetries int
+ attempts int
+}
+
+func (r *withRetry) SetMaxRetries(maxRetries int) {
+ if maxRetries < 0 {
+ maxRetries = 0
+ }
+ r.maxRetries = maxRetries
+}
+
+func (r *withRetry) NextRetry(req *http.Request, resp *http.Response, err error, f IsRetryableErrorFunc) (*RetryAfter, bool) {
+ if req == nil || (resp == nil && err == nil) {
+ // bad input, we do nothing.
+ return nil, false
+ }
+
+ r.attempts++
+ retryAfter := &RetryAfter{Attempt: r.attempts}
+ if r.attempts > r.maxRetries {
+ return retryAfter, false
+ }
+
+ // if the server returned an error, it takes precedence over the http response.
+ var errIsRetryable bool
+ if f != nil && err != nil && f.IsErrorRetryable(req, err) {
+ errIsRetryable = true
+ // we have a retryable error, for which we will create an
+ // artificial "Retry-After" response.
+ resp = retryAfterResponse()
+ }
+ if err != nil && !errIsRetryable {
+ return retryAfter, false
+ }
+
+ // if we are here, we have either a or b:
+ // a: we have a retryable error, for which we already
+ // have an artificial "Retry-After" response.
+ // b: we have a response from the server for which we
+ // need to check if it is retryable
+ seconds, wait := checkWait(resp)
+ if !wait {
+ return retryAfter, false
+ }
+
+ retryAfter.Wait = time.Duration(seconds) * time.Second
+ retryAfter.Reason = getRetryReason(r.attempts, seconds, resp, err)
+ return retryAfter, true
+}
+
+func (r *withRetry) BeforeNextRetry(ctx context.Context, backoff BackoffManager, retryAfter *RetryAfter, url string, body io.Reader) error {
+ // Ensure the response body is fully read and closed before
+ // we reconnect, so that we reuse the same TCP connection.
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ if seeker, ok := body.(io.Seeker); ok && body != nil {
+ if _, err := seeker.Seek(0, 0); err != nil {
+ return fmt.Errorf("can't Seek() back to beginning of body for %T", r)
+ }
+ }
+
+ klog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", retryAfter.Wait, retryAfter.Attempt, url)
+ if backoff != nil {
+ backoff.Sleep(retryAfter.Wait)
+ }
+ return nil
+}
+
+// checkWait returns true along with a number of seconds if
+// the server instructed us to wait before retrying.
+func checkWait(resp *http.Response) (int, bool) {
+ switch r := resp.StatusCode; {
+ // any 500 error code and 429 can trigger a wait
+ case r == http.StatusTooManyRequests, r >= 500:
+ default:
+ return 0, false
+ }
+ i, ok := retryAfterSeconds(resp)
+ return i, ok
+}
+
+func getRetryReason(retries, seconds int, resp *http.Response, err error) string {
+ // priority and fairness sets the UID of the FlowSchema
+ // associated with a request in the following response Header.
+ const responseHeaderMatchedFlowSchemaUID = "X-Kubernetes-PF-FlowSchema-UID"
+
+ message := fmt.Sprintf("retries: %d, retry-after: %ds", retries, seconds)
+
+ switch {
+ case resp.StatusCode == http.StatusTooManyRequests:
+ // it is server-side throttling from priority and fairness
+ flowSchemaUID := resp.Header.Get(responseHeaderMatchedFlowSchemaUID)
+ return fmt.Sprintf("%s - retry-reason: due to server-side throttling, FlowSchema UID: %q", message, flowSchemaUID)
+ case err != nil:
+ // it's a retryable error
+ return fmt.Sprintf("%s - retry-reason: due to retryable error, error: %v", message, err)
+ default:
+ return fmt.Sprintf("%s - retry-reason: %d", message, resp.StatusCode)
+ }
+}
+
+func readAndCloseResponseBody(resp *http.Response) {
+ if resp == nil {
+ return
+ }
+
+ // Ensure the response body is fully read and closed
+ // before we reconnect, so that we reuse the same TCP
+ // connection.
+ const maxBodySlurpSize = 2 << 10
+ defer resp.Body.Close()
+
+ if resp.ContentLength <= maxBodySlurpSize {
+ io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize})
+ }
+}
+
+func retryAfterResponse() *http.Response {
+ return &http.Response{
+ StatusCode: http.StatusInternalServerError,
+ Header: http.Header{"Retry-After": []string{"1"}},
+ }
+}
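Editorial note: a minimal sketch (not part of this diff) of how a caller could drive the WithRetry contract above for an idempotent GET. The http.Client, BackoffManager, and WithRetry values are assumed to be supplied by the caller, since the package's concrete withRetry type is unexported.

package example

import (
	"context"
	"net/http"

	"k8s.io/client-go/rest"
)

// getWithRetries issues GET requests until NextRetry says to stop, honoring
// the Retry-After information via BeforeNextRetry.
func getWithRetries(ctx context.Context, client *http.Client, retry rest.WithRetry, backoff rest.BackoffManager, url string) (*http.Response, error) {
	// Idempotent GETs may be retried on any transport-level error.
	isRetryable := rest.IsRetryableErrorFunc(func(_ *http.Request, err error) bool {
		return err != nil
	})
	for {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		if err != nil {
			return nil, err
		}
		resp, err := client.Do(req)
		retryAfter, shouldRetry := retry.NextRetry(req, resp, err, isRetryable)
		if !shouldRetry {
			return resp, err
		}
		if resp != nil {
			// The real client drains and closes the body before retrying so
			// the TCP connection can be reused.
			resp.Body.Close()
		}
		// There is no request body here, so only the context check and the
		// Retry-After sleep apply.
		if berr := retry.BeforeNextRetry(ctx, backoff, retryAfter, url, nil); berr != nil {
			return nil, berr
		}
	}
}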
diff --git a/vendor/k8s.io/client-go/testing/fake.go b/vendor/k8s.io/client-go/testing/fake.go
index 8b9ee149c..3ab9c1b07 100644
--- a/vendor/k8s.io/client-go/testing/fake.go
+++ b/vendor/k8s.io/client-go/testing/fake.go
@@ -106,11 +106,15 @@ func (c *Fake) PrependReactor(verb, resource string, reaction ReactionFunc) {
// AddWatchReactor appends a reactor to the end of the chain.
func (c *Fake) AddWatchReactor(resource string, reaction WatchReactionFunc) {
+ c.Lock()
+ defer c.Unlock()
c.WatchReactionChain = append(c.WatchReactionChain, &SimpleWatchReactor{resource, reaction})
}
// PrependWatchReactor adds a reactor to the beginning of the chain.
func (c *Fake) PrependWatchReactor(resource string, reaction WatchReactionFunc) {
+ c.Lock()
+ defer c.Unlock()
c.WatchReactionChain = append([]WatchReactor{&SimpleWatchReactor{resource, reaction}}, c.WatchReactionChain...)
}
diff --git a/vendor/k8s.io/client-go/testing/fixture.go b/vendor/k8s.io/client-go/testing/fixture.go
index d3b937247..fe7f0cd32 100644
--- a/vendor/k8s.io/client-go/testing/fixture.go
+++ b/vendor/k8s.io/client-go/testing/fixture.go
@@ -20,6 +20,7 @@ import (
"fmt"
"reflect"
"sort"
+ "strings"
"sync"
jsonpatch "github.com/evanphx/json-patch"
@@ -399,7 +400,8 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st
if _, ok = t.objects[gvr][namespacedName]; ok {
if replaceExisting {
for _, w := range t.getWatches(gvr, ns) {
- w.Modify(obj)
+ // To avoid the object being accidentally modified by the watcher
+ w.Modify(obj.DeepCopyObject())
}
t.objects[gvr][namespacedName] = obj
return nil
@@ -415,7 +417,8 @@ func (t *tracker) add(gvr schema.GroupVersionResource, obj runtime.Object, ns st
t.objects[gvr][namespacedName] = obj
for _, w := range t.getWatches(gvr, ns) {
- w.Add(obj)
+ // To avoid the object being accidentally modified by the watcher
+ w.Add(obj.DeepCopyObject())
}
return nil
@@ -455,7 +458,7 @@ func (t *tracker) Delete(gvr schema.GroupVersionResource, ns, name string) error
delete(objs, namespacedName)
for _, w := range t.getWatches(gvr, ns) {
- w.Delete(obj)
+ w.Delete(obj.DeepCopyObject())
}
return nil
}
@@ -509,12 +512,8 @@ func (r *SimpleReactor) Handles(action Action) bool {
if !verbCovers {
return false
}
- resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource
- if !resourceCovers {
- return false
- }
- return true
+ return resourceCovers(r.Resource, action)
}
func (r *SimpleReactor) React(action Action) (bool, runtime.Object, error) {
@@ -530,12 +529,7 @@ type SimpleWatchReactor struct {
}
func (r *SimpleWatchReactor) Handles(action Action) bool {
- resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource
- if !resourceCovers {
- return false
- }
-
- return true
+ return resourceCovers(r.Resource, action)
}
func (r *SimpleWatchReactor) React(action Action) (bool, watch.Interface, error) {
@@ -551,14 +545,27 @@ type SimpleProxyReactor struct {
}
func (r *SimpleProxyReactor) Handles(action Action) bool {
- resourceCovers := r.Resource == "*" || r.Resource == action.GetResource().Resource
- if !resourceCovers {
- return false
- }
-
- return true
+ return resourceCovers(r.Resource, action)
}
func (r *SimpleProxyReactor) React(action Action) (bool, restclient.ResponseWrapper, error) {
return r.Reaction(action)
}
+
+func resourceCovers(resource string, action Action) bool {
+ if resource == "*" {
+ return true
+ }
+
+ if resource == action.GetResource().Resource {
+ return true
+ }
+
+ if index := strings.Index(resource, "/"); index != -1 &&
+ resource[:index] == action.GetResource().Resource &&
+ resource[index+1:] == action.GetSubresource() {
+ return true
+ }
+
+ return false
+}
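Editorial note: a short sketch (not part of this diff) of what the new subresource matching enables. A reactor keyed on "deployments/status" now fires only for status-subresource actions, while a plain "deployments" reactor keeps covering the main resource; the reactor body below is illustrative.

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	k8stesting "k8s.io/client-go/testing"
)

// registerStatusReactor intercepts only status-subresource updates.
func registerStatusReactor(f *k8stesting.Fake) {
	f.PrependReactor("update", "deployments/status", func(action k8stesting.Action) (bool, runtime.Object, error) {
		// Reached only when action.GetResource().Resource == "deployments"
		// and action.GetSubresource() == "status".
		return true, nil, nil
	})
}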
diff --git a/vendor/k8s.io/client-go/testing/interface.go b/vendor/k8s.io/client-go/testing/interface.go
new file mode 100644
index 000000000..266c6ba3f
--- /dev/null
+++ b/vendor/k8s.io/client-go/testing/interface.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testing
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ restclient "k8s.io/client-go/rest"
+)
+
+type FakeClient interface {
+ // Tracker gives access to the ObjectTracker internal to the fake client.
+ Tracker() ObjectTracker
+
+ // AddReactor appends a reactor to the end of the chain.
+ AddReactor(verb, resource string, reaction ReactionFunc)
+
+ // PrependReactor adds a reactor to the beginning of the chain.
+ PrependReactor(verb, resource string, reaction ReactionFunc)
+
+ // AddWatchReactor appends a reactor to the end of the chain.
+ AddWatchReactor(resource string, reaction WatchReactionFunc)
+
+ // PrependWatchReactor adds a reactor to the beginning of the chain.
+ PrependWatchReactor(resource string, reaction WatchReactionFunc)
+
+ // AddProxyReactor appends a reactor to the end of the chain.
+ AddProxyReactor(resource string, reaction ProxyReactionFunc)
+
+ // PrependProxyReactor adds a reactor to the beginning of the chain.
+ PrependProxyReactor(resource string, reaction ProxyReactionFunc)
+
+ // Invokes records the provided Action and then invokes the ReactionFunc that
+ // handles the action if one exists. defaultReturnObj is expected to be of the
+ // same type a normal call would return.
+ Invokes(action Action, defaultReturnObj runtime.Object) (runtime.Object, error)
+
+ // InvokesWatch records the provided Action and then invokes the ReactionFunc
+ // that handles the action if one exists.
+ InvokesWatch(action Action) (watch.Interface, error)
+
+ // InvokesProxy records the provided Action and then invokes the ReactionFunc
+ // that handles the action if one exists.
+ InvokesProxy(action Action) restclient.ResponseWrapper
+
+ // ClearActions clears the history of actions called on the fake client.
+ ClearActions()
+
+ // Actions returns a chronologically ordered slice of fake actions called on the
+ // fake client.
+ Actions() []Action
+}
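Editorial note: a brief sketch (not part of this diff) of how the new FakeClient interface can be used. Generated fake clientsets are expected to satisfy it, so test helpers can accept any fake regardless of API group; the kubernetes fake package is used here purely as an illustration.

package example

import (
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

// Compile-time check that the core fake clientset satisfies FakeClient.
var _ k8stesting.FakeClient = &fake.Clientset{}

// recordedActionCount works with any generated fake clientset.
func recordedActionCount(c k8stesting.FakeClient) int {
	return len(c.Actions())
}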
diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/k8s.io/client-go/third_party/forked/golang/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/k8s.io/client-go/third_party/forked/golang/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go
index 684d1a8d3..fe394d16c 100644
--- a/vendor/k8s.io/client-go/tools/cache/controller.go
+++ b/vendor/k8s.io/client-go/tools/cache/controller.go
@@ -325,7 +325,7 @@ func NewInformer(
return clientState, newInformer(lw, objType, resyncPeriod, h, clientState)
}
-// NewIndexerInformer returns a Indexer and a controller for populating the index
+// NewIndexerInformer returns an Indexer and a Controller for populating the index
// while also providing event notifications. You should only use the returned
// Index for Get/List operations; Add/Modify/Deletes will cause the event
// notifications to be faulty.
diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
index f648673e1..82038e0f9 100644
--- a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
+++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go
@@ -153,7 +153,7 @@ const (
// change happened, and the object's state after* that change.
//
// [*] Unless the change is a deletion, and then you'll get the final
-// state of the object before it was deleted.
+// state of the object before it was deleted.
type Delta struct {
Type DeltaType
Object interface{}
@@ -174,9 +174,10 @@ type Deltas []Delta
// modifications.
//
// TODO: consider merging keyLister with this object, tracking a list of
-// "known" keys when Pop() is called. Have to think about how that
-// affects error retrying.
-// NOTE: It is possible to misuse this and cause a race when using an
+// "known" keys when Pop() is called. Have to think about how that
+// affects error retrying.
+//
+// NOTE: It is possible to misuse this and cause a race when using an
// external known object source.
// Whether there is a potential race depends on how the consumer
// modifies knownObjects. In Pop(), process function is called under
@@ -185,8 +186,7 @@ type Deltas []Delta
//
// Example:
// In case of sharedIndexInformer being a consumer
-// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/
-// src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
+// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192),
// there is no race as knownObjects (s.indexer) is modified safely
// under DeltaFIFO's lock. The only exceptions are GetStore() and
// GetIndexer() methods, which expose ways to modify the underlying
@@ -340,7 +340,7 @@ func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error {
if !ok {
return fmt.Errorf("object must be of type deltas, but got: %#v", obj)
}
- id, err := f.KeyOf(deltas.Newest().Object)
+ id, err := f.KeyOf(deltas)
if err != nil {
return KeyError{obj, err}
}
@@ -373,13 +373,8 @@ func dedupDeltas(deltas Deltas) Deltas {
a := &deltas[n-1]
b := &deltas[n-2]
if out := isDup(a, b); out != nil {
- // `a` and `b` are duplicates. Only keep the one returned from isDup().
- // TODO: This extra array allocation and copy seems unnecessary if
- // all we do to dedup is compare the new delta with the last element
- // in `items`, which could be done by mutating `items` directly.
- // Might be worth profiling and investigating if it is safe to optimize.
- d := append(Deltas{}, deltas[:n-2]...)
- return append(d, *out)
+ deltas[n-2] = *out
+ return deltas[:n-1]
}
return deltas
}
diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go
index edb2c8ed6..f82bf22d2 100644
--- a/vendor/k8s.io/client-go/tools/cache/fifo.go
+++ b/vendor/k8s.io/client-go/tools/cache/fifo.go
@@ -263,10 +263,7 @@ func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
func (f *FIFO) IsClosed() bool {
f.lock.Lock()
defer f.lock.Unlock()
- if f.closed {
- return true
- }
- return false
+ return f.closed
}
// Pop waits until an item is ready and processes it. If multiple items are
diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go
index e503a45a4..819325e9e 100644
--- a/vendor/k8s.io/client-go/tools/cache/heap.go
+++ b/vendor/k8s.io/client-go/tools/cache/heap.go
@@ -304,10 +304,7 @@ func (h *Heap) GetByKey(key string) (interface{}, bool, error) {
func (h *Heap) IsClosed() bool {
h.lock.RLock()
defer h.lock.RUnlock()
- if h.closed {
- return true
- }
- return false
+ return h.closed
}
// NewHeap returns a Heap which can be used to queue up items to process.
diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go
index 4611e80ff..b37537cbd 100644
--- a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go
+++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go
@@ -99,7 +99,7 @@ func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) {
for {
if d.lastRotated.IsZero() {
d.lastRotated = time.Now()
- } else if time.Now().Sub(d.lastRotated) > d.retainDuration {
+ } else if time.Since(d.lastRotated) > d.retainDuration {
d.retainedCachedObjs = d.cachedObjs
d.cachedObjs = nil
d.lastRotated = time.Now()
diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go
index c8f35eb8b..90fa45ddb 100644
--- a/vendor/k8s.io/client-go/tools/cache/reflector.go
+++ b/vendor/k8s.io/client-go/tools/cache/reflector.go
@@ -417,7 +417,8 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
// It doesn't make sense to re-list all objects because most likely we will be able to restart
// watch where we ended.
// If that's the case begin exponentially backing off and resend watch request.
- if utilnet.IsConnectionRefused(err) {
+ // Do the same for "429" errors.
+ if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) {
<-r.initConnBackoffManager.Backoff().C()
continue
}
@@ -432,6 +433,10 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
// has a semantic that it returns data at least as fresh as provided RV.
// So first try to LIST with setting RV to resource version of last observed object.
klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
+ case apierrors.IsTooManyRequests(err):
+ klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName)
+ <-r.initConnBackoffManager.Backoff().C()
+ continue
default:
klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
}
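Editorial note: a tiny sketch (not part of this diff) of the classification the reflector now applies to watch errors: connection-refused and 429 responses trigger backoff and a re-watch rather than a warning and a fresh list. The function name is illustrative.

package example

import (
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	utilnet "k8s.io/apimachinery/pkg/util/net"
)

// shouldBackoffAndRewatch mirrors the reflector's handling of transient
// watch failures after this change.
func shouldBackoffAndRewatch(err error) bool {
	return utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err)
}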
diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go
index 886e95d2d..24ffabab7 100644
--- a/vendor/k8s.io/client-go/tools/cache/store.go
+++ b/vendor/k8s.io/client-go/tools/cache/store.go
@@ -85,6 +85,11 @@ func (k KeyError) Error() string {
return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
}
+// Unwrap implements errors.Unwrap
+func (k KeyError) Unwrap() error {
+ return k.Err
+}
+
// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
// the object but not the object itself.
type ExplicitKey string
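Editorial note: a small sketch (not part of this diff) showing what the new Unwrap enables: the standard errors helpers can now see through a cache.KeyError to its cause. The sentinel error is illustrative.

package example

import (
	"errors"
	"fmt"

	"k8s.io/client-go/tools/cache"
)

var errNoMeta = errors.New("object has no metadata")

// wrapKeyError demonstrates errors.Is reaching the wrapped cause via Unwrap.
func wrapKeyError(obj interface{}) error {
	keyErr := cache.KeyError{Obj: obj, Err: errNoMeta}
	if errors.Is(keyErr, errNoMeta) { // true now that KeyError implements Unwrap
		return fmt.Errorf("cannot index object: %w", keyErr)
	}
	return keyErr
}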
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
index 24f469236..31716abf1 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go
@@ -245,6 +245,33 @@ type ExecConfig struct {
// to be stored directly in the kubeconfig.
// +k8s:conversion-gen=false
Config runtime.Object
+
+ // InteractiveMode determines this plugin's relationship with standard input. Valid
+ // values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this
+ // exec plugin wants to use standard input if it is available), or "Always" (this exec
+ // plugin requires standard input to function). See ExecInteractiveMode values for more
+ // details.
+ //
+ // If APIVersion is client.authentication.k8s.io/v1alpha1 or
+ // client.authentication.k8s.io/v1beta1, then this field is optional and defaults
+ // to "IfAvailable" when unset. Otherwise, this field is required.
+ // +optional
+ InteractiveMode ExecInteractiveMode
+
+ // StdinUnavailable indicates whether the exec authenticator can pass standard
+ // input through to this exec plugin. For example, a higher level entity might be using
+ // standard input for something else and therefore it would not be safe for the exec
+ // plugin to use standard input. This is kept here in order to keep all of the exec configuration
+ // together, but it is never serialized.
+ // +k8s:conversion-gen=false
+ StdinUnavailable bool
+
+ // StdinUnavailableMessage is an optional message to be displayed when the exec authenticator
+ // cannot successfully run this exec plugin because it needs to use standard input and
+ // StdinUnavailable is true. For example, a process that is already using standard input to
+ // read user instructions might set this to "used by my-program to read user instructions".
+ // +k8s:conversion-gen=false
+ StdinUnavailableMessage string
}
var _ fmt.Stringer = new(ExecConfig)
@@ -271,7 +298,7 @@ func (c ExecConfig) String() string {
if c.Config != nil {
config = "runtime.Object(--- REDACTED ---)"
}
- return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q, ProvideClusterInfo: %t, Config: %s}", c.Command, args, env, c.APIVersion, c.ProvideClusterInfo, config)
+ return fmt.Sprintf("api.ExecConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q, ProvideClusterInfo: %t, Config: %s, StdinUnavailable: %t}", c.Command, args, env, c.APIVersion, c.ProvideClusterInfo, config, c.StdinUnavailable)
}
// ExecEnvVar is used for setting environment variables when executing an exec-based
@@ -281,6 +308,26 @@ type ExecEnvVar struct {
Value string `json:"value"`
}
+// ExecInteractiveMode is a string that describes an exec plugin's relationship with standard input.
+type ExecInteractiveMode string
+
+const (
+ // NeverExecInteractiveMode declares that this exec plugin never needs to use standard
+ // input, and therefore the exec plugin will be run regardless of whether standard input is
+ // available for user input.
+ NeverExecInteractiveMode ExecInteractiveMode = "Never"
+ // IfAvailableExecInteractiveMode declares that this exec plugin would like to use standard input
+ // if it is available, but can still operate if standard input is not available. Therefore, the
+ // exec plugin will be run regardless of whether stdin is available for user input. If standard
+ // input is available for user input, then it will be provided to this exec plugin.
+ IfAvailableExecInteractiveMode ExecInteractiveMode = "IfAvailable"
+ // AlwaysExecInteractiveMode declares that this exec plugin requires standard input in order to
+ // run, and therefore the exec plugin will only be run if standard input is available for user
+ // input. If standard input is not available for user input, then the exec plugin will not be run
+ // and an error will be returned by the exec plugin runner.
+ AlwaysExecInteractiveMode ExecInteractiveMode = "Always"
+)
+
// NewConfig is a convenience function that returns a new Config object with non-nil maps
func NewConfig() *Config {
return &Config{
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go
index c38ebc076..6eee281bc 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/conversion.go
@@ -165,7 +165,7 @@ func Convert_Map_string_To_runtime_Object_To_Slice_v1_NamedExtension(in *map[str
newExtension := (*in)[key]
oldExtension := runtime.RawExtension{}
if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&newExtension, &oldExtension, s); err != nil {
- return nil
+ return err
}
namedExtension := NamedExtension{key, oldExtension}
*out = append(*out, namedExtension)
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go
new file mode 100644
index 000000000..bf513dc7c
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/defaults.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+ return RegisterDefaults(scheme)
+}
+
+func SetDefaults_ExecConfig(exec *ExecConfig) {
+ if len(exec.InteractiveMode) == 0 {
+ switch exec.APIVersion {
+ case "client.authentication.k8s.io/v1beta1", "client.authentication.k8s.io/v1alpha1":
+ // default to IfAvailableExecInteractiveMode for backwards compatibility
+ exec.InteractiveMode = IfAvailableExecInteractiveMode
+ default:
+ // require other versions to explicitly declare whether they want stdin or not
+ }
+ }
+}
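Editorial note: a minimal sketch (not part of this diff) of the defaulting behavior above. For the v1beta1 exec credential API the mode is filled in as IfAvailable; newer exec API versions are left empty so validation can demand an explicit choice.

package example

import (
	"fmt"

	clientcmdv1 "k8s.io/client-go/tools/clientcmd/api/v1"
)

func demoExecDefaulting() {
	exec := &clientcmdv1.ExecConfig{APIVersion: "client.authentication.k8s.io/v1beta1"}
	clientcmdv1.SetDefaults_ExecConfig(exec)
	fmt.Println(exec.InteractiveMode) // prints "IfAvailable"
}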
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
index ba5572ab0..3ccdebc1c 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/doc.go
@@ -16,5 +16,6 @@ limitations under the License.
// +k8s:conversion-gen=k8s.io/client-go/tools/clientcmd/api
// +k8s:deepcopy-gen=package
+// +k8s:defaulter-gen=Kind
package v1
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go
index 24f6284c5..4a4d4a55f 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/register.go
@@ -37,7 +37,7 @@ func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
- localSchemeBuilder.Register(addKnownTypes)
+ localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
}
func addKnownTypes(scheme *runtime.Scheme) error {
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
index 8c29b39c1..7e8351032 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/types.go
@@ -221,6 +221,18 @@ type ExecConfig struct {
// to false. Package k8s.io/client-go/tools/auth/exec provides helper methods for
// reading this environment variable.
ProvideClusterInfo bool `json:"provideClusterInfo"`
+
+ // InteractiveMode determines this plugin's relationship with standard input. Valid
+ // values are "Never" (this exec plugin never uses standard input), "IfAvailable" (this
+ // exec plugin wants to use standard input if it is available), or "Always" (this exec
+ // plugin requires standard input to function). See ExecInteractiveMode values for more
+ // details.
+ //
+ // If APIVersion is client.authentication.k8s.io/v1alpha1 or
+ // client.authentication.k8s.io/v1beta1, then this field is optional and defaults
+ // to "IfAvailable" when unset. Otherwise, this field is required.
+ //+optional
+ InteractiveMode ExecInteractiveMode `json:"interactiveMode,omitempty"`
}
// ExecEnvVar is used for setting environment variables when executing an exec-based
@@ -229,3 +241,23 @@ type ExecEnvVar struct {
Name string `json:"name"`
Value string `json:"value"`
}
+
+// ExecInteractiveMode is a string that describes an exec plugin's relationship with standard input.
+type ExecInteractiveMode string
+
+const (
+ // NeverExecInteractiveMode declares that this exec plugin never needs to use standard
+ // input, and therefore the exec plugin will be run regardless of whether standard input is
+ // available for user input.
+ NeverExecInteractiveMode ExecInteractiveMode = "Never"
+ // IfAvailableExecInteractiveMode declares that this exec plugin would like to use standard input
+ // if it is available, but can still operate if standard input is not available. Therefore, the
+ // exec plugin will be run regardless of whether stdin is available for user input. If standard
+ // input is available for user input, then it will be provided to this exec plugin.
+ IfAvailableExecInteractiveMode ExecInteractiveMode = "IfAvailable"
+ // AlwaysExecInteractiveMode declares that this exec plugin requires standard input in order to
+ // run, and therefore the exec plugin will only be run if standard input is available for user
+ // input. If standard input is not available for user input, then the exec plugin will not be run
+ // and an error will be returned by the exec plugin runner.
+ AlwaysExecInteractiveMode ExecInteractiveMode = "Always"
+)
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
index 26e96529d..13dfa903c 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.conversion.go
@@ -376,6 +376,7 @@ func autoConvert_v1_ExecConfig_To_api_ExecConfig(in *ExecConfig, out *api.ExecCo
out.APIVersion = in.APIVersion
out.InstallHint = in.InstallHint
out.ProvideClusterInfo = in.ProvideClusterInfo
+ out.InteractiveMode = api.ExecInteractiveMode(in.InteractiveMode)
return nil
}
@@ -392,6 +393,9 @@ func autoConvert_api_ExecConfig_To_v1_ExecConfig(in *api.ExecConfig, out *ExecCo
out.InstallHint = in.InstallHint
out.ProvideClusterInfo = in.ProvideClusterInfo
// INFO: in.Config opted out of conversion generation
+ out.InteractiveMode = ExecInteractiveMode(in.InteractiveMode)
+ // INFO: in.StdinUnavailable opted out of conversion generation
+ // INFO: in.StdinUnavailableMessage opted out of conversion generation
return nil
}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go
new file mode 100644
index 000000000..aae469033
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/clientcmd/api/v1/zz_generated.defaults.go
@@ -0,0 +1,42 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulting functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+ scheme.AddTypeDefaultingFunc(&Config{}, func(obj interface{}) { SetObjectDefaults_Config(obj.(*Config)) })
+ return nil
+}
+
+func SetObjectDefaults_Config(in *Config) {
+ for i := range in.AuthInfos {
+ a := &in.AuthInfos[i]
+ if a.AuthInfo.Exec != nil {
+ SetDefaults_ExecConfig(a.AuthInfo.Exec)
+ }
+ }
+}
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/config.go b/vendor/k8s.io/client-go/tools/clientcmd/config.go
index 12c8b84f3..31f896316 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/config.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/config.go
@@ -135,11 +135,7 @@ func (o *PathOptions) GetDefaultFilename() string {
}
func (o *PathOptions) IsExplicitFile() bool {
- if len(o.LoadingRules.ExplicitPath) > 0 {
- return true
- }
-
- return false
+ return len(o.LoadingRules.ExplicitPath) > 0
}
func (o *PathOptions) GetExplicitFile() string {
diff --git a/vendor/k8s.io/client-go/tools/clientcmd/validation.go b/vendor/k8s.io/client-go/tools/clientcmd/validation.go
index f77ef04fe..8541a08b2 100644
--- a/vendor/k8s.io/client-go/tools/clientcmd/validation.go
+++ b/vendor/k8s.io/client-go/tools/clientcmd/validation.go
@@ -308,6 +308,14 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdapi.AuthInfo) []err
validationErrors = append(validationErrors, fmt.Errorf("env variable name must be specified for %v to use exec authentication plugin", authInfoName))
}
}
+ switch authInfo.Exec.InteractiveMode {
+ case "":
+ validationErrors = append(validationErrors, fmt.Errorf("interactiveMode must be specified for %v to use exec authentication plugin", authInfoName))
+ case clientcmdapi.NeverExecInteractiveMode, clientcmdapi.IfAvailableExecInteractiveMode, clientcmdapi.AlwaysExecInteractiveMode:
+ // These are valid
+ default:
+ validationErrors = append(validationErrors, fmt.Errorf("invalid interactiveMode for %v: %q", authInfoName, authInfo.Exec.InteractiveMode))
+ }
}
// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
diff --git a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
index 3f6b898e3..55f34fd3e 100644
--- a/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
+++ b/vendor/k8s.io/client-go/tools/leaderelection/leaderelection.go
@@ -56,6 +56,7 @@ import (
"bytes"
"context"
"fmt"
+ "sync"
"time"
"k8s.io/apimachinery/pkg/api/errors"
@@ -187,6 +188,9 @@ type LeaderElector struct {
// clock is wrapper around time to allow for less flaky testing
clock clock.Clock
+ // used to lock the observedRecord
+ observedRecordLock sync.Mutex
+
metrics leaderMetricsAdapter
}
@@ -224,13 +228,14 @@ func RunOrDie(ctx context.Context, lec LeaderElectionConfig) {
// GetLeader returns the identity of the last observed leader or returns the empty string if
// no leader has yet been observed.
+// This function is for informational purposes. (e.g. monitoring, logs, etc.)
func (le *LeaderElector) GetLeader() string {
- return le.observedRecord.HolderIdentity
+ return le.getObservedRecord().HolderIdentity
}
// IsLeader returns true if the last observed leader was this client else returns false.
func (le *LeaderElector) IsLeader() bool {
- return le.observedRecord.HolderIdentity == le.config.Lock.Identity()
+ return le.getObservedRecord().HolderIdentity == le.config.Lock.Identity()
}
// acquire loops calling tryAcquireOrRenew and returns true immediately when tryAcquireOrRenew succeeds.
@@ -301,8 +306,8 @@ func (le *LeaderElector) release() bool {
klog.Errorf("Failed to release lock: %v", err)
return false
}
- le.observedRecord = leaderElectionRecord
- le.observedTime = le.clock.Now()
+
+ le.setObservedRecord(&leaderElectionRecord)
return true
}
@@ -329,16 +334,17 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
klog.Errorf("error initially creating leader election record: %v", err)
return false
}
- le.observedRecord = leaderElectionRecord
- le.observedTime = le.clock.Now()
+
+ le.setObservedRecord(&leaderElectionRecord)
+
return true
}
// 2. Record obtained, check the Identity & Time
if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) {
- le.observedRecord = *oldLeaderElectionRecord
+ le.setObservedRecord(oldLeaderElectionRecord)
+
le.observedRawRecord = oldLeaderElectionRawRecord
- le.observedTime = le.clock.Now()
}
if len(oldLeaderElectionRecord.HolderIdentity) > 0 &&
le.observedTime.Add(le.config.LeaseDuration).After(now.Time) &&
@@ -362,8 +368,7 @@ func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool {
return false
}
- le.observedRecord = leaderElectionRecord
- le.observedTime = le.clock.Now()
+ le.setObservedRecord(&leaderElectionRecord)
return true
}
@@ -392,3 +397,22 @@ func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error {
return nil
}
+
+// setObservedRecord will set a new observedRecord and update observedTime to the current time.
+// Protect critical sections with lock.
+func (le *LeaderElector) setObservedRecord(observedRecord *rl.LeaderElectionRecord) {
+ le.observedRecordLock.Lock()
+ defer le.observedRecordLock.Unlock()
+
+ le.observedRecord = *observedRecord
+ le.observedTime = le.clock.Now()
+}
+
+// getObservedRecord returns the observedRecord.
+// Protect critical sections with lock.
+func (le *LeaderElector) getObservedRecord() rl.LeaderElectionRecord {
+ le.observedRecordLock.Lock()
+ defer le.observedRecordLock.Unlock()
+
+ return le.observedRecord
+}
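Editorial note: a short sketch (not part of this diff) of why the new observedRecordLock matters: the election loop mutates the observed record on its own goroutine while callers poll the getters from another, e.g. a metrics or health loop like the one below. The LeaderElector is assumed to come from leaderelection.NewLeaderElector elsewhere.

package example

import (
	"context"
	"time"

	"k8s.io/client-go/tools/leaderelection"
)

// reportLeadership polls the elector concurrently with le.Run(ctx); this is
// now race-free because the observed record is guarded by a mutex.
func reportLeadership(ctx context.Context, le *leaderelection.LeaderElector) {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			_ = le.IsLeader()
			_ = le.GetLeader()
		}
	}
}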
diff --git a/vendor/k8s.io/client-go/tools/record/events_cache.go b/vendor/k8s.io/client-go/tools/record/events_cache.go
index 9374612f2..329c7ce57 100644
--- a/vendor/k8s.io/client-go/tools/record/events_cache.go
+++ b/vendor/k8s.io/client-go/tools/record/events_cache.go
@@ -25,7 +25,7 @@ import (
"github.com/golang/groupcache/lru"
- "k8s.io/api/core/v1"
+ v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
@@ -175,7 +175,7 @@ func EventAggregatorByReasonFunc(event *v1.Event) (string, string) {
// EventAggregatorMessageFunc is responsible for producing an aggregation message
type EventAggregatorMessageFunc func(event *v1.Event) string
-// EventAggregratorByReasonMessageFunc returns an aggregate message by prefixing the incoming message
+// EventAggregatorByReasonMessageFunc returns an aggregate message by prefixing the incoming message
func EventAggregatorByReasonMessageFunc(event *v1.Event) string {
return "(combined from similar events): " + event.Message
}
@@ -420,7 +420,7 @@ type EventCorrelateResult struct {
// prior to interacting with the API server to record the event.
//
// The default behavior is as follows:
-// * Aggregation is performed if a similar event is recorded 10 times in a
+// * Aggregation is performed if a similar event is recorded 10 times
// in a 10 minute rolling interval. A similar event is an event that varies only by
// the Event.Message field. Rather than recording the precise event, aggregation
// will create a new event whose message reports that it has combined events with
diff --git a/vendor/k8s.io/client-go/tools/record/util/util.go b/vendor/k8s.io/client-go/tools/record/util/util.go
index d1818a8d9..afcc6a6a0 100644
--- a/vendor/k8s.io/client-go/tools/record/util/util.go
+++ b/vendor/k8s.io/client-go/tools/record/util/util.go
@@ -36,9 +36,5 @@ func ValidateEventType(eventtype string) bool {
func IsKeyNotFoundError(err error) bool {
statusErr, _ := err.(*errors.StatusError)
- if statusErr != nil && statusErr.Status().Code == http.StatusNotFound {
- return true
- }
-
- return false
+ return statusErr != nil && statusErr.Status().Code == http.StatusNotFound
}
diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go
index cd0a4455f..818fd52d6 100644
--- a/vendor/k8s.io/client-go/transport/round_trippers.go
+++ b/vendor/k8s.io/client-go/transport/round_trippers.go
@@ -346,7 +346,7 @@ func (r *requestInfo) toCurl() string {
}
}
- return fmt.Sprintf("curl -k -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL)
+ return fmt.Sprintf("curl -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL)
}
// debuggingRoundTripper will display information about the requests passing
diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go
index 88d89494d..b4a7bfa67 100644
--- a/vendor/k8s.io/client-go/transport/transport.go
+++ b/vendor/k8s.io/client-go/transport/transport.go
@@ -20,6 +20,7 @@ import (
"context"
"crypto/tls"
"crypto/x509"
+ "encoding/pem"
"fmt"
"io/ioutil"
"net/http"
@@ -79,7 +80,11 @@ func TLSConfigFor(c *Config) (*tls.Config, error) {
}
if c.HasCA() {
- tlsConfig.RootCAs = rootCertPool(c.TLS.CAData)
+ rootCAs, err := rootCertPool(c.TLS.CAData)
+ if err != nil {
+ return nil, fmt.Errorf("unable to load root certificates: %w", err)
+ }
+ tlsConfig.RootCAs = rootCAs
}
var staticCert *tls.Certificate
@@ -176,18 +181,41 @@ func dataFromSliceOrFile(data []byte, file string) ([]byte, error) {
// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs".
// When caData is not empty, it will be the ONLY information used in the CertPool.
-func rootCertPool(caData []byte) *x509.CertPool {
+func rootCertPool(caData []byte) (*x509.CertPool, error) {
// What we really want is a copy of x509.systemRootsPool, but that isn't exposed. It's difficult to build (see the go
// code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values
// It doesn't allow trusting either/or, but hopefully that won't be an issue
if len(caData) == 0 {
- return nil
+ return nil, nil
}
// if we have caData, use it
certPool := x509.NewCertPool()
- certPool.AppendCertsFromPEM(caData)
- return certPool
+ if ok := certPool.AppendCertsFromPEM(caData); !ok {
+ return nil, createErrorParsingCAData(caData)
+ }
+ return certPool, nil
+}
+
+// createErrorParsingCAData ALWAYS returns an error. We call it because we know we failed to AppendCertsFromPEM
+// but we don't know the specific error because that API is just true/false
+func createErrorParsingCAData(pemCerts []byte) error {
+ for len(pemCerts) > 0 {
+ var block *pem.Block
+ block, pemCerts = pem.Decode(pemCerts)
+ if block == nil {
+ return fmt.Errorf("unable to parse bytes as PEM block")
+ }
+
+ if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+ continue
+ }
+
+ if _, err := x509.ParseCertificate(block.Bytes); err != nil {
+ return fmt.Errorf("failed to parse certificate: %w", err)
+ }
+ }
+ return fmt.Errorf("no valid certificate authority data seen")
}
// WrapperFunc wraps an http.RoundTripper when a new transport
@@ -269,7 +297,7 @@ type certificateCacheEntry struct {
// isStale returns true when this cache entry is too old to be usable
func (c *certificateCacheEntry) isStale() bool {
- return time.Now().Sub(c.birth) > time.Second
+ return time.Since(c.birth) > time.Second
}
func newCertificateCacheEntry(certFile, keyFile string) certificateCacheEntry {
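Editorial note: a standalone sketch (not the package's API; the helpers above are unexported) of the same "append, and explain the failure" pattern, for callers that want to validate CA bytes before building a rest.Config.

package example

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// validateCABundle returns a cert pool, or a more specific error than the
// bare true/false reported by AppendCertsFromPEM.
func validateCABundle(caData []byte) (*x509.CertPool, error) {
	pool := x509.NewCertPool()
	if pool.AppendCertsFromPEM(caData) {
		return pool, nil
	}
	remaining := caData
	for len(remaining) > 0 {
		var block *pem.Block
		block, remaining = pem.Decode(remaining)
		if block == nil {
			return nil, fmt.Errorf("unable to parse bytes as PEM block")
		}
		if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
			continue
		}
		if _, err := x509.ParseCertificate(block.Bytes); err != nil {
			return nil, fmt.Errorf("failed to parse certificate: %w", err)
		}
	}
	return nil, fmt.Errorf("no valid certificate authority data seen")
}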
diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go
index 3da144163..bffb15262 100644
--- a/vendor/k8s.io/client-go/util/cert/cert.go
+++ b/vendor/k8s.io/client-go/util/cert/cert.go
@@ -62,6 +62,7 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro
CommonName: cfg.CommonName,
Organization: cfg.Organization,
},
+ DNSNames: []string{cfg.CommonName},
NotBefore: now.UTC(),
NotAfter: now.Add(duration365d * 10).UTC(),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go
index 5b27a1127..6739a3fa4 100644
--- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go
+++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/client_generator.go
@@ -359,7 +359,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
}
} else {
// User has not specified any override for this group version.
- // filter out types which dont have genclient.
+ // filter out types which don't have genclient.
if tags := util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)); !tags.GenerateClient {
continue
}
diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go
index 7e11d7820..179490914 100644
--- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go
+++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/fake_client_generator.go
@@ -54,14 +54,14 @@ func PackageForGroup(gv clientgentypes.GroupVersion, typeList []*types.Type, cli
DefaultGen: generator.DefaultGen{
OptionalName: "fake_" + strings.ToLower(c.Namers["private"].Name(t)),
},
- outputPackage: outputPackage,
- inputPackage: inputPackage,
- group: gv.Group.NonEmpty(),
- version: gv.Version.String(),
- groupGoName: groupGoName,
- typeToMatch: t,
- imports: generator.NewImportTracker(),
- applyBuilderPackage: applyBuilderPackage,
+ outputPackage: outputPackage,
+ inputPackage: inputPackage,
+ group: gv.Group.NonEmpty(),
+ version: gv.Version.String(),
+ groupGoName: groupGoName,
+ typeToMatch: t,
+ imports: generator.NewImportTracker(),
+ applyConfigurationPackage: applyBuilderPackage,
})
}
diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go
index d23b8005f..cd731cb9d 100644
--- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go
+++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_clientset.go
@@ -156,7 +156,10 @@ func (c *Clientset) Tracker() testing.ObjectTracker {
`
var checkImpl = `
-var _ clientset.Interface = &Clientset{}
+var (
+ _ clientset.Interface = &Clientset{}
+ _ testing.FakeClient = &Clientset{}
+)
`
var clientsetInterfaceImplTemplate = `
diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go
index 2ea24c1ac..884abbde9 100644
--- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go
+++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go
@@ -33,14 +33,14 @@ import (
// genFakeForType produces a file for each top-level type.
type genFakeForType struct {
generator.DefaultGen
- outputPackage string
- group string
- version string
- groupGoName string
- inputPackage string
- typeToMatch *types.Type
- imports namer.ImportTracker
- applyBuilderPackage string
+ outputPackage string
+ group string
+ version string
+ groupGoName string
+ inputPackage string
+ typeToMatch *types.Type
+ imports namer.ImportTracker
+ applyConfigurationPackage string
}
var _ generator.Generator = &genFakeForType{}
@@ -166,11 +166,11 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io.
"ExtractFromListOptions": c.Universe.Function(types.Name{Package: pkgClientGoTesting, Name: "ExtractFromListOptions"}),
}
- generateApply := len(g.applyBuilderPackage) > 0
+ generateApply := len(g.applyConfigurationPackage) > 0
if generateApply {
// Generated apply builder type references required for generated Apply function
_, gvString := util.ParsePathGroupVersion(g.inputPackage)
- m["applyConfig"] = types.Ref(gopath.Join(g.applyBuilderPackage, gvString), t.Name.Name+"ApplyConfiguration")
+ m["inputApplyConfig"] = types.Ref(gopath.Join(g.applyConfigurationPackage, gvString), t.Name.Name+"ApplyConfiguration")
}
if tags.NonNamespaced {
@@ -223,13 +223,19 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io.
if tags.HasVerb("applyStatus") && generateApply && genStatus(t) {
sw.Do(applyStatusTemplate, m)
}
+ _, typeGVString := util.ParsePathGroupVersion(g.inputPackage)
// generate extended client methods
for _, e := range tags.Extensions {
+ if e.HasVerb("apply") && !generateApply {
+ continue
+ }
inputType := *t
resultType := *t
+ inputGVString := typeGVString
if len(e.InputTypeOverride) > 0 {
if name, pkg := e.Input(); len(pkg) > 0 {
+ _, inputGVString = util.ParsePathGroupVersion(pkg)
newType := c.Universe.Type(types.Name{Package: pkg, Name: name})
inputType = *newType
} else {
@@ -247,6 +253,9 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io.
m["inputType"] = &inputType
m["resultType"] = &resultType
m["subresourcePath"] = e.SubResourcePath
+ if e.HasVerb("apply") {
+ m["inputApplyConfig"] = types.Ref(gopath.Join(g.applyConfigurationPackage, inputGVString), inputType.Name.Name+"ApplyConfiguration")
+ }
if e.HasVerb("get") {
if e.IsSubresource() {
@@ -293,8 +302,11 @@ func (g *genFakeForType) GenerateType(c *generator.Context, t *types.Type, w io.
}
if e.HasVerb("apply") && generateApply {
- // TODO: Support apply on arbitrary subresource once it is supported by the api-server.
- sw.Do(adjustTemplate(e.VerbName, e.VerbType, applyTemplate), m)
+ if e.IsSubresource() {
+ sw.Do(adjustTemplate(e.VerbName, e.VerbType, applySubresourceTemplate), m)
+ } else {
+ sw.Do(adjustTemplate(e.VerbName, e.VerbType, applyTemplate), m)
+ }
}
}
@@ -506,7 +518,7 @@ func (c *Fake$.type|publicPlural$) Patch(ctx context.Context, name string, pt $.
var applyTemplate = `
// Apply takes the given apply declarative configuration, applies it and returns the applied $.resultType|private$.
-func (c *Fake$.type|publicPlural$) Apply(ctx context.Context, $.inputType|private$ *$.applyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) {
+func (c *Fake$.type|publicPlural$) Apply(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) {
if $.inputType|private$ == nil {
return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil")
}
@@ -531,7 +543,7 @@ func (c *Fake$.type|publicPlural$) Apply(ctx context.Context, $.inputType|privat
var applyStatusTemplate = `
// ApplyStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-func (c *Fake$.type|publicPlural$) ApplyStatus(ctx context.Context, $.inputType|private$ *$.applyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) {
+func (c *Fake$.type|publicPlural$) ApplyStatus(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) {
if $.inputType|private$ == nil {
return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil")
}
@@ -552,3 +564,24 @@ func (c *Fake$.type|publicPlural$) ApplyStatus(ctx context.Context, $.inputType|
return obj.(*$.resultType|raw$), err
}
`
+
+var applySubresourceTemplate = `
+// Apply takes top resource name and the apply declarative configuration for $.subresourcePath$,
+// applies it and returns the applied $.resultType|private$, and an error, if there is any.
+func (c *Fake$.type|publicPlural$) Apply(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) {
+ if $.inputType|private$ == nil {
+ return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil")
+ }
+ data, err := $.jsonMarshal|raw$($.inputType|private$)
+ if err != nil {
+ return nil, err
+ }
+ obj, err := c.Fake.
+ $if .namespaced$Invokes($.NewPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, c.ns, $.type|private$Name, $.ApplyPatchType|raw$, data, "status"), &$.resultType|raw${})
+ $else$Invokes($.NewRootPatchSubresourceAction|raw$($.type|allLowercasePlural$Resource, $.type|private$Name, $.ApplyPatchType|raw$, data, "status"), &$.resultType|raw${})$end$
+ if obj == nil {
+ return nil, err
+ }
+ return obj.(*$.resultType|raw$), err
+}
+`
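For orientation, here is a minimal, self-contained sketch of the control flow the fake applySubresourceTemplate above renders: nil-check the apply configuration, marshal it to JSON, and record the bytes as an apply-type patch against the subresource. Everything below is illustrative — the Widget type and recordPatch stand-in are not generated from this repository, and recordPatch merely imitates the fake clientset's Invokes(NewPatchSubresourceAction(...)) call.

package main

import (
	"context"
	"encoding/json"
	"fmt"
)

// WidgetApplyConfiguration is a hypothetical apply configuration; generated
// apply configurations use pointer fields with omitempty so unset fields are
// left out of the apply patch.
type WidgetApplyConfiguration struct {
	Name   *string `json:"name,omitempty"`
	Status *string `json:"status,omitempty"`
}

// recordPatch stands in for the fake client's Invokes(...) call; the real
// generated method records the action on the fake ObjectTracker and returns
// the tracked object instead of printing.
func recordPatch(namespace, name, subresource string, data []byte) {
	fmt.Printf("apply patch %s/%s %s: %s\n", namespace, name, subresource, data)
}

// applyWidgetStatus mirrors the rendered template: reject a nil apply
// configuration, marshal it, and submit the bytes as an apply-type patch
// against the subresource path.
func applyWidgetStatus(_ context.Context, namespace, widgetName string, widget *WidgetApplyConfiguration) error {
	if widget == nil {
		return fmt.Errorf("widget provided to Apply must not be nil")
	}
	data, err := json.Marshal(widget)
	if err != nil {
		return err
	}
	recordPatch(namespace, widgetName, "status", data)
	return nil
}

func main() {
	status := "Ready"
	_ = applyWidgetStatus(context.Background(), "default", "my-widget", &WidgetApplyConfiguration{Status: &status})
}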
diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go
index 37dc4a365..fe63dd198 100644
--- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go
+++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/generator_for_type.go
@@ -90,14 +90,20 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i
template string
args map[string]interface{}
}
+ _, typeGVString := util.ParsePathGroupVersion(g.inputPackage)
extendedMethods := []extendedInterfaceMethod{}
for _, e := range tags.Extensions {
+ if e.HasVerb("apply") && !generateApply {
+ continue
+ }
inputType := *t
resultType := *t
+ inputGVString := typeGVString
// TODO: Extract this to some helper method as this code is copied into
// 2 other places.
if len(e.InputTypeOverride) > 0 {
if name, pkg := e.Input(); len(pkg) > 0 {
+ _, inputGVString = util.ParsePathGroupVersion(pkg)
newType := c.Universe.Type(types.Name{Package: pkg, Name: name})
inputType = *newType
} else {
@@ -118,7 +124,7 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i
} else {
updatedVerbtemplate = e.VerbName + "(" + strings.TrimPrefix(defaultVerbTemplates[e.VerbType], strings.Title(e.VerbType)+"(")
}
- extendedMethods = append(extendedMethods, extendedInterfaceMethod{
+ extendedMethod := extendedInterfaceMethod{
template: updatedVerbtemplate,
args: map[string]interface{}{
"type": t,
@@ -128,9 +134,15 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i
"GetOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "GetOptions"}),
"ListOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ListOptions"}),
"UpdateOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "UpdateOptions"}),
+ "ApplyOptions": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/apis/meta/v1", Name: "ApplyOptions"}),
"PatchType": c.Universe.Type(types.Name{Package: "k8s.io/apimachinery/pkg/types", Name: "PatchType"}),
+ "jsonMarshal": c.Universe.Type(types.Name{Package: "encoding/json", Name: "Marshal"}),
},
- })
+ }
+ if e.HasVerb("apply") {
+ extendedMethod.args["inputApplyConfig"] = types.Ref(path.Join(g.applyConfigurationPackage, inputGVString), inputType.Name.Name+"ApplyConfiguration")
+ }
+ extendedMethods = append(extendedMethods, extendedMethod)
}
m := map[string]interface{}{
"type": t,
@@ -162,7 +174,7 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i
if generateApply {
// Generated apply configuration type references required for generated Apply function
_, gvString := util.ParsePathGroupVersion(g.inputPackage)
- m["applyConfig"] = types.Ref(path.Join(g.applyConfigurationPackage, gvString), t.Name.Name+"ApplyConfiguration")
+ m["inputApplyConfig"] = types.Ref(path.Join(g.applyConfigurationPackage, gvString), t.Name.Name+"ApplyConfiguration")
}
sw.Do(getterComment, m)
@@ -237,14 +249,18 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i
if tags.HasVerb("applyStatus") && generateApply {
sw.Do(applyStatusTemplate, m)
}
- // TODO: Add subresource support once apply subresources are supported on the server side
// generate expansion methods
for _, e := range tags.Extensions {
+ if e.HasVerb("apply") && !generateApply {
+ continue
+ }
inputType := *t
resultType := *t
+ inputGVString := typeGVString
if len(e.InputTypeOverride) > 0 {
if name, pkg := e.Input(); len(pkg) > 0 {
+ _, inputGVString = util.ParsePathGroupVersion(pkg)
newType := c.Universe.Type(types.Name{Package: pkg, Name: name})
inputType = *newType
} else {
@@ -262,6 +278,9 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i
m["inputType"] = &inputType
m["resultType"] = &resultType
m["subresourcePath"] = e.SubResourcePath
+ if e.HasVerb("apply") {
+ m["inputApplyConfig"] = types.Ref(path.Join(g.applyConfigurationPackage, inputGVString), inputType.Name.Name+"ApplyConfiguration")
+ }
if e.HasVerb("get") {
if e.IsSubresource() {
@@ -310,9 +329,12 @@ func (g *genClientForType) GenerateType(c *generator.Context, t *types.Type, w i
sw.Do(adjustTemplate(e.VerbName, e.VerbType, patchTemplate), m)
}
- if e.HasVerb("apply") && generateApply {
- // TODO: Support apply on arbitrary subresource once it is supported by the api-server.
- sw.Do(adjustTemplate(e.VerbName, e.VerbType, applyTemplate), m)
+ if e.HasVerb("apply") {
+ if e.IsSubresource() {
+ sw.Do(adjustTemplate(e.VerbName, e.VerbType, applySubresourceTemplate), m)
+ } else {
+ sw.Do(adjustTemplate(e.VerbName, e.VerbType, applyTemplate), m)
+ }
}
}
@@ -344,7 +366,9 @@ func buildSubresourceDefaultVerbTemplates(generateApply bool) map[string]string
"update": `Update(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputType|raw$, opts $.UpdateOptions|raw$) (*$.resultType|raw$, error)`,
"get": `Get(ctx context.Context, $.type|private$Name string, options $.GetOptions|raw$) (*$.resultType|raw$, error)`,
}
- // TODO: Support apply on arbitrary subresource once it is supported by the api-server.
+ if generateApply {
+ m["apply"] = `Apply(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (*$.resultType|raw$, error)`
+ }
return m
}
@@ -361,8 +385,8 @@ func buildDefaultVerbTemplates(generateApply bool) map[string]string {
"patch": `Patch(ctx context.Context, name string, pt $.PatchType|raw$, data []byte, opts $.PatchOptions|raw$, subresources ...string) (result *$.resultType|raw$, err error)`,
}
if generateApply {
- m["apply"] = `Apply(ctx context.Context, $.inputType|private$ *$.applyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error)`
- m["applyStatus"] = `ApplyStatus(ctx context.Context, $.inputType|private$ *$.applyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error)`
+ m["apply"] = `Apply(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error)`
+ m["applyStatus"] = `ApplyStatus(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error)`
}
return m
}
@@ -650,7 +674,7 @@ func (c *$.type|privatePlural$) Patch(ctx context.Context, name string, pt $.Pat
var applyTemplate = `
// Apply takes the given apply declarative configuration, applies it and returns the applied $.resultType|private$.
-func (c *$.type|privatePlural$) Apply(ctx context.Context, $.inputType|private$ *$.applyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) {
+func (c *$.type|privatePlural$) Apply(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) {
if $.inputType|private$ == nil {
return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil")
}
@@ -679,7 +703,7 @@ func (c *$.type|privatePlural$) Apply(ctx context.Context, $.inputType|private$
var applyStatusTemplate = `
// ApplyStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
-func (c *$.type|privatePlural$) ApplyStatus(ctx context.Context, $.inputType|private$ *$.applyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) {
+func (c *$.type|privatePlural$) ApplyStatus(ctx context.Context, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) {
if $.inputType|private$ == nil {
return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil")
}
@@ -707,3 +731,30 @@ func (c *$.type|privatePlural$) ApplyStatus(ctx context.Context, $.inputType|pri
return
}
`
+
+var applySubresourceTemplate = `
+// Apply takes top resource name and the apply declarative configuration for $.subresourcePath$,
+// applies it and returns the applied $.resultType|private$, and an error, if there is any.
+func (c *$.type|privatePlural$) Apply(ctx context.Context, $.type|private$Name string, $.inputType|private$ *$.inputApplyConfig|raw$, opts $.ApplyOptions|raw$) (result *$.resultType|raw$, err error) {
+ if $.inputType|private$ == nil {
+ return nil, fmt.Errorf("$.inputType|private$ provided to Apply must not be nil")
+ }
+ patchOpts := opts.ToPatchOptions()
+ data, err := $.jsonMarshal|raw$($.inputType|private$)
+ if err != nil {
+ return nil, err
+ }
+
+ result = &$.resultType|raw${}
+ err = c.client.Patch($.ApplyPatchType|raw$).
+ $if .namespaced$Namespace(c.ns).$end$
+ Resource("$.type|resource$").
+ Name($.type|private$Name).
+ SubResource("$.subresourcePath$").
+ VersionedParams(&patchOpts, $.schemeParameterCodec|raw$).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
+`
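The non-fake template above converts the caller's ApplyOptions to PatchOptions and sends the marshalled apply configuration as an apply-type patch to the subresource path. A minimal sketch of that request shape, using only apimachinery types that the template itself relies on; the ScaleApplyConfiguration type here is hypothetical, not taken from any generated package.

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// ScaleApplyConfiguration is a hypothetical apply configuration for a
// scale-like subresource.
type ScaleApplyConfiguration struct {
	Replicas *int32 `json:"replicas,omitempty"`
}

func main() {
	replicas := int32(3)
	sac := &ScaleApplyConfiguration{Replicas: &replicas}

	// The generated method derives PatchOptions from the caller's ApplyOptions...
	opts := metav1.ApplyOptions{FieldManager: "example-controller", Force: true}
	patchOpts := opts.ToPatchOptions()

	// ...and marshals the apply configuration into the patch body it submits
	// with the server-side-apply patch type.
	data, err := json.Marshal(sac)
	if err != nil {
		panic(err)
	}
	fmt.Println("patch type:   ", types.ApplyPatchType) // application/apply-patch+yaml
	fmt.Println("field manager:", patchOpts.FieldManager)
	fmt.Println("body:         ", string(data))
}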
diff --git a/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go b/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go
index 71346061f..b5736c40d 100644
--- a/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go
+++ b/vendor/k8s.io/code-generator/cmd/client-gen/generators/util/tags.go
@@ -74,6 +74,7 @@ var unsupportedExtensionVerbs = []string{
var inputTypeSupportedVerbs = []string{
"create",
"update",
+ "apply",
}
// resultTypeSupportedVerbs is a list of verb types that supports overriding the
@@ -84,6 +85,7 @@ var resultTypeSupportedVerbs = []string{
"get",
"list",
"patch",
+ "apply",
}
// Extensions allows to extend the default set of client verbs
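Accepting "apply" in both override lists is what lets a +genclient method extension pair the apply verb with an apply-configuration input type and a separate result type. A hedged sketch of the tag syntax on a hypothetical Widget type — the package paths and the ApplyScale name in the tag are illustrative only.

package v1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// Widget is a hypothetical API type used only to illustrate the tag syntax
// the verb lists above now permit.
//
// +genclient
// +genclient:method=ApplyScale,verb=apply,subresource=scale,input=example.dev/client/pkg/applyconfiguration/autoscaling/v1.ScaleApplyConfiguration,result=k8s.io/api/autoscaling/v1.Scale
type Widget struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
}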
diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go
index 847a6a5a0..009973389 100644
--- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go
+++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/main.go
@@ -23,11 +23,13 @@ import (
flag "github.com/spf13/pflag"
"k8s.io/code-generator/cmd/go-to-protobuf/protobuf"
+ "k8s.io/klog/v2"
)
var g = protobuf.New()
func init() {
+ klog.InitFlags(nil)
g.BindFlags(flag.CommandLine)
goflag.Set("logtostderr", "true")
flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
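klog.InitFlags(nil) registers klog's flags (-v, -vmodule, -logtostderr, ...) on the standard flag.CommandLine, which the init() above then merges into pflag. A minimal sketch of the same pattern in a standalone command:

package main

import (
	goflag "flag"

	"k8s.io/klog/v2"
)

func main() {
	// Register klog's flags on the default flag set, then parse as usual.
	klog.InitFlags(nil)
	goflag.Parse()

	klog.V(2).Info("visible only when run with -v=2 or higher")
	klog.Flush()
}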
diff --git a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go
index de782e98f..f7c8fb7ff 100644
--- a/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go
+++ b/vendor/k8s.io/code-generator/cmd/go-to-protobuf/protobuf/cmd.go
@@ -364,7 +364,12 @@ func Run(g *Generator) {
func deps(c *generator.Context, pkgs []*protobufPackage) map[string][]string {
ret := map[string][]string{}
for _, p := range pkgs {
- for _, d := range c.Universe[p.PackagePath].Imports {
+ pkg, ok := c.Universe[p.PackagePath]
+ if !ok {
+ log.Fatalf("Unrecognized package: %s", p.PackagePath)
+ }
+
+ for _, d := range pkg.Imports {
ret[p.PackagePath] = append(ret[p.PackagePath], d.Path)
}
}
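The deps change replaces a direct map index — which yields a nil *types.Package for an unknown path and panics on the .Imports access — with a comma-ok lookup that fails fast with a readable message. The same guard pattern in isolation, with illustrative names:

package main

import "log"

type pkg struct{ Imports []string }

func main() {
	universe := map[string]*pkg{
		"example.dev/api/v1": {Imports: []string{"fmt"}},
	}

	path := "example.dev/api/v2"
	// Comma-ok lookup: report the missing package instead of dereferencing nil.
	p, ok := universe[path]
	if !ok {
		log.Fatalf("Unrecognized package: %s", path)
	}
	_ = p.Imports
}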
diff --git a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go
index 496145b14..8ada49469 100644
--- a/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go
+++ b/vendor/k8s.io/code-generator/cmd/lister-gen/generators/lister.go
@@ -162,7 +162,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
func objectMetaForPackage(p *types.Package) (*types.Type, bool, error) {
generatingForPackage := false
for _, t := range p.Types {
- // filter out types which dont have genclient.
+ // filter out types which don't have genclient.
if !util.MustParseClientGenTags(append(t.SecondClosestCommentLines, t.CommentLines...)).GenerateClient {
continue
}
diff --git a/vendor/k8s.io/code-generator/go.mod b/vendor/k8s.io/code-generator/go.mod
index 59b6c8a15..bf4a834da 100644
--- a/vendor/k8s.io/code-generator/go.mod
+++ b/vendor/k8s.io/code-generator/go.mod
@@ -6,29 +6,22 @@ go 1.16
require (
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
- github.com/go-openapi/spec v0.19.5
+ github.com/go-openapi/jsonpointer v0.19.5 // indirect
+ github.com/go-openapi/jsonreference v0.19.5 // indirect
+ github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2
- github.com/golang/protobuf v1.4.3 // indirect
- github.com/google/go-cmp v0.5.4 // indirect
- github.com/googleapis/gnostic v0.4.1
- github.com/json-iterator/go v1.1.10 // indirect
- github.com/kr/text v0.2.0 // indirect
- github.com/mailru/easyjson v0.7.0 // indirect
- github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
- github.com/onsi/ginkgo v1.11.0 // indirect
- github.com/onsi/gomega v1.7.0 // indirect
+ github.com/googleapis/gnostic v0.5.5
+ github.com/json-iterator/go v1.1.11 // indirect
+ github.com/onsi/ginkgo v1.14.0 // indirect
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.6.1 // indirect
- golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 // indirect
- golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 // indirect
- golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 // indirect
- golang.org/x/text v0.3.4 // indirect
- golang.org/x/tools v0.1.0 // indirect
- google.golang.org/protobuf v1.25.0 // indirect
- gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
+ github.com/stretchr/testify v1.7.0 // indirect
+ golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
+ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect
+ golang.org/x/tools v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0
+ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027
- k8s.io/klog/v2 v2.8.0
- k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7
+ k8s.io/klog/v2 v2.9.0
+ k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
)
diff --git a/vendor/k8s.io/code-generator/go.sum b/vendor/k8s.io/code-generator/go.sum
index 50afc6f97..d8517cdee 100644
--- a/vendor/k8s.io/code-generator/go.sum
+++ b/vendor/k8s.io/code-generator/go.sum
@@ -19,68 +19,68 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
+github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw=
-github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
+github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -91,28 +91,34 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -121,57 +127,63 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ=
-golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM=
-golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 h1:dXfMednGJh/SUUFjTLsWJz3P+TQt9qnR11GgeI3vWKs=
-golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
-golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY=
-golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -182,6 +194,7 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@@ -193,14 +206,15 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
-google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
@@ -208,10 +222,13 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@@ -219,10 +236,10 @@ k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkC
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=
-k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0=
-k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
+k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
diff --git a/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE b/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE
new file mode 100644
index 000000000..6a66aea5e
--- /dev/null
+++ b/vendor/k8s.io/code-generator/third_party/forked/golang/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS b/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS
new file mode 100644
index 000000000..733099041
--- /dev/null
+++ b/vendor/k8s.io/code-generator/third_party/forked/golang/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
index 25483fad1..1e187f763 100644
--- a/vendor/k8s.io/klog/v2/klog.go
+++ b/vendor/k8s.io/klog/v2/klog.go
@@ -284,6 +284,7 @@ func (m *moduleSpec) Get() interface{} {
var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+// Set will sets module value
// Syntax: -vmodule=recordio=2,file=1,gfs*=3
func (m *moduleSpec) Set(value string) error {
var filter []modulePat
@@ -362,6 +363,7 @@ func (t *traceLocation) Get() interface{} {
var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+// Set will sets backtrace value
// Syntax: -log_backtrace_at=gopherflakes.go:234
// Note that unlike vmodule the file extension is included here.
func (t *traceLocation) Set(value string) error {
@@ -708,7 +710,7 @@ func (l *loggingT) println(s severity, logr logr.Logger, filter LogFilter, args
args = filter.Filter(args)
}
fmt.Fprintln(buf, args...)
- l.output(s, logr, buf, file, line, false)
+ l.output(s, logr, buf, 0 /* depth */, file, line, false)
}
func (l *loggingT) print(s severity, logr logr.Logger, filter LogFilter, args ...interface{}) {
@@ -730,7 +732,7 @@ func (l *loggingT) printDepth(s severity, logr logr.Logger, filter LogFilter, de
if buf.Bytes()[buf.Len()-1] != '\n' {
buf.WriteByte('\n')
}
- l.output(s, logr, buf, file, line, false)
+ l.output(s, logr, buf, depth, file, line, false)
}
func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format string, args ...interface{}) {
@@ -748,7 +750,7 @@ func (l *loggingT) printf(s severity, logr logr.Logger, filter LogFilter, format
if buf.Bytes()[buf.Len()-1] != '\n' {
buf.WriteByte('\n')
}
- l.output(s, logr, buf, file, line, false)
+ l.output(s, logr, buf, 0 /* depth */, file, line, false)
}
// printWithFileLine behaves like print but uses the provided file and line number. If
@@ -769,7 +771,7 @@ func (l *loggingT) printWithFileLine(s severity, logr logr.Logger, filter LogFil
if buf.Bytes()[buf.Len()-1] != '\n' {
buf.WriteByte('\n')
}
- l.output(s, logr, buf, file, line, alsoToStderr)
+ l.output(s, logr, buf, 2 /* depth */, file, line, alsoToStderr)
}
// if loggr is specified, will call loggr.Error, otherwise output with logging module.
@@ -778,7 +780,7 @@ func (l *loggingT) errorS(err error, loggr logr.Logger, filter LogFilter, depth
msg, keysAndValues = filter.FilterS(msg, keysAndValues)
}
if loggr != nil {
- loggr.Error(err, msg, keysAndValues...)
+ logr.WithCallDepth(loggr, depth+2).Error(err, msg, keysAndValues...)
return
}
l.printS(err, errorLog, depth+1, msg, keysAndValues...)
@@ -790,7 +792,7 @@ func (l *loggingT) infoS(loggr logr.Logger, filter LogFilter, depth int, msg str
msg, keysAndValues = filter.FilterS(msg, keysAndValues)
}
if loggr != nil {
- loggr.Info(msg, keysAndValues...)
+ logr.WithCallDepth(loggr, depth+2).Info(msg, keysAndValues...)
return
}
l.printS(nil, infoLog, depth+1, msg, keysAndValues...)
@@ -825,6 +827,8 @@ func kvListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
switch v.(type) {
case string, error:
b.WriteString(fmt.Sprintf("%s=%q", k, v))
+ case []byte:
+ b.WriteString(fmt.Sprintf("%s=%+q", k, v))
default:
if _, ok := v.(fmt.Stringer); ok {
b.WriteString(fmt.Sprintf("%s=%q", k, v))
@@ -855,12 +859,13 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
// SetLogger will set the backing logr implementation for klog.
// If set, all log lines will be suppressed from the regular Output, and
// redirected to the logr implementation.
-// All log lines include the 'severity', 'file' and 'line' values attached as
-// structured logging values.
// Use as:
// ...
// klog.SetLogger(zapr.NewLogger(zapLog))
func SetLogger(logr logr.Logger) {
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+
logging.logr = logr
}
@@ -899,7 +904,7 @@ func LogToStderr(stderr bool) {
}
// output writes the data to the log files and releases the buffer.
-func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string, line int, alsoToStderr bool) {
+func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, depth int, file string, line int, alsoToStderr bool) {
l.mu.Lock()
if l.traceLocation.isSet() {
if l.traceLocation.match(file, line) {
@@ -911,9 +916,9 @@ func (l *loggingT) output(s severity, log logr.Logger, buf *buffer, file string,
// TODO: set 'severity' and caller information as structured log info
// keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
if s == errorLog {
- l.logr.Error(nil, string(data))
+ logr.WithCallDepth(l.logr, depth+3).Error(nil, string(data))
} else {
- log.Info(string(data))
+ logr.WithCallDepth(log, depth+3).Info(string(data))
}
} else if l.toStderr {
os.Stderr.Write(data)
diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go
index 40be34786..682014bda 100644
--- a/vendor/k8s.io/kube-openapi/pkg/common/common.go
+++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go
@@ -21,7 +21,7 @@ import (
"strings"
"github.com/emicklei/go-restful"
- "github.com/go-openapi/spec"
+ "k8s.io/kube-openapi/pkg/validation/spec"
)
const (
diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go
index c7c6d4459..a294c2e8a 100644
--- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go
+++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go
@@ -103,7 +103,7 @@ func apiTypeFilterFunc(c *generator.Context, t *types.Type) bool {
}
const (
- specPackagePath = "github.com/go-openapi/spec"
+ specPackagePath = "k8s.io/kube-openapi/pkg/validation/spec"
openAPICommonPackagePath = "k8s.io/kube-openapi/pkg/common"
)
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/.gitignore b/vendor/k8s.io/kube-openapi/pkg/validation/spec/.gitignore
new file mode 100644
index 000000000..dd91ed6a0
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/.gitignore
@@ -0,0 +1,2 @@
+secrets.yml
+coverage.out
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/LICENSE b/vendor/k8s.io/kube-openapi/pkg/validation/spec/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/contact_info.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/contact_info.go
new file mode 100644
index 000000000..f285970aa
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/contact_info.go
@@ -0,0 +1,24 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// ContactInfo contact information for the exposed API.
+//
+// For more information: http://goo.gl/8us55a#contactObject
+type ContactInfo struct {
+ Name string `json:"name,omitempty"`
+ URL string `json:"url,omitempty"`
+ Email string `json:"email,omitempty"`
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/external_docs.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/external_docs.go
new file mode 100644
index 000000000..88add91b2
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/external_docs.go
@@ -0,0 +1,24 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// ExternalDocumentation allows referencing an external resource for
+// extended documentation.
+//
+// For more information: http://goo.gl/8us55a#externalDocumentationObject
+type ExternalDocumentation struct {
+ Description string `json:"description,omitempty"`
+ URL string `json:"url,omitempty"`
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go
new file mode 100644
index 000000000..216cf1cde
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go
@@ -0,0 +1,71 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+const (
+ jsonArray = "array"
+)
+
+// HeaderProps describes a response header
+type HeaderProps struct {
+ Description string `json:"description,omitempty"`
+}
+
+// Header describes a header for a response of the API
+//
+// For more information: http://goo.gl/8us55a#headerObject
+type Header struct {
+ CommonValidations
+ SimpleSchema
+ VendorExtensible
+ HeaderProps
+}
+
+// MarshalJSON marshal this to JSON
+func (h Header) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(h.CommonValidations)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(h.SimpleSchema)
+ if err != nil {
+ return nil, err
+ }
+ b3, err := json.Marshal(h.HeaderProps)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// UnmarshalJSON unmarshals this header from JSON
+func (h *Header) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &h.CommonValidations); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &h.SimpleSchema); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &h.VendorExtensible); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &h.HeaderProps)
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go
new file mode 100644
index 000000000..504a3972a
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go
@@ -0,0 +1,155 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "strings"
+
+ "github.com/go-openapi/swag"
+)
+
+// Extensions vendor specific extensions
+type Extensions map[string]interface{}
+
+// Add adds a value to these extensions
+func (e Extensions) Add(key string, value interface{}) {
+ realKey := strings.ToLower(key)
+ e[realKey] = value
+}
+
+// GetString gets a string value from the extensions
+func (e Extensions) GetString(key string) (string, bool) {
+ if v, ok := e[strings.ToLower(key)]; ok {
+ str, ok := v.(string)
+ return str, ok
+ }
+ return "", false
+}
+
+// GetBool gets a bool value from the extensions
+func (e Extensions) GetBool(key string) (bool, bool) {
+ if v, ok := e[strings.ToLower(key)]; ok {
+ str, ok := v.(bool)
+ return str, ok
+ }
+ return false, false
+}
+
+// GetStringSlice gets a string slice from the extensions
+func (e Extensions) GetStringSlice(key string) ([]string, bool) {
+ if v, ok := e[strings.ToLower(key)]; ok {
+ arr, isSlice := v.([]interface{})
+ if !isSlice {
+ return nil, false
+ }
+ var strs []string
+ for _, iface := range arr {
+ str, isString := iface.(string)
+ if !isString {
+ return nil, false
+ }
+ strs = append(strs, str)
+ }
+ return strs, ok
+ }
+ return nil, false
+}
+
+// VendorExtensible composition block.
+type VendorExtensible struct {
+ Extensions Extensions
+}
+
+// AddExtension adds an extension to this extensible object
+func (v *VendorExtensible) AddExtension(key string, value interface{}) {
+ if value == nil {
+ return
+ }
+ if v.Extensions == nil {
+ v.Extensions = make(map[string]interface{})
+ }
+ v.Extensions.Add(key, value)
+}
+
+// MarshalJSON marshals the extensions to json
+func (v VendorExtensible) MarshalJSON() ([]byte, error) {
+ toser := make(map[string]interface{})
+ for k, v := range v.Extensions {
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-") {
+ toser[k] = v
+ }
+ }
+ return json.Marshal(toser)
+}
+
+// UnmarshalJSON for this extensible object
+func (v *VendorExtensible) UnmarshalJSON(data []byte) error {
+ var d map[string]interface{}
+ if err := json.Unmarshal(data, &d); err != nil {
+ return err
+ }
+ for k, vv := range d {
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-") {
+ if v.Extensions == nil {
+ v.Extensions = map[string]interface{}{}
+ }
+ v.Extensions[k] = vv
+ }
+ }
+ return nil
+}
+
+// InfoProps the properties for an info definition
+type InfoProps struct {
+ Description string `json:"description,omitempty"`
+ Title string `json:"title,omitempty"`
+ TermsOfService string `json:"termsOfService,omitempty"`
+ Contact *ContactInfo `json:"contact,omitempty"`
+ License *License `json:"license,omitempty"`
+ Version string `json:"version,omitempty"`
+}
+
+// Info object provides metadata about the API.
+// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience.
+//
+// For more information: http://goo.gl/8us55a#infoObject
+type Info struct {
+ VendorExtensible
+ InfoProps
+}
+
+// MarshalJSON marshal this to JSON
+func (i Info) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(i.InfoProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(i.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (i *Info) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &i.InfoProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &i.VendorExtensible)
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go
new file mode 100644
index 000000000..b75aefe16
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go
@@ -0,0 +1,109 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+const (
+ jsonRef = "$ref"
+)
+
+// SimpleSchema describes swagger simple schemas for parameters and headers
+type SimpleSchema struct {
+ Type string `json:"type,omitempty"`
+ Nullable bool `json:"nullable,omitempty"`
+ Format string `json:"format,omitempty"`
+ Items *Items `json:"items,omitempty"`
+ CollectionFormat string `json:"collectionFormat,omitempty"`
+ Default interface{} `json:"default,omitempty"`
+ Example interface{} `json:"example,omitempty"`
+}
+
+// CommonValidations describes common JSON-schema validations
+type CommonValidations struct {
+ Maximum *float64 `json:"maximum,omitempty"`
+ ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"`
+ Minimum *float64 `json:"minimum,omitempty"`
+ ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"`
+ MaxLength *int64 `json:"maxLength,omitempty"`
+ MinLength *int64 `json:"minLength,omitempty"`
+ Pattern string `json:"pattern,omitempty"`
+ MaxItems *int64 `json:"maxItems,omitempty"`
+ MinItems *int64 `json:"minItems,omitempty"`
+ UniqueItems bool `json:"uniqueItems,omitempty"`
+ MultipleOf *float64 `json:"multipleOf,omitempty"`
+ Enum []interface{} `json:"enum,omitempty"`
+}
+
+// Items a limited subset of JSON-Schema's items object.
+// It is used by parameter definitions that are not located in "body".
+//
+// For more information: http://goo.gl/8us55a#items-object
+type Items struct {
+ Refable
+ CommonValidations
+ SimpleSchema
+ VendorExtensible
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (i *Items) UnmarshalJSON(data []byte) error {
+ var validations CommonValidations
+ if err := json.Unmarshal(data, &validations); err != nil {
+ return err
+ }
+ var ref Refable
+ if err := json.Unmarshal(data, &ref); err != nil {
+ return err
+ }
+ var simpleSchema SimpleSchema
+ if err := json.Unmarshal(data, &simpleSchema); err != nil {
+ return err
+ }
+ var vendorExtensible VendorExtensible
+ if err := json.Unmarshal(data, &vendorExtensible); err != nil {
+ return err
+ }
+ i.Refable = ref
+ i.CommonValidations = validations
+ i.SimpleSchema = simpleSchema
+ i.VendorExtensible = vendorExtensible
+ return nil
+}
+
+// MarshalJSON converts this items object to JSON
+func (i Items) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(i.CommonValidations)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(i.SimpleSchema)
+ if err != nil {
+ return nil, err
+ }
+ b3, err := json.Marshal(i.Refable)
+ if err != nil {
+ return nil, err
+ }
+ b4, err := json.Marshal(i.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b4, b3, b1, b2), nil
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/license.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/license.go
new file mode 100644
index 000000000..f20961b4f
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/license.go
@@ -0,0 +1,23 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+// License information for the exposed API.
+//
+// For more information: http://goo.gl/8us55a#licenseObject
+type License struct {
+ Name string `json:"name,omitempty"`
+ URL string `json:"url,omitempty"`
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go
new file mode 100644
index 000000000..c7acd8672
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go
@@ -0,0 +1,96 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+// OperationProps describes an operation
+//
+// NOTES:
+// - schemes, when present must be from [http, https, ws, wss]: see validate
+// - Security is handled as a special case: see MarshalJSON function
+type OperationProps struct {
+ Description string `json:"description,omitempty"`
+ Consumes []string `json:"consumes,omitempty"`
+ Produces []string `json:"produces,omitempty"`
+ Schemes []string `json:"schemes,omitempty"`
+ Tags []string `json:"tags,omitempty"`
+ Summary string `json:"summary,omitempty"`
+ ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+ ID string `json:"operationId,omitempty"`
+ Deprecated bool `json:"deprecated,omitempty"`
+ Security []map[string][]string `json:"security,omitempty"`
+ Parameters []Parameter `json:"parameters,omitempty"`
+ Responses *Responses `json:"responses,omitempty"`
+}
+
+// MarshalJSON takes care of serializing operation properties to JSON
+//
+// We use a custom marshaller here to handle a special case related to
+// the Security field. We need to preserve zero length slice
+// while omitting the field when the value is nil/unset.
+func (op OperationProps) MarshalJSON() ([]byte, error) {
+ type Alias OperationProps
+ if op.Security == nil {
+ return json.Marshal(&struct {
+ Security []map[string][]string `json:"security,omitempty"`
+ *Alias
+ }{
+ Security: op.Security,
+ Alias: (*Alias)(&op),
+ })
+ }
+ return json.Marshal(&struct {
+ Security []map[string][]string `json:"security"`
+ *Alias
+ }{
+ Security: op.Security,
+ Alias: (*Alias)(&op),
+ })
+}
+
+// Operation describes a single API operation on a path.
+//
+// For more information: http://goo.gl/8us55a#operationObject
+type Operation struct {
+ VendorExtensible
+ OperationProps
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (o *Operation) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &o.OperationProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &o.VendorExtensible)
+}
+
+// MarshalJSON converts this items object to JSON
+func (o Operation) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(o.OperationProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(o.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ concated := swag.ConcatJSON(b1, b2)
+ return concated, nil
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go
new file mode 100644
index 000000000..218513974
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go
@@ -0,0 +1,111 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+// ParamProps describes the specific attributes of an operation parameter
+//
+// NOTE:
+// - Schema is defined when "in" == "body": see validate
+// - AllowEmptyValue is allowed where "in" == "query" || "formData"
+type ParamProps struct {
+ Description string `json:"description,omitempty"`
+ Name string `json:"name,omitempty"`
+ In string `json:"in,omitempty"`
+ Required bool `json:"required,omitempty"`
+ Schema *Schema `json:"schema,omitempty"`
+ AllowEmptyValue bool `json:"allowEmptyValue,omitempty"`
+}
+
+// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn).
+//
+// There are five possible parameter types.
+// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part
+// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`,
+// the path parameter is `itemId`.
+// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`.
+// * Header - Custom headers that are expected as part of the request.
+// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be
+// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for
+// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist
+// together for the same operation.
+// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or
+// `multipart/form-data` are used as the content type of the request (in Swagger's definition,
+// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used
+// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be
+// declared together with a body parameter for the same operation. Form parameters have a different format based on
+// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4).
+// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload.
+// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple
+// parameters that are being transferred.
+// * `multipart/form-data` - each parameter takes a section in the payload with an internal header.
+// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is
+// `submit-name`. This type of form parameters is more commonly used for file transfers.
+//
+// For more information: http://goo.gl/8us55a#parameterObject
+type Parameter struct {
+ Refable
+ CommonValidations
+ SimpleSchema
+ VendorExtensible
+ ParamProps
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (p *Parameter) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &p.CommonValidations); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.Refable); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.SimpleSchema); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &p.ParamProps)
+}
+
+// MarshalJSON converts this items object to JSON
+func (p Parameter) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(p.CommonValidations)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(p.SimpleSchema)
+ if err != nil {
+ return nil, err
+ }
+ b3, err := json.Marshal(p.Refable)
+ if err != nil {
+ return nil, err
+ }
+ b4, err := json.Marshal(p.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ b5, err := json.Marshal(p.ParamProps)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b3, b1, b2, b4, b5), nil
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go
new file mode 100644
index 000000000..04de58f00
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go
@@ -0,0 +1,74 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+// PathItemProps the path item specific properties
+type PathItemProps struct {
+ Get *Operation `json:"get,omitempty"`
+ Put *Operation `json:"put,omitempty"`
+ Post *Operation `json:"post,omitempty"`
+ Delete *Operation `json:"delete,omitempty"`
+ Options *Operation `json:"options,omitempty"`
+ Head *Operation `json:"head,omitempty"`
+ Patch *Operation `json:"patch,omitempty"`
+ Parameters []Parameter `json:"parameters,omitempty"`
+}
+
+// PathItem describes the operations available on a single path.
+// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
+// The path itself is still exposed to the documentation viewer but they will
+// not know which operations and parameters are available.
+//
+// For more information: http://goo.gl/8us55a#pathItemObject
+type PathItem struct {
+ Refable
+ VendorExtensible
+ PathItemProps
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (p *PathItem) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &p.Refable); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &p.VendorExtensible); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &p.PathItemProps)
+}
+
+// MarshalJSON converts this items object to JSON
+func (p PathItem) MarshalJSON() ([]byte, error) {
+ b3, err := json.Marshal(p.Refable)
+ if err != nil {
+ return nil, err
+ }
+ b4, err := json.Marshal(p.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ b5, err := json.Marshal(p.PathItemProps)
+ if err != nil {
+ return nil, err
+ }
+ concated := swag.ConcatJSON(b3, b4, b5)
+ return concated, nil
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go
new file mode 100644
index 000000000..319aba879
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go
@@ -0,0 +1,85 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "strings"
+
+ "github.com/go-openapi/swag"
+)
+
+// Paths holds the relative paths to the individual endpoints.
+// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order
+// to construct the full URL.
+// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering).
+//
+// For more information: http://goo.gl/8us55a#pathsObject
+type Paths struct {
+ VendorExtensible
+ Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/"
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (p *Paths) UnmarshalJSON(data []byte) error {
+ var res map[string]json.RawMessage
+ if err := json.Unmarshal(data, &res); err != nil {
+ return err
+ }
+ for k, v := range res {
+ if strings.HasPrefix(strings.ToLower(k), "x-") {
+ if p.Extensions == nil {
+ p.Extensions = make(map[string]interface{})
+ }
+ var d interface{}
+ if err := json.Unmarshal(v, &d); err != nil {
+ return err
+ }
+ p.Extensions[k] = d
+ }
+ if strings.HasPrefix(k, "/") {
+ if p.Paths == nil {
+ p.Paths = make(map[string]PathItem)
+ }
+ var pi PathItem
+ if err := json.Unmarshal(v, &pi); err != nil {
+ return err
+ }
+ p.Paths[k] = pi
+ }
+ }
+ return nil
+}
+
+// MarshalJSON converts this items object to JSON
+func (p Paths) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(p.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+
+ pths := make(map[string]PathItem)
+ for k, v := range p.Paths {
+ if strings.HasPrefix(k, "/") {
+ pths[k] = v
+ }
+ }
+ b2, err := json.Marshal(pths)
+ if err != nil {
+ return nil, err
+ }
+ concated := swag.ConcatJSON(b1, b2)
+ return concated, nil
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go
new file mode 100644
index 000000000..1405bfd8e
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go
@@ -0,0 +1,167 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "net/http"
+ "os"
+ "path/filepath"
+
+ "github.com/go-openapi/jsonreference"
+)
+
+// Refable is a struct for things that accept a $ref property
+type Refable struct {
+ Ref Ref
+}
+
+// MarshalJSON marshals the ref to json
+func (r Refable) MarshalJSON() ([]byte, error) {
+ return r.Ref.MarshalJSON()
+}
+
+// UnmarshalJSON unmarshals the ref from JSON
+func (r *Refable) UnmarshalJSON(d []byte) error {
+ return json.Unmarshal(d, &r.Ref)
+}
+
+// Ref represents a json reference that is potentially resolved
+type Ref struct {
+ jsonreference.Ref
+}
+
+// RemoteURI gets the remote uri part of the ref
+func (r *Ref) RemoteURI() string {
+ if r.String() == "" {
+ return r.String()
+ }
+
+ u := *r.GetURL()
+ u.Fragment = ""
+ return u.String()
+}
+
+// IsValidURI returns true when the url the ref points to can be found
+func (r *Ref) IsValidURI(basepaths ...string) bool {
+ if r.String() == "" {
+ return true
+ }
+
+ v := r.RemoteURI()
+ if v == "" {
+ return true
+ }
+
+ if r.HasFullURL {
+ rr, err := http.Get(v)
+ if err != nil {
+ return false
+ }
+
+ return rr.StatusCode/100 == 2
+ }
+
+ if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) {
+ return false
+ }
+
+ // check for local file
+ pth := v
+ if r.HasURLPathOnly {
+ base := "."
+ if len(basepaths) > 0 {
+ base = filepath.Dir(filepath.Join(basepaths...))
+ }
+ p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth)))
+ if e != nil {
+ return false
+ }
+ pth = p
+ }
+
+ fi, err := os.Stat(filepath.ToSlash(pth))
+ if err != nil {
+ return false
+ }
+
+ return !fi.IsDir()
+}
+
+// Inherits creates a new reference from a parent and a child
+// If the child cannot inherit from the parent, an error is returned
+func (r *Ref) Inherits(child Ref) (*Ref, error) {
+ ref, err := r.Ref.Inherits(child.Ref)
+ if err != nil {
+ return nil, err
+ }
+ return &Ref{Ref: *ref}, nil
+}
+
+// NewRef creates a new instance of a ref object
+// returns an error when the reference uri is an invalid uri
+func NewRef(refURI string) (Ref, error) {
+ ref, err := jsonreference.New(refURI)
+ if err != nil {
+ return Ref{}, err
+ }
+ return Ref{Ref: ref}, nil
+}
+
+// MustCreateRef creates a ref object but panics when refURI is invalid.
+// Use the NewRef method for a version that returns an error.
+func MustCreateRef(refURI string) Ref {
+ return Ref{Ref: jsonreference.MustCreateRef(refURI)}
+}
+
+// MarshalJSON marshals this ref into a JSON object
+func (r Ref) MarshalJSON() ([]byte, error) {
+ str := r.String()
+ if str == "" {
+ if r.IsRoot() {
+ return []byte(`{"$ref":""}`), nil
+ }
+ return []byte("{}"), nil
+ }
+ v := map[string]interface{}{"$ref": str}
+ return json.Marshal(v)
+}
+
+// UnmarshalJSON unmarshals this ref from a JSON object
+func (r *Ref) UnmarshalJSON(d []byte) error {
+ var v map[string]interface{}
+ if err := json.Unmarshal(d, &v); err != nil {
+ return err
+ }
+ return r.fromMap(v)
+}
+
+func (r *Ref) fromMap(v map[string]interface{}) error {
+ if v == nil {
+ return nil
+ }
+
+ if vv, ok := v["$ref"]; ok {
+ if str, ok := vv.(string); ok {
+ ref, err := jsonreference.New(str)
+ if err != nil {
+ return err
+ }
+ *r = Ref{Ref: ref}
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go
new file mode 100644
index 000000000..9fd717ec3
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go
@@ -0,0 +1,78 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+// ResponseProps properties specific to a response
+type ResponseProps struct {
+ Description string `json:"description,omitempty"`
+ Schema *Schema `json:"schema,omitempty"`
+ Headers map[string]Header `json:"headers,omitempty"`
+ Examples map[string]interface{} `json:"examples,omitempty"`
+}
+
+// Response describes a single response from an API Operation.
+//
+// For more information: http://goo.gl/8us55a#responseObject
+type Response struct {
+ Refable
+ ResponseProps
+ VendorExtensible
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (r *Response) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &r.ResponseProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &r.Refable); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &r.VendorExtensible)
+}
+
+// MarshalJSON converts this items object to JSON
+func (r Response) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(r.ResponseProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(r.Refable)
+ if err != nil {
+ return nil, err
+ }
+ b3, err := json.Marshal(r.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2, b3), nil
+}
+
+// NewResponse creates a new response instance
+func NewResponse() *Response {
+ return new(Response)
+}
+
+// ResponseRef creates a response as a json reference
+func ResponseRef(url string) *Response {
+ resp := NewResponse()
+ resp.Ref = MustCreateRef(url)
+ return resp
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go
new file mode 100644
index 000000000..b2c3883a9
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go
@@ -0,0 +1,110 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "reflect"
+ "strconv"
+
+ "github.com/go-openapi/swag"
+)
+
+// Responses is a container for the expected responses of an operation.
+// The container maps an HTTP response code to the expected response.
+// It is not expected from the documentation to necessarily cover all possible HTTP response codes,
+// since they may not be known in advance. However, it is expected from the documentation to cover
+// a successful operation response and any known errors.
+//
+// The `default` can be used as a default response object for all HTTP codes that are not covered
+// individually by the specification.
+//
+// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response
+// for a successful operation call.
+//
+// For more information: http://goo.gl/8us55a#responsesObject
+type Responses struct {
+ VendorExtensible
+ ResponsesProps
+}
+
+// UnmarshalJSON hydrates this items instance with the data from JSON
+func (r *Responses) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &r.ResponsesProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &r.VendorExtensible); err != nil {
+ return err
+ }
+ if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) {
+ r.ResponsesProps = ResponsesProps{}
+ }
+ return nil
+}
+
+// MarshalJSON converts this items object to JSON
+func (r Responses) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(r.ResponsesProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(r.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ concated := swag.ConcatJSON(b1, b2)
+ return concated, nil
+}
+
+// ResponsesProps describes all responses for an operation.
+// It tells what the default response is and maps all responses to an
+// HTTP status code.
+type ResponsesProps struct {
+ Default *Response
+ StatusCodeResponses map[int]Response
+}
+
+// MarshalJSON marshals responses as JSON
+func (r ResponsesProps) MarshalJSON() ([]byte, error) {
+ toser := map[string]Response{}
+ if r.Default != nil {
+ toser["default"] = *r.Default
+ }
+ for k, v := range r.StatusCodeResponses {
+ toser[strconv.Itoa(k)] = v
+ }
+ return json.Marshal(toser)
+}
+
+// UnmarshalJSON unmarshals responses from JSON
+func (r *ResponsesProps) UnmarshalJSON(data []byte) error {
+ var res map[string]Response
+ if err := json.Unmarshal(data, &res); err != nil {
+ return nil
+ }
+ if v, ok := res["default"]; ok {
+ r.Default = &v
+ delete(res, "default")
+ }
+ for k, v := range res {
+ if nk, err := strconv.Atoi(k); err == nil {
+ if r.StatusCodeResponses == nil {
+ r.StatusCodeResponses = map[int]Response{}
+ }
+ r.StatusCodeResponses[nk] = v
+ }
+ }
+ return nil
+}
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go
new file mode 100644
index 000000000..b0aeeb0d0
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go
@@ -0,0 +1,513 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/go-openapi/swag"
+)
+
+// BooleanProperty creates a boolean property
+func BooleanProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}}
+}
+
+// BoolProperty creates a boolean property
+func BoolProperty() *Schema { return BooleanProperty() }
+
+// StringProperty creates a string property
+func StringProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
+}
+
+// CharProperty creates a string property
+func CharProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}}
+}
+
+// Float64Property creates a float64/double property
+func Float64Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}}
+}
+
+// Float32Property creates a float32/float property
+func Float32Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}}
+}
+
+// Int8Property creates an int8 property
+func Int8Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}}
+}
+
+// Int16Property creates an int16 property
+func Int16Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}}
+}
+
+// Int32Property creates an int32 property
+func Int32Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}}
+}
+
+// Int64Property creates an int64 property
+func Int64Property() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}}
+}
+
+// StrFmtProperty creates a property for the named string format
+func StrFmtProperty(format string) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}}
+}
+
+// DateProperty creates a date property
+func DateProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}}
+}
+
+// DateTimeProperty creates a date time property
+func DateTimeProperty() *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}}
+}
+
+// MapProperty creates a map property
+func MapProperty(property *Schema) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"object"},
+ AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}}
+}
+
+// RefProperty creates a ref property
+func RefProperty(name string) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
+}
+
+// RefSchema creates a ref property
+func RefSchema(name string) *Schema {
+ return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}}
+}
+
+// ArrayProperty creates an array property
+func ArrayProperty(items *Schema) *Schema {
+ if items == nil {
+ return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}}
+ }
+ return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}}
+}
+
+// ComposedSchema creates a schema with allOf
+func ComposedSchema(schemas ...Schema) *Schema {
+ s := new(Schema)
+ s.AllOf = schemas
+ return s
+}
+
+// SchemaURL represents a schema url
+type SchemaURL string
+
+// MarshalJSON marshal this to JSON
+func (r SchemaURL) MarshalJSON() ([]byte, error) {
+ if r == "" {
+ return []byte("{}"), nil
+ }
+ v := map[string]interface{}{"$schema": string(r)}
+ return json.Marshal(v)
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (r *SchemaURL) UnmarshalJSON(data []byte) error {
+ var v map[string]interface{}
+ if err := json.Unmarshal(data, &v); err != nil {
+ return err
+ }
+ return r.fromMap(v)
+}
+
+func (r *SchemaURL) fromMap(v map[string]interface{}) error {
+ if v == nil {
+ return nil
+ }
+ if vv, ok := v["$schema"]; ok {
+ if str, ok := vv.(string); ok {
+ u, err := url.Parse(str)
+ if err != nil {
+ return err
+ }
+
+ *r = SchemaURL(u.String())
+ }
+ }
+ return nil
+}
+
+// SchemaProps describes a JSON schema (draft 4)
+type SchemaProps struct {
+ ID string `json:"id,omitempty"`
+ Ref Ref `json:"-"`
+ Schema SchemaURL `json:"-"`
+ Description string `json:"description,omitempty"`
+ Type StringOrArray `json:"type,omitempty"`
+ Nullable bool `json:"nullable,omitempty"`
+ Format string `json:"format,omitempty"`
+ Title string `json:"title,omitempty"`
+ Default interface{} `json:"default,omitempty"`
+ Maximum *float64 `json:"maximum,omitempty"`
+ ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"`
+ Minimum *float64 `json:"minimum,omitempty"`
+ ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"`
+ MaxLength *int64 `json:"maxLength,omitempty"`
+ MinLength *int64 `json:"minLength,omitempty"`
+ Pattern string `json:"pattern,omitempty"`
+ MaxItems *int64 `json:"maxItems,omitempty"`
+ MinItems *int64 `json:"minItems,omitempty"`
+ UniqueItems bool `json:"uniqueItems,omitempty"`
+ MultipleOf *float64 `json:"multipleOf,omitempty"`
+ Enum []interface{} `json:"enum,omitempty"`
+ MaxProperties *int64 `json:"maxProperties,omitempty"`
+ MinProperties *int64 `json:"minProperties,omitempty"`
+ Required []string `json:"required,omitempty"`
+ Items *SchemaOrArray `json:"items,omitempty"`
+ AllOf []Schema `json:"allOf,omitempty"`
+ OneOf []Schema `json:"oneOf,omitempty"`
+ AnyOf []Schema `json:"anyOf,omitempty"`
+ Not *Schema `json:"not,omitempty"`
+ Properties map[string]Schema `json:"properties,omitempty"`
+ AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"`
+ PatternProperties map[string]Schema `json:"patternProperties,omitempty"`
+ Dependencies Dependencies `json:"dependencies,omitempty"`
+ AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"`
+ Definitions Definitions `json:"definitions,omitempty"`
+}
+
+// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4)
+type SwaggerSchemaProps struct {
+ Discriminator string `json:"discriminator,omitempty"`
+ ReadOnly bool `json:"readOnly,omitempty"`
+ ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+ Example interface{} `json:"example,omitempty"`
+}
+
+// Schema the schema object allows the definition of input and output data types.
+// These types can be objects, but also primitives and arrays.
+// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/)
+// and uses a predefined subset of it.
+// On top of this subset, there are extensions provided by this specification to allow for more complete documentation.
+//
+// For more information: http://goo.gl/8us55a#schemaObject
+type Schema struct {
+ VendorExtensible
+ SchemaProps
+ SwaggerSchemaProps
+ ExtraProps map[string]interface{} `json:"-"`
+}
+
+// WithID sets the id for this schema, allows for chaining
+func (s *Schema) WithID(id string) *Schema {
+ s.ID = id
+ return s
+}
+
+// WithTitle sets the title for this schema, allows for chaining
+func (s *Schema) WithTitle(title string) *Schema {
+ s.Title = title
+ return s
+}
+
+// WithDescription sets the description for this schema, allows for chaining
+func (s *Schema) WithDescription(description string) *Schema {
+ s.Description = description
+ return s
+}
+
+// WithProperties sets the properties for this schema
+func (s *Schema) WithProperties(schemas map[string]Schema) *Schema {
+ s.Properties = schemas
+ return s
+}
+
+// SetProperty sets a property on this schema
+func (s *Schema) SetProperty(name string, schema Schema) *Schema {
+ if s.Properties == nil {
+ s.Properties = make(map[string]Schema)
+ }
+ s.Properties[name] = schema
+ return s
+}
+
+// WithAllOf sets the all of property
+func (s *Schema) WithAllOf(schemas ...Schema) *Schema {
+ s.AllOf = schemas
+ return s
+}
+
+// WithMaxProperties sets the max number of properties an object can have
+func (s *Schema) WithMaxProperties(max int64) *Schema {
+ s.MaxProperties = &max
+ return s
+}
+
+// WithMinProperties sets the min number of properties an object must have
+func (s *Schema) WithMinProperties(min int64) *Schema {
+ s.MinProperties = &min
+ return s
+}
+
+// Typed sets the type of this schema for a single value item
+func (s *Schema) Typed(tpe, format string) *Schema {
+ s.Type = []string{tpe}
+ s.Format = format
+ return s
+}
+
+// AddType adds a type with potential format to the types for this schema
+func (s *Schema) AddType(tpe, format string) *Schema {
+ s.Type = append(s.Type, tpe)
+ if format != "" {
+ s.Format = format
+ }
+ return s
+}
+
+// AsNullable flags this schema as nullable.
+func (s *Schema) AsNullable() *Schema {
+ s.Nullable = true
+ return s
+}
+
+// CollectionOf a fluent builder method for an array parameter
+func (s *Schema) CollectionOf(items Schema) *Schema {
+ s.Type = []string{jsonArray}
+ s.Items = &SchemaOrArray{Schema: &items}
+ return s
+}
+
+// WithDefault sets the default value on this parameter
+func (s *Schema) WithDefault(defaultValue interface{}) *Schema {
+ s.Default = defaultValue
+ return s
+}
+
+// WithRequired flags this parameter as required
+func (s *Schema) WithRequired(items ...string) *Schema {
+ s.Required = items
+ return s
+}
+
+// AddRequired adds field names to the required properties array
+func (s *Schema) AddRequired(items ...string) *Schema {
+ s.Required = append(s.Required, items...)
+ return s
+}
+
+// WithMaxLength sets a max length value
+func (s *Schema) WithMaxLength(max int64) *Schema {
+ s.MaxLength = &max
+ return s
+}
+
+// WithMinLength sets a min length value
+func (s *Schema) WithMinLength(min int64) *Schema {
+ s.MinLength = &min
+ return s
+}
+
+// WithPattern sets a pattern value
+func (s *Schema) WithPattern(pattern string) *Schema {
+ s.Pattern = pattern
+ return s
+}
+
+// WithMultipleOf sets a multiple of value
+func (s *Schema) WithMultipleOf(number float64) *Schema {
+ s.MultipleOf = &number
+ return s
+}
+
+// WithMaximum sets a maximum number value
+func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema {
+ s.Maximum = &max
+ s.ExclusiveMaximum = exclusive
+ return s
+}
+
+// WithMinimum sets a minimum number value
+func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema {
+ s.Minimum = &min
+ s.ExclusiveMinimum = exclusive
+ return s
+}
+
+// WithEnum sets the enum values (replace)
+func (s *Schema) WithEnum(values ...interface{}) *Schema {
+ s.Enum = append([]interface{}{}, values...)
+ return s
+}
+
+// WithMaxItems sets the max items
+func (s *Schema) WithMaxItems(size int64) *Schema {
+ s.MaxItems = &size
+ return s
+}
+
+// WithMinItems sets the min items
+func (s *Schema) WithMinItems(size int64) *Schema {
+ s.MinItems = &size
+ return s
+}
+
+// UniqueValues dictates that this array can only have unique items
+func (s *Schema) UniqueValues() *Schema {
+ s.UniqueItems = true
+ return s
+}
+
+// AllowDuplicates this array can have duplicates
+func (s *Schema) AllowDuplicates() *Schema {
+ s.UniqueItems = false
+ return s
+}
+
+// AddToAllOf adds a schema to the allOf property
+func (s *Schema) AddToAllOf(schemas ...Schema) *Schema {
+ s.AllOf = append(s.AllOf, schemas...)
+ return s
+}
+
+// WithDiscriminator sets the name of the discriminator field
+func (s *Schema) WithDiscriminator(discriminator string) *Schema {
+ s.Discriminator = discriminator
+ return s
+}
+
+// AsReadOnly flags this schema as readonly
+func (s *Schema) AsReadOnly() *Schema {
+ s.ReadOnly = true
+ return s
+}
+
+// AsWritable flags this schema as writeable (not read-only)
+func (s *Schema) AsWritable() *Schema {
+ s.ReadOnly = false
+ return s
+}
+
+// WithExample sets the example for this schema
+func (s *Schema) WithExample(example interface{}) *Schema {
+ s.Example = example
+ return s
+}
+
+// WithExternalDocs sets/removes the external docs for/from this schema.
+// When you pass empty strings as params the external documents will be removed.
+// When you pass non-empty string as one value then those values will be used on the external docs object.
+// So when you pass a non-empty description, you should also pass the url and vice versa.
+func (s *Schema) WithExternalDocs(description, url string) *Schema {
+ if description == "" && url == "" {
+ s.ExternalDocs = nil
+ return s
+ }
+
+ if s.ExternalDocs == nil {
+ s.ExternalDocs = &ExternalDocumentation{}
+ }
+ s.ExternalDocs.Description = description
+ s.ExternalDocs.URL = url
+ return s
+}
+
+// MarshalJSON marshal this to JSON
+func (s Schema) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.SchemaProps)
+ if err != nil {
+ return nil, fmt.Errorf("schema props %v", err)
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, fmt.Errorf("vendor props %v", err)
+ }
+ b3, err := s.Ref.MarshalJSON()
+ if err != nil {
+ return nil, fmt.Errorf("ref prop %v", err)
+ }
+ b4, err := s.Schema.MarshalJSON()
+ if err != nil {
+ return nil, fmt.Errorf("schema prop %v", err)
+ }
+ b5, err := json.Marshal(s.SwaggerSchemaProps)
+ if err != nil {
+ return nil, fmt.Errorf("common validations %v", err)
+ }
+ var b6 []byte
+ if s.ExtraProps != nil {
+ jj, err := json.Marshal(s.ExtraProps)
+ if err != nil {
+ return nil, fmt.Errorf("extra props %v", err)
+ }
+ b6 = jj
+ }
+ return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil
+}
+
+// UnmarshalJSON unmarshals this from JSON
+func (s *Schema) UnmarshalJSON(data []byte) error {
+ props := struct {
+ SchemaProps
+ SwaggerSchemaProps
+ }{}
+ if err := json.Unmarshal(data, &props); err != nil {
+ return err
+ }
+
+ sch := Schema{
+ SchemaProps: props.SchemaProps,
+ SwaggerSchemaProps: props.SwaggerSchemaProps,
+ }
+
+ var d map[string]interface{}
+ if err := json.Unmarshal(data, &d); err != nil {
+ return err
+ }
+
+ _ = sch.Ref.fromMap(d)
+ _ = sch.Schema.fromMap(d)
+
+ delete(d, "$ref")
+ delete(d, "$schema")
+ for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) {
+ delete(d, pn)
+ }
+
+ for k, vv := range d {
+ lk := strings.ToLower(k)
+ if strings.HasPrefix(lk, "x-") {
+ if sch.Extensions == nil {
+ sch.Extensions = map[string]interface{}{}
+ }
+ sch.Extensions[k] = vv
+ continue
+ }
+ if sch.ExtraProps == nil {
+ sch.ExtraProps = map[string]interface{}{}
+ }
+ sch.ExtraProps[k] = vv
+ }
+
+ *s = sch
+
+ return nil
+}
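
A minimal usage sketch of the fluent builders and the custom (un)marshalers above; the values are illustrative and the import path is this vendored package.

// Assumes: import ("encoding/json"; "fmt"; spec "k8s.io/kube-openapi/pkg/validation/spec")
s := (&spec.Schema{}).
	WithEnum("small", "medium", "large").
	AsReadOnly().
	WithExample("medium").
	WithExternalDocs("size conventions", "https://example.com/sizes")

out, _ := json.Marshal(s) // Schema.MarshalJSON concatenates the individual prop groups
fmt.Println(string(out))

// On unmarshal, unknown "x-*" keys land in Extensions and other unknown keys in ExtraProps.
var back spec.Schema
_ = json.Unmarshal([]byte(`{"type":"string","x-origin":"demo","custom":1}`), &back)
fmt.Println(back.Extensions["x-origin"], back.ExtraProps["custom"]) // demo 1
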
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go
new file mode 100644
index 000000000..563b9b95e
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go
@@ -0,0 +1,64 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section
+type SecuritySchemeProps struct {
+ Description string `json:"description,omitempty"`
+ Type string `json:"type"`
+ Name string `json:"name,omitempty"` // api key
+ In string `json:"in,omitempty"` // api key
+ Flow string `json:"flow,omitempty"` // oauth2
+ AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2
+ TokenURL string `json:"tokenUrl,omitempty"` // oauth2
+ Scopes map[string]string `json:"scopes,omitempty"` // oauth2
+}
+
+// SecurityScheme allows the definition of a security scheme that can be used by the operations.
+// Supported schemes are basic authentication, an API key (either as a header or as a query parameter)
+// and OAuth2's common flows (implicit, password, application and access code).
+//
+// For more information: http://goo.gl/8us55a#securitySchemeObject
+type SecurityScheme struct {
+ VendorExtensible
+ SecuritySchemeProps
+}
+
+// MarshalJSON marshals this security scheme to JSON
+func (s SecurityScheme) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.SecuritySchemeProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this security scheme from JSON
+func (s *SecurityScheme) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &s.VendorExtensible)
+}
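
A short sketch of populating and marshaling a SecurityScheme via SecuritySchemeProps; the concrete values are illustrative only.

// Assumes: import ("encoding/json"; "fmt"; spec "k8s.io/kube-openapi/pkg/validation/spec")
scheme := spec.SecurityScheme{
	SecuritySchemeProps: spec.SecuritySchemeProps{
		Type:             "oauth2",
		Flow:             "accessCode",
		AuthorizationURL: "https://example.com/oauth/authorize",
		TokenURL:         "https://example.com/oauth/token",
		Scopes:           map[string]string{"read:events": "read access to events"},
	},
}
b, _ := json.Marshal(scheme) // merges SecuritySchemeProps with any vendor extensions
fmt.Println(string(b))
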
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go
new file mode 100644
index 000000000..be66d1ddd
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go
@@ -0,0 +1,286 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/go-openapi/swag"
+)
+
+// Swagger is the root document object for the API specification.
+// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier)
+// together into one document.
+//
+// For more information: http://goo.gl/8us55a#swagger-object-
+type Swagger struct {
+ VendorExtensible
+ SwaggerProps
+}
+
+// MarshalJSON marshals this swagger structure to json
+func (s Swagger) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(s.SwaggerProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(s.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals a swagger spec from json
+func (s *Swagger) UnmarshalJSON(data []byte) error {
+ var sw Swagger
+ if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil {
+ return err
+ }
+ if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil {
+ return err
+ }
+ *s = sw
+ return nil
+}
+
+// SwaggerProps captures the top-level properties of an API specification
+//
+// NOTE: validation rules
+// - the scheme, when present, must be one of [http, https, ws, wss]
+// - BasePath must start with a leading "/"
+// - Paths is required
+type SwaggerProps struct {
+ ID string `json:"id,omitempty"`
+ Consumes []string `json:"consumes,omitempty"`
+ Produces []string `json:"produces,omitempty"`
+ Schemes []string `json:"schemes,omitempty"`
+ Swagger string `json:"swagger,omitempty"`
+ Info *Info `json:"info,omitempty"`
+ Host string `json:"host,omitempty"`
+ BasePath string `json:"basePath,omitempty"`
+ Paths *Paths `json:"paths"`
+ Definitions Definitions `json:"definitions,omitempty"`
+ Parameters map[string]Parameter `json:"parameters,omitempty"`
+ Responses map[string]Response `json:"responses,omitempty"`
+ SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"`
+ Security []map[string][]string `json:"security,omitempty"`
+ Tags []Tag `json:"tags,omitempty"`
+ ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+}
+
+// Dependencies represents a dependencies property
+type Dependencies map[string]SchemaOrStringArray
+
+// SchemaOrBool represents a schema or a boolean value; it is biased towards true for the boolean property
+type SchemaOrBool struct {
+ Allows bool
+ Schema *Schema
+}
+
+var jsTrue = []byte("true")
+var jsFalse = []byte("false")
+
+// MarshalJSON converts this object to JSON
+func (s SchemaOrBool) MarshalJSON() ([]byte, error) {
+ if s.Schema != nil {
+ return json.Marshal(s.Schema)
+ }
+
+ if s.Schema == nil && !s.Allows {
+ return jsFalse, nil
+ }
+ return jsTrue, nil
+}
+
+// UnmarshalJSON converts this bool or schema object from a JSON structure
+func (s *SchemaOrBool) UnmarshalJSON(data []byte) error {
+ var nw SchemaOrBool
+ if len(data) >= 4 {
+ if data[0] == '{' {
+ var sch Schema
+ if err := json.Unmarshal(data, &sch); err != nil {
+ return err
+ }
+ nw.Schema = &sch
+ }
+ nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e')
+ }
+ *s = nw
+ return nil
+}
+
+// SchemaOrStringArray represents a schema or a string array
+type SchemaOrStringArray struct {
+ Schema *Schema
+ Property []string
+}
+
+// MarshalJSON converts this schema object or array into JSON structure
+func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) {
+ if len(s.Property) > 0 {
+ return json.Marshal(s.Property)
+ }
+ if s.Schema != nil {
+ return json.Marshal(s.Schema)
+ }
+ return []byte("null"), nil
+}
+
+// UnmarshalJSON converts this schema object or array from a JSON structure
+func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error {
+ var first byte
+ if len(data) > 1 {
+ first = data[0]
+ }
+ var nw SchemaOrStringArray
+ if first == '{' {
+ var sch Schema
+ if err := json.Unmarshal(data, &sch); err != nil {
+ return err
+ }
+ nw.Schema = &sch
+ }
+ if first == '[' {
+ if err := json.Unmarshal(data, &nw.Property); err != nil {
+ return err
+ }
+ }
+ *s = nw
+ return nil
+}
+
+// Definitions contains the models explicitly defined in this spec.
+// It is an object to hold data types that can be consumed and produced by operations.
+// These data types can be primitives, arrays or models.
+//
+// For more information: http://goo.gl/8us55a#definitionsObject
+type Definitions map[string]Schema
+
+// SecurityDefinitions is a declaration of the security schemes available to be used in the specification.
+// This does not enforce the security schemes on the operations and only serves to provide
+// the relevant details for each scheme.
+//
+// For more information: http://goo.gl/8us55a#securityDefinitionsObject
+type SecurityDefinitions map[string]*SecurityScheme
+
+// StringOrArray represents a value that can either be a string
+// or an array of strings. Mainly here for serialization purposes
+type StringOrArray []string
+
+// Contains returns true when the value is contained in the slice
+func (s StringOrArray) Contains(value string) bool {
+ for _, str := range s {
+ if str == value {
+ return true
+ }
+ }
+ return false
+}
+
+// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string
+func (s *StringOrArray) UnmarshalJSON(data []byte) error {
+ var first byte
+ if len(data) > 1 {
+ first = data[0]
+ }
+
+ if first == '[' {
+ var parsed []string
+ if err := json.Unmarshal(data, &parsed); err != nil {
+ return err
+ }
+ *s = StringOrArray(parsed)
+ return nil
+ }
+
+ var single interface{}
+ if err := json.Unmarshal(data, &single); err != nil {
+ return err
+ }
+ if single == nil {
+ return nil
+ }
+ switch v := single.(type) {
+ case string:
+ *s = StringOrArray([]string{v})
+ return nil
+ default:
+ return fmt.Errorf("only string or array is allowed, not %T", single)
+ }
+}
+
+// MarshalJSON converts this string or array to a JSON array or JSON string
+func (s StringOrArray) MarshalJSON() ([]byte, error) {
+ if len(s) == 1 {
+ return json.Marshal([]string(s)[0])
+ }
+ return json.Marshal([]string(s))
+}
+
+// SchemaOrArray represents a value that can either be a Schema
+// or an array of Schema. Mainly here for serialization purposes
+type SchemaOrArray struct {
+ Schema *Schema
+ Schemas []Schema
+}
+
+// Len returns the number of schemas in this property
+func (s SchemaOrArray) Len() int {
+ if s.Schema != nil {
+ return 1
+ }
+ return len(s.Schemas)
+}
+
+// ContainsType returns true when one of the schemas is of the specified type
+func (s *SchemaOrArray) ContainsType(name string) bool {
+ if s.Schema != nil {
+ return s.Schema.Type != nil && s.Schema.Type.Contains(name)
+ }
+ return false
+}
+
+// MarshalJSON converts this schema object or array into JSON structure
+func (s SchemaOrArray) MarshalJSON() ([]byte, error) {
+ if len(s.Schemas) > 0 {
+ return json.Marshal(s.Schemas)
+ }
+ return json.Marshal(s.Schema)
+}
+
+// UnmarshalJSON converts this schema object or array from a JSON structure
+func (s *SchemaOrArray) UnmarshalJSON(data []byte) error {
+ var nw SchemaOrArray
+ var first byte
+ if len(data) > 1 {
+ first = data[0]
+ }
+ if first == '{' {
+ var sch Schema
+ if err := json.Unmarshal(data, &sch); err != nil {
+ return err
+ }
+ nw.Schema = &sch
+ }
+ if first == '[' {
+ if err := json.Unmarshal(data, &nw.Schemas); err != nil {
+ return err
+ }
+ }
+ *s = nw
+ return nil
+}
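
A round-trip sketch of the serialization helper types defined above (StringOrArray and SchemaOrBool), using only behavior visible in this file; the JSON snippets are illustrative.

// Assumes: import ("encoding/json"; "fmt"; spec "k8s.io/kube-openapi/pkg/validation/spec")
var typ spec.StringOrArray
_ = json.Unmarshal([]byte(`"string"`), &typ)  // a bare string becomes ["string"]
_ = json.Unmarshal([]byte(`["a","b"]`), &typ) // an array stays as-is
fmt.Println(typ.Contains("a"))                // true

var ap spec.SchemaOrBool // models additionalProperties: a boolean or an inline schema
_ = json.Unmarshal([]byte(`false`), &ap)             // Allows == false
_ = json.Unmarshal([]byte(`{"type":"string"}`), &ap) // Schema set, Allows == true
out, _ := ap.MarshalJSON()
fmt.Println(string(out)) // {"type":"string"}
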
diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go
new file mode 100644
index 000000000..ddd1eac7e
--- /dev/null
+++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go
@@ -0,0 +1,59 @@
+// Copyright 2015 go-swagger maintainers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spec
+
+import (
+ "encoding/json"
+
+ "github.com/go-openapi/swag"
+)
+
+// TagProps describe a tag entry in the top level tags section of a swagger spec
+type TagProps struct {
+ Description string `json:"description,omitempty"`
+ Name string `json:"name,omitempty"`
+ ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"`
+}
+
+// Tag allows adding metadata to a single tag that is used by the
+// [Operation Object](http://goo.gl/8us55a#operationObject).
+// It is not mandatory to have a Tag Object per tag used there.
+//
+// For more information: http://goo.gl/8us55a#tagObject
+type Tag struct {
+ VendorExtensible
+ TagProps
+}
+
+// MarshalJSON marshals this tag to JSON
+func (t Tag) MarshalJSON() ([]byte, error) {
+ b1, err := json.Marshal(t.TagProps)
+ if err != nil {
+ return nil, err
+ }
+ b2, err := json.Marshal(t.VendorExtensible)
+ if err != nil {
+ return nil, err
+ }
+ return swag.ConcatJSON(b1, b2), nil
+}
+
+// UnmarshalJSON unmarshals this tag from JSON
+func (t *Tag) UnmarshalJSON(data []byte) error {
+ if err := json.Unmarshal(data, &t.TagProps); err != nil {
+ return err
+ }
+ return json.Unmarshal(data, &t.VendorExtensible)
+}
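
A small sketch of unmarshaling a Tag, assuming the VendorExtensible unmarshaler in this vendored package collects "x-" prefixed keys into Extensions as the other types here do; the x- key is illustrative.

// Assumes: import ("encoding/json"; "fmt"; spec "k8s.io/kube-openapi/pkg/validation/spec")
var tag spec.Tag
_ = json.Unmarshal([]byte(`{"name":"events","description":"Event APIs","x-order":1}`), &tag)
fmt.Println(tag.Name)                  // events
fmt.Println(tag.Extensions["x-order"]) // 1, kept as a vendor extension
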
diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go
index 0a55a844e..2cab2c580 100644
--- a/vendor/k8s.io/utils/pointer/pointer.go
+++ b/vendor/k8s.io/utils/pointer/pointer.go
@@ -46,86 +46,200 @@ func AllPtrFieldsNil(obj interface{}) bool {
return true
}
-// Int32Ptr returns a pointer to an int32
-func Int32Ptr(i int32) *int32 {
+// Int returns a pointer to an int
+func Int(i int) *int {
return &i
}
-// Int32PtrDerefOr dereference the int32 ptr and returns it if not nil,
-// else returns def.
-func Int32PtrDerefOr(ptr *int32, def int32) int32 {
+var IntPtr = Int // for back-compat
+
+// IntDeref dereferences the int ptr and returns it if not nil, or else
+// returns def.
+func IntDeref(ptr *int, def int) int {
if ptr != nil {
return *ptr
}
return def
}
-// Int64Ptr returns a pointer to an int64
-func Int64Ptr(i int64) *int64 {
+var IntPtrDerefOr = IntDeref // for back-compat
+
+// Int32 returns a pointer to an int32.
+func Int32(i int32) *int32 {
return &i
}
-// Int64PtrDerefOr dereference the int64 ptr and returns it if not nil,
-// else returns def.
-func Int64PtrDerefOr(ptr *int64, def int64) int64 {
+var Int32Ptr = Int32 // for back-compat
+
+// Int32Deref dereferences the int32 ptr and returns it if not nil, or else
+// returns def.
+func Int32Deref(ptr *int32, def int32) int32 {
if ptr != nil {
return *ptr
}
return def
}
-// BoolPtr returns a pointer to a bool
-func BoolPtr(b bool) *bool {
+var Int32PtrDerefOr = Int32Deref // for back-compat
+
+// Int32Equal returns true if both arguments are nil or both arguments
+// dereference to the same value.
+func Int32Equal(a, b *int32) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == nil {
+ return true
+ }
+ return *a == *b
+}
+
+// Int64 returns a pointer to an int64.
+func Int64(i int64) *int64 {
+ return &i
+}
+
+var Int64Ptr = Int64 // for back-compat
+
+// Int64Deref dereferences the int64 ptr and returns it if not nil, or else
+// returns def.
+func Int64Deref(ptr *int64, def int64) int64 {
+ if ptr != nil {
+ return *ptr
+ }
+ return def
+}
+
+var Int64PtrDerefOr = Int64Deref // for back-compat
+
+// Int64Equal returns true if both arguments are nil or both arguments
+// dereference to the same value.
+func Int64Equal(a, b *int64) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == nil {
+ return true
+ }
+ return *a == *b
+}
+
+// Bool returns a pointer to a bool.
+func Bool(b bool) *bool {
return &b
}
-// BoolPtrDerefOr dereference the bool ptr and returns it if not nil,
-// else returns def.
-func BoolPtrDerefOr(ptr *bool, def bool) bool {
+var BoolPtr = Bool // for back-compat
+
+// BoolDeref dereferences the bool ptr and returns it if not nil, or else
+// returns def.
+func BoolDeref(ptr *bool, def bool) bool {
if ptr != nil {
return *ptr
}
return def
}
-// StringPtr returns a pointer to the passed string.
-func StringPtr(s string) *string {
+var BoolPtrDerefOr = BoolDeref // for back-compat
+
+// BoolEqual returns true if both arguments are nil or both arguments
+// dereference to the same value.
+func BoolEqual(a, b *bool) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == nil {
+ return true
+ }
+ return *a == *b
+}
+
+// String returns a pointer to a string.
+func String(s string) *string {
return &s
}
-// StringPtrDerefOr dereference the string ptr and returns it if not nil,
-// else returns def.
-func StringPtrDerefOr(ptr *string, def string) string {
+var StringPtr = String // for back-compat
+
+// StringDeref dereferences the string ptr and returns it if not nil, or else
+// returns def.
+func StringDeref(ptr *string, def string) string {
if ptr != nil {
return *ptr
}
return def
}
-// Float32Ptr returns a pointer to the passed float32.
-func Float32Ptr(i float32) *float32 {
+var StringPtrDerefOr = StringDeref // for back-compat
+
+// StringEqual returns true if both arguments are nil or both arguments
+// dereference to the same value.
+func StringEqual(a, b *string) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == nil {
+ return true
+ }
+ return *a == *b
+}
+
+// Float32 returns a pointer to a float32.
+func Float32(i float32) *float32 {
return &i
}
-// Float32PtrDerefOr dereference the float32 ptr and returns it if not nil,
-// else returns def.
-func Float32PtrDerefOr(ptr *float32, def float32) float32 {
+var Float32Ptr = Float32 // for back-compat
+
+// Float32Deref dereferences the float32 ptr and returns it if not nil, or else
+// returns def.
+func Float32Deref(ptr *float32, def float32) float32 {
if ptr != nil {
return *ptr
}
return def
}
-// Float64Ptr returns a pointer to the passed float64.
-func Float64Ptr(i float64) *float64 {
+var Float32PtrDerefOr = Float32Deref // for back-compat
+
+// Float32Equal returns true if both arguments are nil or both arguments
+// dereference to the same value.
+func Float32Equal(a, b *float32) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == nil {
+ return true
+ }
+ return *a == *b
+}
+
+// Float64 returns a pointer to a float64.
+func Float64(i float64) *float64 {
return &i
}
-// Float64PtrDerefOr dereference the float64 ptr and returns it if not nil,
-// else returns def.
-func Float64PtrDerefOr(ptr *float64, def float64) float64 {
+var Float64Ptr = Float64 // for back-compat
+
+// Float64Deref dereferences the float64 ptr and returns it if not nil, or else
+// returns def.
+func Float64Deref(ptr *float64, def float64) float64 {
if ptr != nil {
return *ptr
}
return def
}
+
+var Float64PtrDerefOr = Float64Deref // for back-compat
+
+// Float64Equal returns true if both arguments are nil or both arguments
+// dereference to the same value.
+func Float64Equal(a, b *float64) bool {
+ if (a == nil) != (b == nil) {
+ return false
+ }
+ if a == nil {
+ return true
+ }
+ return *a == *b
+}
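
Caller-side, the renamed helpers are drop-in: the old *Ptr and *PtrDerefOr names keep working through the back-compat aliases, and the new *Equal helpers compare through the pointers. A quick sketch:

// Assumes: import ("fmt"; "k8s.io/utils/pointer")
replicas := pointer.Int32(3)                            // was pointer.Int32Ptr(3)
n := pointer.Int32Deref(replicas, 1)                    // was pointer.Int32PtrDerefOr(replicas, 1)
equal := pointer.Int32Equal(replicas, pointer.Int32(3)) // true: both dereference to 3
fmt.Println(n, equal)                                   // 3 true
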
diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go
index 2af4967ca..3023d1066 100644
--- a/vendor/k8s.io/utils/trace/trace.go
+++ b/vendor/k8s.io/utils/trace/trace.go
@@ -56,7 +56,7 @@ func writeTraceItemSummary(b *bytes.Buffer, msg string, totalTime time.Duration,
b.WriteString(" ")
}
- b.WriteString(fmt.Sprintf("%vms (%v)", durationToMilliseconds(totalTime), startTime.Format("15:04:00.000")))
+ b.WriteString(fmt.Sprintf("%vms (%v)", durationToMilliseconds(totalTime), startTime.Format("15:04:05.000")))
}
func durationToMilliseconds(timeDuration time.Duration) int64 {
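
The one-character layout change above matters because Go layouts are written against the reference time Mon Jan 2 15:04:05 2006: "05" is the seconds placeholder while "00" is literal text, so the old layout always printed ":00" for seconds. For illustration (timestamp is arbitrary):

// Assumes: import ("fmt"; "time")
ts := time.Date(2022, 1, 7, 10, 30, 42, 123000000, time.UTC)
fmt.Println(ts.Format("15:04:00.000")) // "10:30:00.123" (seconds replaced by the literal 00)
fmt.Println(ts.Format("15:04:05.000")) // "10:30:42.123" (seconds rendered correctly)
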
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_validation.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_validation.go
index 2cb09b105..043315d27 100644
--- a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_validation.go
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_validation.go
@@ -22,6 +22,7 @@ import (
"fmt"
"regexp"
+ cesqlparser "github.com/cloudevents/sdk-go/sql/v2/parser"
corev1 "k8s.io/api/core/v1"
"knative.dev/pkg/apis"
"knative.dev/pkg/kmp"
@@ -193,6 +194,17 @@ func ValidateSubscriptionAPIFiltersList(ctx context.Context, filters []Subscript
return errs
}
+func ValidateCESQLExpression(ctx context.Context, expression string) (errs *apis.FieldError) {
+ if expression == "" {
+ return nil
+ }
+ _, err := cesqlparser.Parse(expression)
+ if err != nil {
+ return apis.ErrInvalidValue(expression, apis.CurrentField, err.Error())
+ }
+ return nil
+}
+
func ValidateSubscriptionAPIFilter(ctx context.Context, filter *SubscriptionsAPIFilter) (errs *apis.FieldError) {
if filter == nil {
return nil
@@ -211,6 +223,8 @@ func ValidateSubscriptionAPIFilter(ctx context.Context, filter *SubscriptionsAPI
ValidateSubscriptionAPIFiltersList(ctx, filter.Any).ViaField("any"),
).Also(
ValidateSubscriptionAPIFilter(ctx, filter.Not).ViaField("not"),
+ ).Also(
+ ValidateCESQLExpression(ctx, filter.SQL).ViaField("sql"),
)
return errs
}
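
A hedged sketch of the new validation hook in isolation; the expression is illustrative CloudEvents SQL:

// Assumes: import ("context"; "fmt"; eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1")
expr := "subject LIKE 'order-%'"
if fe := eventingv1.ValidateCESQLExpression(context.Background(), expr); fe != nil {
	fmt.Println("invalid CESQL filter:", fe)
}
// An empty expression is treated as valid (nil); parse failures surface as a
// FieldError, which ValidateSubscriptionAPIFilter attaches under the "sql" field.
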
diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/client.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/client.go
index fe2c5aa6a..41cc82eaf 100644
--- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/client.go
+++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/client.go
@@ -318,6 +318,13 @@ func (g *clientGenerator) GenerateType(c *generator.Context, t *types.Type, w io
resultType.Name.Name = e.ResultTypeOverride
}
}
+ // TODO: Total hacks to get this to run at all.
+ if v == "apply" {
+ inputType = *(c.Universe.Type(types.Name{
+ Package: "k8s.io/client-go/applyconfigurations/" + strings.ReplaceAll(inputType.Name.Package, "k8s.io/api/", ""),
+ Name: inputType.Name.Name + "ApplyConfiguration",
+ }))
+ }
opts["InputType"] = &inputType
opts["ResultType"] = &resultType
if e.IsSubresource() {
@@ -579,6 +586,11 @@ func (w *wrap{{.GroupGoName}}{{.Version}}{{ .Type.Name.Name }}Impl) ApplyStatus(
var extensionVerbs = sets.NewString( /* Populated from extensionVerbMap during init */ )
var extensionVerbMap = map[string]string{
+ "apply": `
+func (w *wrap{{.GroupGoName}}{{.Version}}{{ .Type.Name.Name }}Impl) Apply(ctx {{ .contextContext|raw }}, name string, in *{{ .InputType|raw }}, opts {{ .metav1ApplyOptions|raw }}) (*{{ .ResultType|raw }}, error) {
+ panic("NYI")
+}
+`,
"create": `
func (w *wrap{{.GroupGoName}}{{.Version}}{{ .Type.Name.Name }}Impl) Create(ctx {{ .contextContext|raw }}, {{ if .Subresource }}_ string, {{ end }}in *{{ .InputType|raw }}, opts {{ .metav1CreateOptions|raw }}) (*{{ .ResultType|raw }}, error) {
panic("NYI")
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 367e52575..1c5d9e3ce 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -8,6 +8,8 @@ contrib.go.opencensus.io/exporter/prometheus
github.com/PuerkitoBio/purell
# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
github.com/PuerkitoBio/urlesc
+# github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20211221011931-643d94fcab96
+github.com/antlr/antlr4/runtime/Go/antlr
# github.com/beorn7/perks v1.0.1
github.com/beorn7/perks/quantile
# github.com/blendle/zapdriver v1.3.1
@@ -21,7 +23,15 @@ github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1
github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1
# github.com/cespare/xxhash/v2 v2.1.1
github.com/cespare/xxhash/v2
-# github.com/cloudevents/sdk-go/v2 v2.7.0
+# github.com/cloudevents/sdk-go/sql/v2 v2.8.0
+github.com/cloudevents/sdk-go/sql/v2
+github.com/cloudevents/sdk-go/sql/v2/expression
+github.com/cloudevents/sdk-go/sql/v2/function
+github.com/cloudevents/sdk-go/sql/v2/gen
+github.com/cloudevents/sdk-go/sql/v2/parser
+github.com/cloudevents/sdk-go/sql/v2/runtime
+github.com/cloudevents/sdk-go/sql/v2/utils
+# github.com/cloudevents/sdk-go/v2 v2.8.0
github.com/cloudevents/sdk-go/v2
github.com/cloudevents/sdk-go/v2/binding
github.com/cloudevents/sdk-go/v2/binding/format
@@ -44,7 +54,7 @@ github.com/davecgh/go-spew/spew
## explicit
github.com/emicklei/go-restful
github.com/emicklei/go-restful/log
-# github.com/evanphx/json-patch v4.9.0+incompatible
+# github.com/evanphx/json-patch v4.11.0+incompatible
github.com/evanphx/json-patch
# github.com/evanphx/json-patch/v5 v5.6.0
github.com/evanphx/json-patch/v5
@@ -83,7 +93,7 @@ github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
github.com/golang/protobuf/ptypes/wrappers
-# github.com/google/btree v1.0.0
+# github.com/google/btree v1.0.1
github.com/google/btree
# github.com/google/go-cmp v0.5.6
## explicit
@@ -102,8 +112,7 @@ github.com/google/gofuzz/bytesource
github.com/google/shlex
# github.com/google/uuid v1.3.0
github.com/google/uuid
-# github.com/googleapis/gnostic v0.5.3
-## explicit
+# github.com/googleapis/gnostic v0.5.5
github.com/googleapis/gnostic/compiler
github.com/googleapis/gnostic/extensions
github.com/googleapis/gnostic/jsonschema
@@ -263,7 +272,7 @@ go.uber.org/zap/zapcore
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.0.0-20211205041911-012df41ee64c
+# golang.org/x/net v0.0.0-20211209124913-491a49abca63
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
golang.org/x/net/http/httpguts
@@ -438,7 +447,7 @@ gotest.tools/v3/internal/assert
gotest.tools/v3/internal/difflib
gotest.tools/v3/internal/format
gotest.tools/v3/internal/source
-# k8s.io/api v0.21.4
+# k8s.io/api v0.22.5
## explicit
k8s.io/api/admissionregistration/v1
k8s.io/api/admissionregistration/v1beta1
@@ -483,14 +492,14 @@ k8s.io/api/scheduling/v1beta1
k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.21.4
+# k8s.io/apiextensions-apiserver v0.22.5
## explicit
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
-# k8s.io/apimachinery v0.21.4
+# k8s.io/apimachinery v0.22.5
## explicit
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
@@ -547,7 +556,7 @@ k8s.io/apimachinery/third_party/forked/golang/reflect
k8s.io/cli-runtime/pkg/genericclioptions
k8s.io/cli-runtime/pkg/printers
k8s.io/cli-runtime/pkg/resource
-# k8s.io/client-go v0.21.4
+# k8s.io/client-go v0.22.5
## explicit
k8s.io/client-go/applyconfigurations/admissionregistration/v1
k8s.io/client-go/applyconfigurations/admissionregistration/v1beta1
@@ -640,6 +649,8 @@ k8s.io/client-go/kubernetes/typed/storage/v1
k8s.io/client-go/kubernetes/typed/storage/v1alpha1
k8s.io/client-go/kubernetes/typed/storage/v1beta1
k8s.io/client-go/pkg/apis/clientauthentication
+k8s.io/client-go/pkg/apis/clientauthentication/install
+k8s.io/client-go/pkg/apis/clientauthentication/v1
k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1
k8s.io/client-go/pkg/apis/clientauthentication/v1beta1
k8s.io/client-go/pkg/version
@@ -673,7 +684,7 @@ k8s.io/client-go/util/jsonpath
k8s.io/client-go/util/keyutil
k8s.io/client-go/util/retry
k8s.io/client-go/util/workqueue
-# k8s.io/code-generator v0.21.4
+# k8s.io/code-generator v0.22.5
## explicit
k8s.io/code-generator
k8s.io/code-generator/cmd/client-gen
@@ -721,22 +732,22 @@ k8s.io/gengo/parser
k8s.io/gengo/types
# k8s.io/klog v1.0.0
k8s.io/klog
-# k8s.io/klog/v2 v2.8.0
+# k8s.io/klog/v2 v2.9.0
k8s.io/klog/v2
-# k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7
+# k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c
k8s.io/kube-openapi/cmd/openapi-gen/args
k8s.io/kube-openapi/pkg/common
k8s.io/kube-openapi/pkg/generators
k8s.io/kube-openapi/pkg/generators/rules
k8s.io/kube-openapi/pkg/util/proto
k8s.io/kube-openapi/pkg/util/sets
-# k8s.io/utils v0.0.0-20210111153108-fddb29f9d009
-## explicit
+k8s.io/kube-openapi/pkg/validation/spec
+# k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a
k8s.io/utils/buffer
k8s.io/utils/integer
k8s.io/utils/pointer
k8s.io/utils/trace
-# knative.dev/eventing v0.28.1-0.20211222204918-d8297456d455
+# knative.dev/eventing v0.28.1-0.20220107145225-eb4c06c8009d
## explicit
knative.dev/eventing/pkg/apis/config
knative.dev/eventing/pkg/apis/duck
@@ -765,7 +776,7 @@ knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/fake
# knative.dev/hack v0.0.0-20211222071919-abd085fc43de
## explicit
knative.dev/hack
-# knative.dev/networking v0.0.0-20211223013028-62388a5f2853
+# knative.dev/networking v0.0.0-20220107020122-0dbedcd88acf
## explicit
knative.dev/networking/pkg
knative.dev/networking/pkg/apis/networking
@@ -773,7 +784,7 @@ knative.dev/networking/pkg/apis/networking/v1alpha1
knative.dev/networking/pkg/client/clientset/versioned
knative.dev/networking/pkg/client/clientset/versioned/scheme
knative.dev/networking/pkg/client/clientset/versioned/typed/networking/v1alpha1
-# knative.dev/pkg v0.0.0-20211216142117-79271798f696
+# knative.dev/pkg v0.0.0-20220105211333-96f18522d78d
## explicit
knative.dev/pkg/apis
knative.dev/pkg/apis/duck
@@ -823,7 +834,7 @@ knative.dev/pkg/tracing/config
knative.dev/pkg/tracing/propagation
knative.dev/pkg/tracing/propagation/tracecontextb3
knative.dev/pkg/tracker
-# knative.dev/serving v0.28.1-0.20211221064617-c69f92cdfce7
+# knative.dev/serving v0.28.1-0.20220107170125-03091748d279
## explicit
knative.dev/serving/pkg/apis/autoscaling
knative.dev/serving/pkg/apis/autoscaling/v1alpha1