Bump Kind to v0.24.0, whose default node image is Kubernetes v1.31.0.
Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
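Not part of the commit itself, but for context: kind v0.24.0 ships kindest/node:v1.31.0 as its default node image, so clusters created without an explicit `--image` now run Kubernetes v1.31.0. A minimal local check, assuming kind v0.24.0 is installed (the cluster name below is arbitrary):

```sh
# Print the installed kind version, then create a cluster pinned to the image
# this release uses by default (equivalent to omitting --image entirely).
kind version
kind create cluster --name verify-v131 --image kindest/node:v1.31.0
kubectl get nodes -o wide   # the node version should report v1.31.0
```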
This commit is contained in:
parent 2efe03fac9
commit 9aba233c47

go.mod (8 changed lines)
@@ -55,7 +55,7 @@ require (
sigs.k8s.io/cluster-api v1.7.1
sigs.k8s.io/controller-runtime v0.18.4
sigs.k8s.io/custom-metrics-apiserver v1.30.0
sigs.k8s.io/kind v0.22.0
sigs.k8s.io/kind v0.24.0
sigs.k8s.io/mcs-api v0.1.0
sigs.k8s.io/metrics-server v0.7.1
sigs.k8s.io/structured-merge-diff/v4 v4.4.1
@@ -64,10 +64,10 @@ require (

require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/BurntSushi/toml v1.0.0 // indirect
github.com/BurntSushi/toml v1.4.0 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/alessio/shellescape v1.4.1 // indirect
github.com/alessio/shellescape v1.4.2 // indirect
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
@@ -129,7 +129,7 @@ require (
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.1.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
go.sum (20 changed lines)
@@ -65,8 +65,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxB
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0=
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
@@ -88,8 +88,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alessio/shellescape v1.2.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/alessio/shellescape v1.4.2 h1:MHPfaU+ddJ0/bYWpgIeUnQUqKrlJ1S7BfEYPM4uEoM0=
github.com/alessio/shellescape v1.4.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18=
@@ -212,7 +212,6 @@ github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI=
github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.0.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg=
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
@@ -590,7 +589,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
@@ -619,8 +617,9 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4=
github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
@@ -721,7 +720,6 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@@ -1381,7 +1379,6 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
@@ -1506,8 +1503,8 @@ sigs.k8s.io/custom-metrics-apiserver v1.30.0/go.mod h1:QXOKIL83M545uITzoZn4OC1C7
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/kind v0.8.1/go.mod h1:oNKTxUVPYkV9lWzY6CVMNluVq8cBsyq+UgPJdvA3uu4=
sigs.k8s.io/kind v0.22.0 h1:z/+yr/azoOfzsfooqRsPw1wjJlqT/ukXP0ShkHwNlsI=
sigs.k8s.io/kind v0.22.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs=
sigs.k8s.io/kind v0.24.0 h1:g4y4eu0qa+SCeKESLpESgMmVFBebL0BDa6f777OIWrg=
sigs.k8s.io/kind v0.24.0/go.mod h1:t7ueEpzPYJvHA8aeLtI52rtFftNgUYUaCwvxjk7phfw=
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0=
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY=
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U=
@@ -1522,6 +1519,5 @@ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+s
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
@@ -79,7 +79,7 @@ util::verify_go_version
util::verify_docker

# install kind and kubectl
kind_version=v0.22.0
kind_version=v0.24.0
echo -n "Preparing: 'kind' existence check - "
if util::cmd_exist kind; then
echo "passed"
@@ -39,7 +39,7 @@ KARMADA_GO_PACKAGE="github.com/karmada-io/karmada"

MIN_Go_VERSION=go1.22.6

DEFAULT_CLUSTER_VERSION="kindest/node:v1.27.3"
DEFAULT_CLUSTER_VERSION="kindest/node:v1.31.0"

KARMADA_TARGET_SOURCE=(
karmada-aggregated-apiserver=cmd/aggregated-apiserver
@@ -1,2 +1,2 @@
toml.test
/toml.test
/toml-test
@@ -1 +0,0 @@
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
@ -1,6 +1,5 @@
|
|||
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
|
||||
reflection interface similar to Go's standard library `json` and `xml`
|
||||
packages.
|
||||
reflection interface similar to Go's standard library `json` and `xml` packages.
|
||||
|
||||
Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
|
||||
|
||||
|
@ -10,7 +9,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a
|
|||
changelog; this information is also in the git tag annotations (e.g. `git show
|
||||
v0.4.0`).
|
||||
|
||||
This library requires Go 1.13 or newer; install it with:
|
||||
This library requires Go 1.18 or newer; add it to your go.mod with:
|
||||
|
||||
% go get github.com/BurntSushi/toml@latest
|
||||
|
||||
|
@ -19,16 +18,7 @@ It also comes with a TOML validator CLI tool:
|
|||
% go install github.com/BurntSushi/toml/cmd/tomlv@latest
|
||||
% tomlv some-toml-file.toml
|
||||
|
||||
### Testing
|
||||
This package passes all tests in [toml-test] for both the decoder and the
|
||||
encoder.
|
||||
|
||||
[toml-test]: https://github.com/BurntSushi/toml-test
|
||||
|
||||
### Examples
|
||||
This package works similar to how the Go standard library handles XML and JSON.
|
||||
Namely, data is loaded into Go values via reflection.
|
||||
|
||||
For the simplest example, consider some TOML file as just a list of keys and
|
||||
values:
|
||||
|
||||
|
@ -40,7 +30,7 @@ Perfection = [ 6, 28, 496, 8128 ]
|
|||
DOB = 1987-07-05T05:45:00Z
|
||||
```
|
||||
|
||||
Which could be defined in Go as:
|
||||
Which can be decoded with:
|
||||
|
||||
```go
|
||||
type Config struct {
|
||||
|
@ -48,20 +38,15 @@ type Config struct {
|
|||
Cats []string
|
||||
Pi float64
|
||||
Perfection []int
|
||||
DOB time.Time // requires `import time`
|
||||
DOB time.Time
|
||||
}
|
||||
```
|
||||
|
||||
And then decoded with:
|
||||
|
||||
```go
|
||||
var conf Config
|
||||
err := toml.Decode(tomlData, &conf)
|
||||
// handle error
|
||||
_, err := toml.Decode(tomlData, &conf)
|
||||
```
|
||||
|
||||
You can also use struct tags if your struct field name doesn't map to a TOML
|
||||
key value directly:
|
||||
You can also use struct tags if your struct field name doesn't map to a TOML key
|
||||
value directly:
|
||||
|
||||
```toml
|
||||
some_key_NAME = "wat"
|
||||
|
@ -73,139 +58,63 @@ type TOML struct {
|
|||
}
|
||||
```
|
||||
|
||||
Beware that like other most other decoders **only exported fields** are
|
||||
considered when encoding and decoding; private fields are silently ignored.
|
||||
Beware that like other decoders **only exported fields** are considered when
|
||||
encoding and decoding; private fields are silently ignored.
|
||||
|
||||
### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces
|
||||
Here's an example that automatically parses duration strings into
|
||||
`time.Duration` values:
|
||||
Here's an example that automatically parses values in a `mail.Address`:
|
||||
|
||||
```toml
|
||||
[[song]]
|
||||
name = "Thunder Road"
|
||||
duration = "4m49s"
|
||||
|
||||
[[song]]
|
||||
name = "Stairway to Heaven"
|
||||
duration = "8m03s"
|
||||
contacts = [
|
||||
"Donald Duck <donald@duckburg.com>",
|
||||
"Scrooge McDuck <scrooge@duckburg.com>",
|
||||
]
|
||||
```
|
||||
|
||||
Which can be decoded with:
|
||||
Can be decoded with:
|
||||
|
||||
```go
|
||||
type song struct {
|
||||
Name string
|
||||
Duration duration
|
||||
}
|
||||
type songs struct {
|
||||
Song []song
|
||||
}
|
||||
var favorites songs
|
||||
if _, err := toml.Decode(blob, &favorites); err != nil {
|
||||
log.Fatal(err)
|
||||
// Create address type which satisfies the encoding.TextUnmarshaler interface.
|
||||
type address struct {
|
||||
*mail.Address
|
||||
}
|
||||
|
||||
for _, s := range favorites.Song {
|
||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
||||
}
|
||||
```
|
||||
|
||||
And you'll also need a `duration` type that satisfies the
|
||||
`encoding.TextUnmarshaler` interface:
|
||||
|
||||
```go
|
||||
type duration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *duration) UnmarshalText(text []byte) error {
|
||||
func (a *address) UnmarshalText(text []byte) error {
|
||||
var err error
|
||||
d.Duration, err = time.ParseDuration(string(text))
|
||||
a.Address, err = mail.ParseAddress(string(text))
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode it.
|
||||
func decode() {
|
||||
blob := `
|
||||
contacts = [
|
||||
"Donald Duck <donald@duckburg.com>",
|
||||
"Scrooge McDuck <scrooge@duckburg.com>",
|
||||
]
|
||||
`
|
||||
|
||||
var contacts struct {
|
||||
Contacts []address
|
||||
}
|
||||
|
||||
_, err := toml.Decode(blob, &contacts)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, c := range contacts.Contacts {
|
||||
fmt.Printf("%#v\n", c.Address)
|
||||
}
|
||||
|
||||
// Output:
|
||||
// &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"}
|
||||
// &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"}
|
||||
}
|
||||
```
|
||||
|
||||
To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
|
||||
a similar way.
|
||||
|
||||
### More complex usage
|
||||
Here's an example of how to load the example from the official spec page:
|
||||
|
||||
```toml
|
||||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
||||
|
||||
# Line breaks are OK when inside arrays
|
||||
hosts = [
|
||||
"alpha",
|
||||
"omega"
|
||||
]
|
||||
```
|
||||
|
||||
And the corresponding Go types are:
|
||||
|
||||
```go
|
||||
type tomlConfig struct {
|
||||
Title string
|
||||
Owner ownerInfo
|
||||
DB database `toml:"database"`
|
||||
Servers map[string]server
|
||||
Clients clients
|
||||
}
|
||||
|
||||
type ownerInfo struct {
|
||||
Name string
|
||||
Org string `toml:"organization"`
|
||||
Bio string
|
||||
DOB time.Time
|
||||
}
|
||||
|
||||
type database struct {
|
||||
Server string
|
||||
Ports []int
|
||||
ConnMax int `toml:"connection_max"`
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
type server struct {
|
||||
IP string
|
||||
DC string
|
||||
}
|
||||
|
||||
type clients struct {
|
||||
Data [][]interface{}
|
||||
Hosts []string
|
||||
}
|
||||
```
|
||||
|
||||
Note that a case insensitive match will be tried if an exact match can't be
|
||||
found.
|
||||
|
||||
A working example of the above can be found in `_example/example.{go,toml}`.
|
||||
See the [`_example/`](/_example) directory for a more complex example.
|
||||
|
|
|
@ -1,32 +1,66 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"io/fs"
|
||||
"math"
|
||||
"os"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||
// TOML description of themselves.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalTOML(interface{}) error
|
||||
UnmarshalTOML(any) error
|
||||
}
|
||||
|
||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
||||
func Unmarshal(p []byte, v interface{}) error {
|
||||
_, err := Decode(string(p), v)
|
||||
// Unmarshal decodes the contents of data in TOML format into a pointer v.
|
||||
//
|
||||
// See [Decoder] for a description of the decoding process.
|
||||
func Unmarshal(data []byte, v any) error {
|
||||
_, err := NewDecoder(bytes.NewReader(data)).Decode(v)
|
||||
return err
|
||||
}
|
||||
|
||||
// Decode the TOML data in to the pointer v.
|
||||
//
|
||||
// See [Decoder] for a description of the decoding process.
|
||||
func Decode(data string, v any) (MetaData, error) {
|
||||
return NewDecoder(strings.NewReader(data)).Decode(v)
|
||||
}
|
||||
|
||||
// DecodeFile reads the contents of a file and decodes it with [Decode].
|
||||
func DecodeFile(path string, v any) (MetaData, error) {
|
||||
fp, err := os.Open(path)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
defer fp.Close()
|
||||
return NewDecoder(fp).Decode(v)
|
||||
}
|
||||
|
||||
// DecodeFS reads the contents of a file from [fs.FS] and decodes it with
|
||||
// [Decode].
|
||||
func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) {
|
||||
fp, err := fsys.Open(path)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
defer fp.Close()
|
||||
return NewDecoder(fp).Decode(v)
|
||||
}
|
||||
|
||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||
//
|
||||
// This type can be used for any value, which will cause decoding to be delayed.
|
||||
// You can use the PrimitiveDecode() function to "manually" decode these values.
|
||||
// You can use [PrimitiveDecode] to "manually" decode these values.
|
||||
//
|
||||
// NOTE: The underlying representation of a `Primitive` value is subject to
|
||||
// change. Do not rely on it.
|
||||
|
@ -35,43 +69,29 @@ func Unmarshal(p []byte, v interface{}) error {
|
|||
// overhead of reflection. They can be useful when you don't know the exact type
|
||||
// of TOML data until runtime.
|
||||
type Primitive struct {
|
||||
undecoded interface{}
|
||||
undecoded any
|
||||
context Key
|
||||
}
|
||||
|
||||
// The significand precision for float32 and float64 is 24 and 53 bits; this is
|
||||
// the range a natural number can be stored in a float without loss of data.
|
||||
const (
|
||||
maxSafeFloat32Int = 16777215 // 2^24-1
|
||||
maxSafeFloat64Int = 9007199254740991 // 2^53-1
|
||||
maxSafeFloat32Int = 16777215 // 2^24-1
|
||||
maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
|
||||
)
|
||||
|
||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
|
||||
// decodes a TOML value that has already been parsed. Valid primitive values
|
||||
// can *only* be obtained from values filled by the decoder functions,
|
||||
// including this method. (i.e., `v` may contain more `Primitive`
|
||||
// values.)
|
||||
//
|
||||
// Meta data for primitive values is included in the meta data returned by
|
||||
// the `Decode*` functions with one exception: keys returned by the Undecoded
|
||||
// method will only reflect keys that were decoded. Namely, any keys hidden
|
||||
// behind a Primitive will be considered undecoded. Executing this method will
|
||||
// update the undecoded keys in the meta data. (See the example.)
|
||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md.context = primValue.context
|
||||
defer func() { md.context = nil }()
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// Decoder decodes TOML data.
|
||||
//
|
||||
// TOML tables correspond to Go structs or maps (dealer's choice – they can be
|
||||
// used interchangeably).
|
||||
// TOML tables correspond to Go structs or maps; they can be used
|
||||
// interchangeably, but structs offer better type safety.
|
||||
//
|
||||
// TOML table arrays correspond to either a slice of structs or a slice of maps.
|
||||
//
|
||||
// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
|
||||
// in the local timezone.
|
||||
// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the
|
||||
// local timezone.
|
||||
//
|
||||
// [time.Duration] types are treated as nanoseconds if the TOML value is an
|
||||
// integer, or they're parsed with time.ParseDuration() if they're strings.
|
||||
//
|
||||
// All other TOML types (float, string, int, bool and array) correspond to the
|
||||
// obvious Go types.
|
||||
|
@ -80,9 +100,9 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
|||
// interface, in which case any primitive TOML value (floats, strings, integers,
|
||||
// booleans, datetimes) will be converted to a []byte and given to the value's
|
||||
// UnmarshalText method. See the Unmarshaler example for a demonstration with
|
||||
// time duration strings.
|
||||
// email addresses.
|
||||
//
|
||||
// Key mapping
|
||||
// # Key mapping
|
||||
//
|
||||
// TOML keys can map to either keys in a Go map or field names in a Go struct.
|
||||
// The special `toml` struct tag can be used to map TOML keys to struct fields
|
||||
|
@ -109,10 +129,11 @@ func NewDecoder(r io.Reader) *Decoder {
|
|||
var (
|
||||
unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
|
||||
unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
|
||||
primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Decode TOML data in to the pointer `v`.
|
||||
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
||||
func (dec *Decoder) Decode(v any) (MetaData, error) {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.Kind() != reflect.Ptr {
|
||||
s := "%q"
|
||||
|
@ -120,25 +141,25 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
|||
s = "%v"
|
||||
}
|
||||
|
||||
return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v))
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v))
|
||||
}
|
||||
if rv.IsNil() {
|
||||
return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v))
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v))
|
||||
}
|
||||
|
||||
// Check if this is a supported type: struct, map, interface{}, or something
|
||||
// that implements UnmarshalTOML or UnmarshalText.
|
||||
// Check if this is a supported type: struct, map, any, or something that
|
||||
// implements UnmarshalTOML or UnmarshalText.
|
||||
rv = indirect(rv)
|
||||
rt := rv.Type()
|
||||
if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map &&
|
||||
!(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) &&
|
||||
!rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) {
|
||||
return MetaData{}, e("cannot decode to type %s", rt)
|
||||
return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt)
|
||||
}
|
||||
|
||||
// TODO: parser should read from io.Reader? Or at the very least, make it
|
||||
// read from []byte rather than string
|
||||
data, err := ioutil.ReadAll(dec.r)
|
||||
data, err := io.ReadAll(dec.r)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
|
@ -150,30 +171,29 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
|
|||
|
||||
md := MetaData{
|
||||
mapping: p.mapping,
|
||||
types: p.types,
|
||||
keyInfo: p.keyInfo,
|
||||
keys: p.ordered,
|
||||
decoded: make(map[string]struct{}, len(p.ordered)),
|
||||
context: nil,
|
||||
data: data,
|
||||
}
|
||||
return md, md.unify(p.mapping, rv)
|
||||
}
|
||||
|
||||
// Decode the TOML data in to the pointer v.
|
||||
// PrimitiveDecode is just like the other Decode* functions, except it decodes a
|
||||
// TOML value that has already been parsed. Valid primitive values can *only* be
|
||||
// obtained from values filled by the decoder functions, including this method.
|
||||
// (i.e., v may contain more [Primitive] values.)
|
||||
//
|
||||
// See the documentation on Decoder for a description of the decoding process.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
return NewDecoder(strings.NewReader(data)).Decode(v)
|
||||
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
|
||||
// contents of the file at path and decode it for you.
|
||||
func DecodeFile(path string, v interface{}) (MetaData, error) {
|
||||
fp, err := os.Open(path)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
defer fp.Close()
|
||||
return NewDecoder(fp).Decode(v)
|
||||
// Meta data for primitive values is included in the meta data returned by the
|
||||
// Decode* functions with one exception: keys returned by the Undecoded method
|
||||
// will only reflect keys that were decoded. Namely, any keys hidden behind a
|
||||
// Primitive will be considered undecoded. Executing this method will update the
|
||||
// undecoded keys in the meta data. (See the example.)
|
||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error {
|
||||
md.context = primValue.context
|
||||
defer func() { md.context = nil }()
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
|
||||
|
@ -181,10 +201,10 @@ func DecodeFile(path string, v interface{}) (MetaData, error) {
|
|||
//
|
||||
// Any type mismatch produces an error. Finding a type that we don't know
|
||||
// how to handle produces an unsupported type error.
|
||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unify(data any, rv reflect.Value) error {
|
||||
// Special case. Look for a `Primitive` value.
|
||||
// TODO: #76 would make this superfluous after implemented.
|
||||
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
|
||||
if rv.Type() == primitiveType {
|
||||
// Save the undecoded data and the key context into the primitive
|
||||
// value.
|
||||
context := make(Key, len(md.context))
|
||||
|
@ -196,17 +216,18 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Special case. Unmarshaler Interface support.
|
||||
if rv.CanAddr() {
|
||||
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
|
||||
return v.UnmarshalTOML(data)
|
||||
rvi := rv.Interface()
|
||||
if v, ok := rvi.(Unmarshaler); ok {
|
||||
err := v.UnmarshalTOML(data)
|
||||
if err != nil {
|
||||
return md.parseErr(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Special case. Look for a value satisfying the TextUnmarshaler interface.
|
||||
if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
|
||||
if v, ok := rvi.(encoding.TextUnmarshaler); ok {
|
||||
return md.unifyText(data, v)
|
||||
}
|
||||
|
||||
// TODO:
|
||||
// The behavior here is incorrect whenever a Go type satisfies the
|
||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
|
||||
|
@ -217,19 +238,10 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
|||
|
||||
k := rv.Kind()
|
||||
|
||||
// laziness
|
||||
if k >= reflect.Int && k <= reflect.Uint64 {
|
||||
return md.unifyInt(data, rv)
|
||||
}
|
||||
switch k {
|
||||
case reflect.Ptr:
|
||||
elem := reflect.New(rv.Type().Elem())
|
||||
err := md.unify(data, reflect.Indirect(elem))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rv.Set(elem)
|
||||
return nil
|
||||
case reflect.Struct:
|
||||
return md.unifyStruct(data, rv)
|
||||
case reflect.Map:
|
||||
|
@ -243,25 +255,23 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
|||
case reflect.Bool:
|
||||
return md.unifyBool(data, rv)
|
||||
case reflect.Interface:
|
||||
// we only support empty interfaces.
|
||||
if rv.NumMethod() > 0 {
|
||||
return e("unsupported type %s", rv.Type())
|
||||
if rv.NumMethod() > 0 { /// Only empty interfaces are supported.
|
||||
return md.e("unsupported type %s", rv.Type())
|
||||
}
|
||||
return md.unifyAnything(data, rv)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return md.unifyFloat64(data, rv)
|
||||
}
|
||||
return e("unsupported type %s", rv.Kind())
|
||||
return md.e("unsupported type %s", rv.Kind())
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]any)
|
||||
if !ok {
|
||||
if mapping == nil {
|
||||
return nil
|
||||
}
|
||||
return e("type mismatch for %s: expected table but found %T",
|
||||
rv.Type().String(), mapping)
|
||||
return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping))
|
||||
}
|
||||
|
||||
for key, datum := range tmap {
|
||||
|
@ -286,27 +296,28 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
|||
if isUnifiable(subv) {
|
||||
md.decoded[md.context.add(key).String()] = struct{}{}
|
||||
md.context = append(md.context, key)
|
||||
|
||||
err := md.unify(datum, subv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
} else if f.name != "" {
|
||||
return e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
|
||||
return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
if k := rv.Type().Key().Kind(); k != reflect.String {
|
||||
return fmt.Errorf(
|
||||
"toml: cannot decode to a map with non-string key type (%s in %q)",
|
||||
k, rv.Type())
|
||||
func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error {
|
||||
keyType := rv.Type().Key().Kind()
|
||||
if keyType != reflect.String && keyType != reflect.Interface {
|
||||
return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)",
|
||||
keyType, rv.Type())
|
||||
}
|
||||
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
tmap, ok := mapping.(map[string]any)
|
||||
if !ok {
|
||||
if tmap == nil {
|
||||
return nil
|
||||
|
@ -321,19 +332,28 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
|||
md.context = append(md.context, k)
|
||||
|
||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
||||
if err := md.unify(v, rvval); err != nil {
|
||||
|
||||
err := md.unify(v, indirect(rvval))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
|
||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
||||
rvkey.SetString(k)
|
||||
|
||||
switch keyType {
|
||||
case reflect.Interface:
|
||||
rvkey.Set(reflect.ValueOf(k))
|
||||
case reflect.String:
|
||||
rvkey.SetString(k)
|
||||
}
|
||||
|
||||
rv.SetMapIndex(rvkey, rvval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyArray(data any, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
|
@ -342,12 +362,12 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
|||
return md.badtype("slice", data)
|
||||
}
|
||||
if l := datav.Len(); l != rv.Len() {
|
||||
return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
|
||||
return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l)
|
||||
}
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifySlice(data any, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
|
@ -374,7 +394,19 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyString(data any, rv reflect.Value) error {
|
||||
_, ok := rv.Interface().(json.Number)
|
||||
if ok {
|
||||
if i, ok := data.(int64); ok {
|
||||
rv.SetString(strconv.FormatInt(i, 10))
|
||||
} else if f, ok := data.(float64); ok {
|
||||
rv.SetString(strconv.FormatFloat(f, 'f', -1, 64))
|
||||
} else {
|
||||
return md.badtype("string", data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if s, ok := data.(string); ok {
|
||||
rv.SetString(s)
|
||||
return nil
|
||||
|
@ -382,12 +414,14 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
|||
return md.badtype("string", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error {
|
||||
rvk := rv.Kind()
|
||||
|
||||
if num, ok := data.(float64); ok {
|
||||
switch rv.Kind() {
|
||||
switch rvk {
|
||||
case reflect.Float32:
|
||||
if num < -math.MaxFloat32 || num > math.MaxFloat32 {
|
||||
return e("value %f is out of range for float32", num)
|
||||
return md.parseErr(errParseRange{i: num, size: rvk.String()})
|
||||
}
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
|
@ -399,74 +433,61 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
|||
}
|
||||
|
||||
if num, ok := data.(int64); ok {
|
||||
switch rv.Kind() {
|
||||
case reflect.Float32:
|
||||
if num < -maxSafeFloat32Int || num > maxSafeFloat32Int {
|
||||
return e("value %d is out of range for float32", num)
|
||||
}
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
if num < -maxSafeFloat64Int || num > maxSafeFloat64Int {
|
||||
return e("value %d is out of range for float64", num)
|
||||
}
|
||||
rv.SetFloat(float64(num))
|
||||
default:
|
||||
panic("bug")
|
||||
if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) ||
|
||||
(rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) {
|
||||
return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()})
|
||||
}
|
||||
rv.SetFloat(float64(num))
|
||||
return nil
|
||||
}
|
||||
|
||||
return md.badtype("float", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(int64); ok {
|
||||
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Int8:
|
||||
if num < math.MinInt8 || num > math.MaxInt8 {
|
||||
return e("value %d is out of range for int8", num)
|
||||
}
|
||||
case reflect.Int16:
|
||||
if num < math.MinInt16 || num > math.MaxInt16 {
|
||||
return e("value %d is out of range for int16", num)
|
||||
}
|
||||
case reflect.Int32:
|
||||
if num < math.MinInt32 || num > math.MaxInt32 {
|
||||
return e("value %d is out of range for int32", num)
|
||||
}
|
||||
func (md *MetaData) unifyInt(data any, rv reflect.Value) error {
|
||||
_, ok := rv.Interface().(time.Duration)
|
||||
if ok {
|
||||
// Parse as string duration, and fall back to regular integer parsing
|
||||
// (as nanosecond) if this is not a string.
|
||||
if s, ok := data.(string); ok {
|
||||
dur, err := time.ParseDuration(s)
|
||||
if err != nil {
|
||||
return md.parseErr(errParseDuration{s})
|
||||
}
|
||||
rv.SetInt(num)
|
||||
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
|
||||
unum := uint64(num)
|
||||
switch rv.Kind() {
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Uint8:
|
||||
if num < 0 || unum > math.MaxUint8 {
|
||||
return e("value %d is out of range for uint8", num)
|
||||
}
|
||||
case reflect.Uint16:
|
||||
if num < 0 || unum > math.MaxUint16 {
|
||||
return e("value %d is out of range for uint16", num)
|
||||
}
|
||||
case reflect.Uint32:
|
||||
if num < 0 || unum > math.MaxUint32 {
|
||||
return e("value %d is out of range for uint32", num)
|
||||
}
|
||||
}
|
||||
rv.SetUint(unum)
|
||||
} else {
|
||||
panic("unreachable")
|
||||
rv.SetInt(int64(dur))
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return md.badtype("integer", data)
|
||||
|
||||
num, ok := data.(int64)
|
||||
if !ok {
|
||||
return md.badtype("integer", data)
|
||||
}
|
||||
|
||||
rvk := rv.Kind()
|
||||
switch {
|
||||
case rvk >= reflect.Int && rvk <= reflect.Int64:
|
||||
if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) ||
|
||||
(rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) ||
|
||||
(rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) {
|
||||
return md.parseErr(errParseRange{i: num, size: rvk.String()})
|
||||
}
|
||||
rv.SetInt(num)
|
||||
case rvk >= reflect.Uint && rvk <= reflect.Uint64:
|
||||
unum := uint64(num)
|
||||
if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) ||
|
||||
rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) ||
|
||||
rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) {
|
||||
return md.parseErr(errParseRange{i: num, size: rvk.String()})
|
||||
}
|
||||
rv.SetUint(unum)
|
||||
default:
|
||||
panic("unreachable")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyBool(data any, rv reflect.Value) error {
|
||||
if b, ok := data.(bool); ok {
|
||||
rv.SetBool(b)
|
||||
return nil
|
||||
|
@ -474,12 +495,12 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
|||
return md.badtype("boolean", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
||||
func (md *MetaData) unifyAnything(data any, rv reflect.Value) error {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
|
||||
func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error {
|
||||
var s string
|
||||
switch sdata := data.(type) {
|
||||
case Marshaler:
|
||||
|
@ -488,7 +509,7 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
|
|||
return err
|
||||
}
|
||||
s = string(text)
|
||||
case TextMarshaler:
|
||||
case encoding.TextMarshaler:
|
||||
text, err := sdata.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -508,17 +529,40 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro
|
|||
return md.badtype("primitive (string-like)", data)
|
||||
}
|
||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
||||
return err
|
||||
return md.parseErr(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) badtype(dst string, data interface{}) error {
|
||||
return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst)
|
||||
func (md *MetaData) badtype(dst string, data any) error {
|
||||
return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst)
|
||||
}
|
||||
|
||||
func (md *MetaData) parseErr(err error) error {
|
||||
k := md.context.String()
|
||||
return ParseError{
|
||||
LastKey: k,
|
||||
Position: md.keyInfo[k].pos,
|
||||
Line: md.keyInfo[k].pos.Line,
|
||||
err: err,
|
||||
input: string(md.data),
|
||||
}
|
||||
}
|
||||
|
||||
func (md *MetaData) e(format string, args ...any) error {
|
||||
f := "toml: "
|
||||
if len(md.context) > 0 {
|
||||
f = fmt.Sprintf("toml: (last key %q): ", md.context)
|
||||
p := md.keyInfo[md.context.String()].pos
|
||||
if p.Line > 0 {
|
||||
f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context)
|
||||
}
|
||||
}
|
||||
return fmt.Errorf(f+format, args...)
|
||||
}
|
||||
|
||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
||||
func rvalue(v interface{}) reflect.Value {
|
||||
func rvalue(v any) reflect.Value {
|
||||
return indirect(reflect.ValueOf(v))
|
||||
}
|
||||
|
||||
|
@ -533,7 +577,11 @@ func indirect(v reflect.Value) reflect.Value {
|
|||
if v.Kind() != reflect.Ptr {
|
||||
if v.CanSet() {
|
||||
pv := v.Addr()
|
||||
if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
|
||||
pvi := pv.Interface()
|
||||
if _, ok := pvi.(encoding.TextUnmarshaler); ok {
|
||||
return pv
|
||||
}
|
||||
if _, ok := pvi.(Unmarshaler); ok {
|
||||
return pv
|
||||
}
|
||||
}
|
||||
|
@ -549,12 +597,17 @@ func isUnifiable(rv reflect.Value) bool {
|
|||
if rv.CanSet() {
|
||||
return true
|
||||
}
|
||||
if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
|
||||
rvi := rv.Interface()
|
||||
if _, ok := rvi.(encoding.TextUnmarshaler); ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := rvi.(Unmarshaler); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func e(format string, args ...interface{}) error {
|
||||
return fmt.Errorf("toml: "+format, args...)
|
||||
// fmt %T with "interface {}" replaced with "any", which is far more readable.
|
||||
func fmtType(t any) string {
|
||||
return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any")
|
||||
}
|
||||
|
|
|
@ -1,19 +0,0 @@
|
|||
//go:build go1.16
|
||||
// +build go1.16
|
||||
|
||||
package toml
|
||||
|
||||
import (
|
||||
"io/fs"
|
||||
)
|
||||
|
||||
// DecodeFS is just like Decode, except it will automatically read the contents
|
||||
// of the file at `path` from a fs.FS instance.
|
||||
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
|
||||
fp, err := fsys.Open(path)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
defer fp.Close()
|
||||
return NewDecoder(fp).Decode(v)
|
||||
}
|
|
@ -5,17 +5,25 @@ import (
|
|||
"io"
|
||||
)
|
||||
|
||||
// TextMarshaler is an alias for encoding.TextMarshaler.
|
||||
//
|
||||
// Deprecated: use encoding.TextMarshaler
|
||||
type TextMarshaler encoding.TextMarshaler
|
||||
|
||||
// TextUnmarshaler is an alias for encoding.TextUnmarshaler.
|
||||
//
|
||||
// Deprecated: use encoding.TextUnmarshaler
|
||||
type TextUnmarshaler encoding.TextUnmarshaler
|
||||
|
||||
// DecodeReader is an alias for NewDecoder(r).Decode(v).
|
||||
//
|
||||
// Deprecated: use NewDecoder(reader).Decode(&value).
|
||||
func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) }
|
||||
|
||||
// PrimitiveDecode is an alias for MetaData.PrimitiveDecode().
|
||||
//
|
||||
// Deprecated: use MetaData.PrimitiveDecode.
|
||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
func PrimitiveDecode(primValue Primitive, v any) error {
|
||||
md := MetaData{decoded: make(map[string]struct{})}
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// Deprecated: use NewDecoder(reader).Decode(&value).
|
||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) }
|
||||
|
|
|
@ -1,13 +1,8 @@
|
|||
/*
|
||||
Package toml implements decoding and encoding of TOML files.
|
||||
|
||||
This package supports TOML v1.0.0, as listed on https://toml.io
|
||||
|
||||
There is also support for delaying decoding with the Primitive type, and
|
||||
querying the set of keys in a TOML document with the MetaData type.
|
||||
|
||||
The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
|
||||
and can be used to verify if TOML document is valid. It can also be used to
|
||||
print the type of each key.
|
||||
*/
|
||||
// Package toml implements decoding and encoding of TOML files.
|
||||
//
|
||||
// This package supports TOML v1.0.0, as specified at https://toml.io
|
||||
//
|
||||
// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
|
||||
// and can be used to verify if TOML document is valid. It can also be used to
|
||||
// print the type of each key.
|
||||
package toml
|
||||
|
|
|
@ -2,7 +2,9 @@ package toml
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
@ -63,18 +65,38 @@ var dblQuotedReplacer = strings.NewReplacer(
|
|||
"\x7f", `\u007f`,
|
||||
)
|
||||
|
||||
var (
|
||||
marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
|
||||
timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Marshaler is the interface implemented by types that can marshal themselves
|
||||
// into valid TOML.
|
||||
type Marshaler interface {
|
||||
MarshalTOML() ([]byte, error)
|
||||
}
|
||||
|
||||
// Marshal returns a TOML representation of the Go value.
|
||||
//
|
||||
// See [Encoder] for a description of the encoding process.
|
||||
func Marshal(v any) ([]byte, error) {
|
||||
buff := new(bytes.Buffer)
|
||||
if err := NewEncoder(buff).Encode(v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buff.Bytes(), nil
|
||||
}
|
||||
|
||||
// Encoder encodes a Go to a TOML document.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same as
|
||||
// for the Decode* functions.
|
||||
// for [Decode].
|
||||
//
|
||||
// The toml.Marshaler and encoder.TextMarshaler interfaces are supported to
|
||||
// time.Time is encoded as a RFC 3339 string, and time.Duration as its string
|
||||
// representation.
|
||||
//
|
||||
// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to
|
||||
// encoding the value as custom TOML.
|
||||
//
|
||||
// If you want to write arbitrary binary data then you will need to use
|
||||
|
@ -85,6 +107,17 @@ type Marshaler interface {
|
|||
//
|
||||
// Go maps will be sorted alphabetically by key for deterministic output.
|
||||
//
|
||||
// The toml struct tag can be used to provide the key name; if omitted the
|
||||
// struct field name will be used. If the "omitempty" option is present the
|
||||
// following value will be skipped:
|
||||
//
|
||||
// - arrays, slices, maps, and string with len of 0
|
||||
// - struct with all zero values
|
||||
// - bool false
|
||||
//
|
||||
// If omitzero is given all int and float types with a value of 0 will be
|
||||
// skipped.
|
||||
//
|
||||
// Encoding Go values without a corresponding TOML representation will return an
|
||||
// error. Examples of this includes maps with non-string keys, slices with nil
|
||||
// elements, embedded non-struct types, and nested slices containing maps or
|
||||
|
@ -94,28 +127,24 @@ type Marshaler interface {
|
|||
// NOTE: only exported keys are encoded due to the use of reflection. Unexported
|
||||
// keys are silently discarded.
|
||||
type Encoder struct {
|
||||
// String to use for a single indentation level; default is two spaces.
|
||||
Indent string
|
||||
|
||||
Indent string // string for a single indentation level; default is two spaces.
|
||||
hasWritten bool // written any output to w yet?
|
||||
w *bufio.Writer
|
||||
hasWritten bool // written any output to w yet?
|
||||
}
|
||||
|
||||
// NewEncoder create a new Encoder.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: bufio.NewWriter(w),
|
||||
Indent: " ",
|
||||
}
|
||||
return &Encoder{w: bufio.NewWriter(w), Indent: " "}
|
||||
}
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the Encoder's writer.
|
||||
// Encode writes a TOML representation of the Go value to the [Encoder]'s writer.
|
||||
//
|
||||
// An error is returned if the value given cannot be encoded to a valid TOML
|
||||
// document.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
func (enc *Encoder) Encode(v any) error {
|
||||
rv := eindirect(reflect.ValueOf(v))
|
||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
||||
err := enc.safeEncode(Key([]string{}), rv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return enc.w.Flush()
|
||||
|
@ -136,18 +165,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
|||
}
|
||||
|
||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
// Special case: time needs to be in ISO8601 format.
|
||||
//
|
||||
// Special case: if we can marshal the type to text, then we used that. This
|
||||
// prevents the encoder for handling these types as generic structs (or
|
||||
// whatever the underlying type of a TextMarshaler is).
|
||||
switch t := rv.Interface().(type) {
|
||||
case time.Time, encoding.TextMarshaler, Marshaler:
|
||||
// If we can marshal the type to text, then we use that. This prevents the
|
||||
// encoder for handling these types as generic structs (or whatever the
|
||||
// underlying type of a TextMarshaler is).
|
||||
switch {
|
||||
case isMarshaler(rv):
|
||||
enc.writeKeyValue(key, rv, false)
|
||||
return
|
||||
// TODO: #76 would make this superfluous after implemented.
|
||||
case Primitive:
|
||||
enc.encode(key, reflect.ValueOf(t.undecoded))
|
||||
case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented.
|
||||
enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -212,18 +238,44 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
|||
if err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
enc.writeQuoted(string(s))
|
||||
if s == nil {
|
||||
encPanic(errors.New("MarshalTOML returned nil and no error"))
|
||||
}
|
||||
enc.w.Write(s)
|
||||
return
|
||||
case encoding.TextMarshaler:
|
||||
s, err := v.MarshalText()
|
||||
if err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
if s == nil {
|
||||
encPanic(errors.New("MarshalText returned nil and no error"))
|
||||
}
|
||||
enc.writeQuoted(string(s))
|
||||
return
|
||||
case time.Duration:
|
||||
enc.writeQuoted(v.String())
|
||||
return
|
||||
case json.Number:
|
||||
n, _ := rv.Interface().(json.Number)
|
||||
|
||||
if n == "" { /// Useful zero value.
|
||||
enc.w.WriteByte('0')
|
||||
return
|
||||
} else if v, err := n.Int64(); err == nil {
|
||||
enc.eElement(reflect.ValueOf(v))
|
||||
return
|
||||
} else if v, err := n.Float64(); err == nil {
|
||||
enc.eElement(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n))
|
||||
}
|
||||
|
||||
switch rv.Kind() {
|
||||
case reflect.Ptr:
|
||||
enc.eElement(rv.Elem())
|
||||
return
|
||||
case reflect.String:
|
||||
enc.writeQuoted(rv.String())
|
||||
case reflect.Bool:
|
||||
|
@ -235,18 +287,30 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
|||
case reflect.Float32:
|
||||
f := rv.Float()
|
||||
if math.IsNaN(f) {
|
||||
if math.Signbit(f) {
|
||||
enc.wf("-")
|
||||
}
|
||||
enc.wf("nan")
|
||||
} else if math.IsInf(f, 0) {
|
||||
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
|
||||
if math.Signbit(f) {
|
||||
enc.wf("-")
|
||||
}
|
||||
enc.wf("inf")
|
||||
} else {
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
|
||||
}
|
||||
case reflect.Float64:
|
||||
f := rv.Float()
|
||||
if math.IsNaN(f) {
|
||||
if math.Signbit(f) {
|
||||
enc.wf("-")
|
||||
}
|
||||
enc.wf("nan")
|
||||
} else if math.IsInf(f, 0) {
|
||||
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
|
||||
if math.Signbit(f) {
|
||||
enc.wf("-")
|
||||
}
|
||||
enc.wf("inf")
|
||||
} else {
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
|
||||
}
|
||||
|
@ -259,7 +323,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
|
|||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
default:
|
||||
encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
|
||||
encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface())))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -280,7 +344,7 @@ func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
|||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
elem := eindirect(rv.Index(i))
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
|
@ -294,7 +358,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
|||
encPanic(errNoKey)
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
trv := rv.Index(i)
|
||||
trv := eindirect(rv.Index(i))
|
||||
if isNil(trv) {
|
||||
continue
|
||||
}
|
||||
|
@ -319,7 +383,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
|||
}
|
||||
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
|
||||
switch rv := eindirect(rv); rv.Kind() {
|
||||
switch rv.Kind() {
|
||||
case reflect.Map:
|
||||
enc.eMap(key, rv, inline)
|
||||
case reflect.Struct:
|
||||
|
@ -341,7 +405,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
|||
var mapKeysDirect, mapKeysSub []string
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
||||
if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
|
@ -351,7 +415,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
|||
var writeMapKeys = func(mapKeys []string, trailC bool) {
|
||||
sort.Strings(mapKeys)
|
||||
for i, mapKey := range mapKeys {
|
||||
val := rv.MapIndex(reflect.ValueOf(mapKey))
|
||||
val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey)))
|
||||
if isNil(val) {
|
||||
continue
|
||||
}
|
||||
|
@ -379,6 +443,13 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
|
|||
|
||||
const is32Bit = (32 << (^uint(0) >> 63)) == 32
|
||||
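// The shift trick: ^uint(0) is all one-bits, so ^uint(0)>>63 is 1 when uint
// is 64 bits wide (32<<1 == 64, and the comparison is false) and 0 when it
// is 32 bits wide (32<<0 == 32, and the comparison is true).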
|
||||
func pointerTo(t reflect.Type) reflect.Type {
|
||||
if t.Kind() == reflect.Ptr {
|
||||
return pointerTo(t.Elem())
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
||||
// Write keys for fields directly under this key first, because if we write
|
||||
// a field that creates a new table then all keys under it will be in that
|
||||
|
@ -395,48 +466,42 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
|
||||
isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct
|
||||
if f.PkgPath != "" && !isEmbed { /// Skip unexported fields.
|
||||
continue
|
||||
}
|
||||
opts := getOptions(f.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
|
||||
frv := rv.Field(i)
|
||||
frv := eindirect(rv.Field(i))
|
||||
|
||||
if is32Bit {
|
||||
// Copy so it works correctly on 32-bit archs; not clear why this
|
||||
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
||||
// This also works fine on 64bit, but 32bit archs are somewhat
|
||||
// rare and this is a wee bit faster.
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
start = copyStart
|
||||
}
|
||||
|
||||
// Treat anonymous struct fields with tag names as though they are
|
||||
// not anonymous, like encoding/json does.
|
||||
//
|
||||
// Non-struct anonymous fields use the normal encoding logic.
|
||||
if f.Anonymous {
|
||||
t := f.Type
|
||||
switch t.Kind() {
|
||||
case reflect.Struct:
|
||||
if getOptions(f.Tag).name == "" {
|
||||
addFields(t, frv, append(start, f.Index...))
|
||||
continue
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
|
||||
if !frv.IsNil() {
|
||||
addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
|
||||
}
|
||||
continue
|
||||
}
|
||||
if isEmbed {
|
||||
if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct {
|
||||
addFields(frv.Type(), frv, append(start, f.Index...))
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if typeIsTable(tomlTypeOfGo(frv)) {
|
||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
||||
} else {
|
||||
// Copy so it works correct on 32bit archs; not clear why this
|
||||
// is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4
|
||||
// This also works fine on 64bit, but 32bit archs are somewhat
|
||||
// rare and this is a wee bit faster.
|
||||
if is32Bit {
|
||||
copyStart := make([]int, len(start))
|
||||
copy(copyStart, start)
|
||||
fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
}
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
}
|
||||
}
|
||||
}
|
||||
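The anonymous-field handling above mirrors encoding/json: an untagged embedded struct is flattened into its parent table, while a tagged embedded struct is encoded like a regular field. A hedged sketch of the effect; the type names are made up and the exact output layout is an assumption based on the rules above:

	package main

	import (
		"os"

		"github.com/BurntSushi/toml"
	)

	type Base struct{ ID int }

	type Meta struct{ Name string }

	type Item struct {
		Base                // untagged embed: ID is inlined into the parent table
		Meta  `toml:"meta"` // tagged embed: encoded as a [meta] sub-table
		Title string
	}

	func main() {
		_ = toml.NewEncoder(os.Stdout).Encode(Item{Base{1}, Meta{"x"}, "hi"})
		// Expected shape:
		//   ID = 1
		//   Title = "hi"
		//   [meta]
		//     Name = "x"
	}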
|
@ -447,21 +512,25 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
|
|||
fieldType := rt.FieldByIndex(fieldIndex)
|
||||
fieldVal := rv.FieldByIndex(fieldIndex)
|
||||
|
||||
if isNil(fieldVal) { /// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
|
||||
opts := getOptions(fieldType.Tag)
|
||||
if opts.skip {
|
||||
continue
|
||||
}
|
||||
if opts.omitempty && isEmpty(fieldVal) {
|
||||
continue
|
||||
}
|
||||
|
||||
fieldVal = eindirect(fieldVal)
|
||||
|
||||
if isNil(fieldVal) { /// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
|
||||
keyName := fieldType.Name
|
||||
if opts.name != "" {
|
||||
keyName = opts.name
|
||||
}
|
||||
if opts.omitempty && isEmpty(fieldVal) {
|
||||
continue
|
||||
}
|
||||
|
||||
if opts.omitzero && isZero(fieldVal) {
|
||||
continue
|
||||
}
|
||||
|
@ -498,6 +567,21 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
|
|||
if isNil(rv) || !rv.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if rv.Kind() == reflect.Struct {
|
||||
if rv.Type() == timeType {
|
||||
return tomlDatetime
|
||||
}
|
||||
if isMarshaler(rv) {
|
||||
return tomlString
|
||||
}
|
||||
return tomlHash
|
||||
}
|
||||
|
||||
if isMarshaler(rv) {
|
||||
return tomlString
|
||||
}
|
||||
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
return tomlBool
|
||||
|
@ -509,7 +593,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
|
|||
case reflect.Float32, reflect.Float64:
|
||||
return tomlFloat
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlHash, tomlArrayType(rv)) {
|
||||
if isTableArray(rv) {
|
||||
return tomlArrayHash
|
||||
}
|
||||
return tomlArray
|
||||
|
@ -519,67 +603,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
|
|||
return tomlString
|
||||
case reflect.Map:
|
||||
return tomlHash
|
||||
case reflect.Struct:
|
||||
if _, ok := rv.Interface().(time.Time); ok {
|
||||
return tomlDatetime
|
||||
}
|
||||
if isMarshaler(rv) {
|
||||
return tomlString
|
||||
}
|
||||
return tomlHash
|
||||
default:
|
||||
if isMarshaler(rv) {
|
||||
return tomlString
|
||||
}
|
||||
|
||||
encPanic(errors.New("unsupported type: " + rv.Kind().String()))
|
||||
panic("unreachable")
|
||||
}
|
||||
}
|
||||
|
||||
func isMarshaler(rv reflect.Value) bool {
|
||||
switch rv.Interface().(type) {
|
||||
case encoding.TextMarshaler:
|
||||
return true
|
||||
case Marshaler:
|
||||
return true
|
||||
}
|
||||
|
||||
// Someone used a pointer receiver: we can make it work for pointer values.
|
||||
if rv.CanAddr() {
|
||||
if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := rv.Addr().Interface().(Marshaler); ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml)
|
||||
}
|
||||
|
||||
// tomlArrayType returns the element type of a TOML array. The type returned
|
||||
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
|
||||
// slice). This function may also panic if it finds a type that cannot be
|
||||
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
|
||||
// nested arrays of tables).
|
||||
func tomlArrayType(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
|
||||
return nil
|
||||
// isTableArray reports if all entries in the array or slice are a table.
|
||||
func isTableArray(arr reflect.Value) bool {
|
||||
if isNil(arr) || !arr.IsValid() || arr.Len() == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
/// Don't allow nil.
|
||||
rvlen := rv.Len()
|
||||
for i := 1; i < rvlen; i++ {
|
||||
if tomlTypeOfGo(rv.Index(i)) == nil {
|
||||
ret := true
|
||||
for i := 0; i < arr.Len(); i++ {
|
||||
tt := tomlTypeOfGo(eindirect(arr.Index(i)))
|
||||
// Don't allow nil.
|
||||
if tt == nil {
|
||||
encPanic(errArrayNilElement)
|
||||
}
|
||||
}
|
||||
|
||||
firstType := tomlTypeOfGo(rv.Index(0))
|
||||
if firstType == nil {
|
||||
encPanic(errArrayNilElement)
|
||||
if ret && !typeEqual(tomlHash, tt) {
|
||||
ret = false
|
||||
}
|
||||
}
|
||||
return firstType
|
||||
return ret
|
||||
}
|
||||
|
||||
type tagOptions struct {
|
||||
|
@ -624,8 +676,26 @@ func isEmpty(rv reflect.Value) bool {
|
|||
switch rv.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
||||
return rv.Len() == 0
|
||||
case reflect.Struct:
|
||||
if rv.Type().Comparable() {
|
||||
return reflect.Zero(rv.Type()).Interface() == rv.Interface()
|
||||
}
|
||||
// Need to also check if all the fields are empty, otherwise something
|
||||
// like this with uncomparable types will always return true:
|
||||
//
|
||||
// type a struct{ field b }
|
||||
// type b struct{ s []string }
|
||||
// s := a{field: b{s: []string{"AAA"}}}
|
||||
for i := 0; i < rv.NumField(); i++ {
|
||||
if !isEmpty(rv.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Bool:
|
||||
return !rv.Bool()
|
||||
case reflect.Ptr:
|
||||
return rv.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
||||
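The struct branch above is what makes omitempty work for non-comparable structs: every field is checked recursively instead of comparing against the zero value. A small hedged sketch; the type names below are made up for illustration:

	package main

	import (
		"os"

		"github.com/BurntSushi/toml"
	)

	// inner has a non-comparable field, so emptiness is checked field by field.
	type inner struct{ S []string }

	type outer struct {
		Opt inner `toml:"opt,omitempty"`
	}

	func main() {
		_ = toml.NewEncoder(os.Stdout).Encode(outer{})                             // expected: nothing written for "opt"
		_ = toml.NewEncoder(os.Stdout).Encode(outer{Opt: inner{S: []string{"x"}}}) // expected: an [opt] table with S = ["x"]
	}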
|
@ -638,19 +708,21 @@ func (enc *Encoder) newline() {
|
|||
|
||||
// Write a key/value pair:
|
||||
//
|
||||
// key = <any value>
|
||||
// key = <any value>
|
||||
//
|
||||
// This is also used for "k = v" in inline tables; so something like this will
|
||||
// be written in three calls:
|
||||
//
|
||||
// ┌────────────────────┐
|
||||
// │ ┌───┐ ┌─────┐│
|
||||
// v v v v vv
|
||||
// key = {k = v, k2 = v2}
|
||||
//
|
||||
// ┌───────────────────┐
|
||||
// │ ┌───┐ ┌────┐│
|
||||
// v v v v vv
|
||||
// key = {k = 1, k2 = 2}
|
||||
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
|
||||
/// Marshaler used on top-level document; call eElement() to just call
|
||||
/// Marshal{TOML,Text}.
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
enc.eElement(val)
|
||||
return
|
||||
}
|
||||
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
|
||||
enc.eElement(val)
|
||||
|
@ -659,7 +731,7 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
|
|||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) wf(format string, v ...interface{}) {
|
||||
func (enc *Encoder) wf(format string, v ...any) {
|
||||
_, err := fmt.Fprintf(enc.w, format, v...)
|
||||
if err != nil {
|
||||
encPanic(err)
|
||||
|
@ -675,13 +747,25 @@ func encPanic(err error) {
|
|||
panic(tomlEncodeError{err})
|
||||
}
|
||||
|
||||
// Resolve any level of pointers to the actual value (e.g. **string → string).
|
||||
func eindirect(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return eindirect(v.Elem())
|
||||
default:
|
||||
if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
|
||||
if isMarshaler(v) {
|
||||
return v
|
||||
}
|
||||
if v.CanAddr() { /// Special case for marshalers; see #358.
|
||||
if pv := v.Addr(); isMarshaler(pv) {
|
||||
return pv
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
if v.IsNil() {
|
||||
return v
|
||||
}
|
||||
|
||||
return eindirect(v.Elem())
|
||||
}
|
||||
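A quick illustration of the unwrapping when no marshalers are involved; this is plain reflect usage rather than anything specific to this package:

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		s := "x"
		ps := &s
		v := reflect.ValueOf(&ps) // **string
		for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
			v = v.Elem()
		}
		fmt.Println(v.Kind(), v.String()) // string x
	}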
|
||||
func isNil(rv reflect.Value) bool {
|
||||
|
|
|
@ -5,57 +5,60 @@ import (
|
|||
"strings"
|
||||
)
|
||||
|
||||
// ParseError is returned when there is an error parsing the TOML syntax.
|
||||
//
|
||||
// For example invalid syntax, duplicate keys, etc.
|
||||
// ParseError is returned when there is an error parsing TOML data, such as
|
||||
// invalid syntax, duplicate keys, etc.
|
||||
//
|
||||
// In addition to the error message itself, you can also print detailed location
|
||||
// information with context by using ErrorWithLocation():
|
||||
// information with context by using [ErrorWithPosition]:
|
||||
//
|
||||
// toml: error: Key 'fruit' was already created and cannot be used as an array.
|
||||
// toml: error: Key 'fruit' was already created and cannot be used as an array.
|
||||
//
|
||||
// At line 4, column 2-7:
|
||||
// At line 4, column 2-7:
|
||||
//
|
||||
// 2 | fruit = []
|
||||
// 3 |
|
||||
// 4 | [[fruit]] # Not allowed
|
||||
// ^^^^^
|
||||
// 2 | fruit = []
|
||||
// 3 |
|
||||
// 4 | [[fruit]] # Not allowed
|
||||
// ^^^^^
|
||||
//
|
||||
// Furthermore, the ErrorWithUsage() can be used to print the above with some
|
||||
// more detailed usage guidance:
|
||||
// [ErrorWithUsage] can be used to print the above with some more detailed usage
|
||||
// guidance:
|
||||
//
|
||||
// toml: error: newlines not allowed within inline tables
|
||||
// toml: error: newlines not allowed within inline tables
|
||||
//
|
||||
// At line 1, column 18:
|
||||
// At line 1, column 18:
|
||||
//
|
||||
// 1 | x = [{ key = 42 #
|
||||
// ^
|
||||
// 1 | x = [{ key = 42 #
|
||||
// ^
|
||||
//
|
||||
// Error help:
|
||||
// Error help:
|
||||
//
|
||||
// Inline tables must always be on a single line:
|
||||
// Inline tables must always be on a single line:
|
||||
//
|
||||
// table = {key = 42, second = 43}
|
||||
// table = {key = 42, second = 43}
|
||||
//
|
||||
// It is invalid to split them over multiple lines like so:
|
||||
// It is invalid to split them over multiple lines like so:
|
||||
//
|
||||
// # INVALID
|
||||
// table = {
|
||||
// key = 42,
|
||||
// second = 43
|
||||
// }
|
||||
// # INVALID
|
||||
// table = {
|
||||
// key = 42,
|
||||
// second = 43
|
||||
// }
|
||||
//
|
||||
// Use regular tables for this:
|
||||
// Use regular tables for this:
|
||||
//
|
||||
// [table]
|
||||
// key = 42
|
||||
// second = 43
|
||||
// [table]
|
||||
// key = 42
|
||||
// second = 43
|
||||
type ParseError struct {
|
||||
Message string // Short technical message.
|
||||
Usage string // Longer message with usage guidance; may be blank.
|
||||
Position Position // Position of the error
|
||||
LastKey string // Last parsed key, may be blank.
|
||||
Line int // Line the error occurred. Deprecated: use Position.
|
||||
|
||||
// Line the error occurred.
|
||||
//
|
||||
// Deprecated: use [Position].
|
||||
Line int
|
||||
|
||||
err error
|
||||
input string
|
||||
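As a hedged usage sketch, the detailed output above can be produced by unwrapping the error returned from Decode; the input is the same invalid inline table used in the example:

	package main

	import (
		"errors"
		"fmt"

		"github.com/BurntSushi/toml"
	)

	func main() {
		var v map[string]interface{}
		_, err := toml.Decode("x = [{ key = 42 #\n", &v)

		var perr toml.ParseError
		if errors.As(err, &perr) {
			fmt.Print(perr.ErrorWithUsage()) // location context plus the usage help
		}
	}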
|
@ -81,9 +84,9 @@ func (pe ParseError) Error() string {
|
|||
pe.Position.Line, pe.LastKey, msg)
|
||||
}
|
||||
|
||||
// ErrorWithUsage() returns the error with detailed location context.
|
||||
// ErrorWithPosition returns the error with detailed location context.
|
||||
//
|
||||
// See the documentation on ParseError.
|
||||
// See the documentation on [ParseError].
|
||||
func (pe ParseError) ErrorWithPosition() string {
|
||||
if pe.input == "" { // Should never happen, but just in case.
|
||||
return pe.Error()
|
||||
|
@ -111,26 +114,39 @@ func (pe ParseError) ErrorWithPosition() string {
|
|||
msg, pe.Position.Line, col, col+pe.Position.Len)
|
||||
}
|
||||
if pe.Position.Line > 2 {
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3])
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3]))
|
||||
}
|
||||
if pe.Position.Line > 1 {
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2])
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2]))
|
||||
}
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1])
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len))
|
||||
|
||||
/// Expand tabs, so that the ^^^s are at the correct position, but leave
|
||||
/// "column 10-13" intact. Adjusting this to the visual column would be
|
||||
/// better, but we don't know the tabsize of the user in their editor, which
|
||||
/// can be 8, 4, 2, or something else. We can't know. So leaving it as the
|
||||
/// character index is probably the "most correct".
|
||||
expanded := expandTab(lines[pe.Position.Line-1])
|
||||
diff := len(expanded) - len(lines[pe.Position.Line-1])
|
||||
|
||||
fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded)
|
||||
fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len))
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// ErrorWithUsage() returns the error with detailed location context and usage
|
||||
// ErrorWithUsage returns the error with detailed location context and usage
|
||||
// guidance.
|
||||
//
|
||||
// See the documentation on ParseError.
|
||||
// See the documentation on [ParseError].
|
||||
func (pe ParseError) ErrorWithUsage() string {
|
||||
m := pe.ErrorWithPosition()
|
||||
if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" {
|
||||
return m + "Error help:\n\n " +
|
||||
strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n ") +
|
||||
"\n"
|
||||
lines := strings.Split(strings.TrimSpace(u.Usage()), "\n")
|
||||
for i := range lines {
|
||||
if lines[i] != "" {
|
||||
lines[i] = " " + lines[i]
|
||||
}
|
||||
}
|
||||
return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n"
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
@ -152,14 +168,49 @@ func (pe ParseError) column(lines []string) int {
|
|||
return col
|
||||
}
|
||||
|
||||
func expandTab(s string) string {
|
||||
var (
|
||||
b strings.Builder
|
||||
l int
|
||||
fill = func(n int) string {
|
||||
b := make([]byte, n)
|
||||
for i := range b {
|
||||
b[i] = ' '
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
)
|
||||
b.Grow(len(s))
|
||||
for _, r := range s {
|
||||
switch r {
|
||||
case '\t':
|
||||
tw := 8 - l%8
|
||||
b.WriteString(fill(tw))
|
||||
l += tw
|
||||
default:
|
||||
b.WriteRune(r)
|
||||
l += 1
|
||||
}
|
||||
}
|
||||
return b.String()
|
||||
}
|
||||
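A couple of illustrative calls (within-package, since expandTab is unexported): tabs are expanded to the next multiple-of-8 column, which is what keeps the "^^^" marker line aligned with the source line printed above it.

	expandTab("a\tb")  // "a       b" (7 spaces, filling up to column 8)
	expandTab("ab\tc") // "ab      c" (6 spaces)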
|
||||
type (
|
||||
errLexControl struct{ r rune }
|
||||
errLexEscape struct{ r rune }
|
||||
errLexUTF8 struct{ b byte }
|
||||
errLexInvalidNum struct{ v string }
|
||||
errLexInvalidDate struct{ v string }
|
||||
errParseDate struct{ v string }
|
||||
errLexInlineTableNL struct{}
|
||||
errLexStringNL struct{}
|
||||
errParseRange struct {
|
||||
i any // int or float
|
||||
size string // "int64", "uint16", etc.
|
||||
}
|
||||
errUnsafeFloat struct {
|
||||
i interface{} // float32 or float64
|
||||
size string // "float32" or "float64"
|
||||
}
|
||||
errParseDuration struct{ d string }
|
||||
)
|
||||
|
||||
func (e errLexControl) Error() string {
|
||||
|
@ -171,14 +222,20 @@ func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape
|
|||
func (e errLexEscape) Usage() string { return usageEscape }
|
||||
func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
|
||||
func (e errLexUTF8) Usage() string { return "" }
|
||||
func (e errLexInvalidNum) Error() string { return fmt.Sprintf("invalid number: %q", e.v) }
|
||||
func (e errLexInvalidNum) Usage() string { return "" }
|
||||
func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
|
||||
func (e errLexInvalidDate) Usage() string { return "" }
|
||||
func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) }
|
||||
func (e errParseDate) Usage() string { return usageDate }
|
||||
func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
|
||||
func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
|
||||
func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
|
||||
func (e errLexStringNL) Usage() string { return usageStringNewline }
|
||||
func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
|
||||
func (e errParseRange) Usage() string { return usageIntOverflow }
|
||||
func (e errUnsafeFloat) Error() string {
|
||||
return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size)
|
||||
}
|
||||
func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat }
|
||||
func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
|
||||
func (e errParseDuration) Usage() string { return usageDuration }
|
||||
|
||||
const usageEscape = `
|
||||
A '\' inside a "-delimited string is interpreted as an escape character.
|
||||
|
@ -227,3 +284,73 @@ Instead use """ or ''' to split strings over multiple lines:
|
|||
string = """Hello,
|
||||
world!"""
|
||||
`
|
||||
|
||||
const usageIntOverflow = `
|
||||
This number is too large; this may be an error in the TOML, but it can also be a
|
||||
bug in the program that uses an integer type that is too small.
|
||||
|
||||
The maximum and minimum values are:
|
||||
|
||||
size │ lowest │ highest
|
||||
───────┼────────────────┼──────────────
|
||||
int8 │ -128 │ 127
|
||||
int16 │ -32,768 │ 32,767
|
||||
int32 │ -2,147,483,648 │ 2,147,483,647
|
||||
int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷
|
||||
uint8 │ 0 │ 255
|
||||
uint16 │ 0 │ 65,535
|
||||
uint32 │ 0 │ 4,294,967,295
|
||||
uint64 │ 0 │ 1.8 × 10¹⁸
|
||||
|
||||
int refers to int32 on 32-bit systems and int64 on 64-bit systems.
|
||||
`
|
||||
|
||||
const usageUnsafeFloat = `
|
||||
This number is outside of the "safe" range for floating point numbers; whole
|
||||
(non-fractional) numbers outside the range below cannot always be represented
|
||||
accurately in a float, leading to some loss of accuracy.
|
||||
|
||||
Explicitly mark a number as a fractional unit by adding ".0", which will incur
|
||||
some loss of accuracy; for example:
|
||||
|
||||
f = 2_000_000_000.0
|
||||
|
||||
Accuracy ranges:
|
||||
|
||||
float32 = 16,777,215
|
||||
float64 = 9,007,199,254,740,991
|
||||
`
|
||||
|
||||
const usageDuration = `
|
||||
A duration must be given as "number<unit>", without any spaces. Valid units are:
|
||||
|
||||
ns nanoseconds (billionth of a second)
|
||||
us, µs microseconds (millionth of a second)
|
||||
ms milliseconds (thousandth of a second)
|
||||
s seconds
|
||||
m minutes
|
||||
h hours
|
||||
|
||||
You can combine multiple units; for example "5m10s" for 5 minutes and 10
|
||||
seconds.
|
||||
`
|
||||
|
||||
const usageDate = `
|
||||
A TOML datetime must be in one of the following formats:
|
||||
|
||||
2006-01-02T15:04:05Z07:00 Date and time, with timezone.
|
||||
2006-01-02T15:04:05 Date and time, but without timezone.
|
||||
2006-01-02 Date without a time or timezone.
|
||||
15:04:05 Just a time, without any timezone.
|
||||
|
||||
Seconds may optionally have a fraction, up to nanosecond precision:
|
||||
|
||||
15:04:05.123
|
||||
15:04:05.856018510
|
||||
`
|
||||
|
||||
// TOML 1.1:
|
||||
// The seconds part in times is optional, and may be omitted:
|
||||
// 2006-01-02T15:04Z07:00
|
||||
// 2006-01-02T15:04
|
||||
// 15:04
|
||||
|
|
|
@ -17,6 +17,7 @@ const (
|
|||
itemEOF
|
||||
itemText
|
||||
itemString
|
||||
itemStringEsc
|
||||
itemRawString
|
||||
itemMultilineString
|
||||
itemRawMultilineString
|
||||
|
@ -46,12 +47,14 @@ func (p Position) String() string {
|
|||
}
|
||||
|
||||
type lexer struct {
|
||||
input string
|
||||
start int
|
||||
pos int
|
||||
line int
|
||||
state stateFn
|
||||
items chan item
|
||||
input string
|
||||
start int
|
||||
pos int
|
||||
line int
|
||||
state stateFn
|
||||
items chan item
|
||||
tomlNext bool
|
||||
esc bool
|
||||
|
||||
// Allow for backing up up to 4 runes. This is necessary because TOML
|
||||
// contains 3-rune tokens (""" and ''').
|
||||
|
@ -82,18 +85,19 @@ func (lx *lexer) nextItem() item {
|
|||
return item
|
||||
default:
|
||||
lx.state = lx.state(lx)
|
||||
//fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
|
||||
//fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func lex(input string) *lexer {
|
||||
func lex(input string, tomlNext bool) *lexer {
|
||||
lx := &lexer{
|
||||
input: input,
|
||||
state: lexTop,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
line: 1,
|
||||
input: input,
|
||||
state: lexTop,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
line: 1,
|
||||
tomlNext: tomlNext,
|
||||
}
|
||||
return lx
|
||||
}
|
||||
|
@ -128,6 +132,11 @@ func (lx lexer) getPos() Position {
|
|||
}
|
||||
|
||||
func (lx *lexer) emit(typ itemType) {
|
||||
// Needed for multiline strings ending with an incomplete UTF-8 sequence.
|
||||
if lx.start > lx.pos {
|
||||
lx.error(errLexUTF8{lx.input[lx.pos]})
|
||||
return
|
||||
}
|
||||
lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
@ -157,7 +166,7 @@ func (lx *lexer) next() (r rune) {
|
|||
}
|
||||
|
||||
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||
if r == utf8.RuneError {
|
||||
if r == utf8.RuneError && w == 1 {
|
||||
lx.error(errLexUTF8{lx.input[lx.pos]})
|
||||
return utf8.RuneError
|
||||
}
|
||||
|
@ -263,7 +272,7 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn {
|
|||
}
|
||||
|
||||
// errorf is like error, and creates a new error.
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
func (lx *lexer) errorf(format string, values ...any) stateFn {
|
||||
if lx.atEOF {
|
||||
pos := lx.getPos()
|
||||
pos.Line--
|
||||
|
@ -326,9 +335,7 @@ func lexTopEnd(lx *lexer) stateFn {
|
|||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
return lx.errorf(
|
||||
"expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
|
||||
r)
|
||||
return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r)
|
||||
}
|
||||
|
||||
// lexTable lexes the beginning of a table. Namely, it makes sure that
|
||||
|
@ -403,7 +410,7 @@ func lexTableNameEnd(lx *lexer) stateFn {
|
|||
// Lexes only one part, e.g. only 'a' inside 'a.b'.
|
||||
func lexBareName(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isBareKeyChar(r) {
|
||||
if isBareKeyChar(r, lx.tomlNext) {
|
||||
return lexBareName
|
||||
}
|
||||
lx.backup()
|
||||
|
@ -613,6 +620,9 @@ func lexInlineTableValue(lx *lexer) stateFn {
|
|||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValue)
|
||||
case isNL(r):
|
||||
if lx.tomlNext {
|
||||
return lexSkip(lx, lexInlineTableValue)
|
||||
}
|
||||
return lx.errorPrevLine(errLexInlineTableNL{})
|
||||
case r == '#':
|
||||
lx.push(lexInlineTableValue)
|
||||
|
@ -635,6 +645,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
|
|||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexInlineTableValueEnd)
|
||||
case isNL(r):
|
||||
if lx.tomlNext {
|
||||
return lexSkip(lx, lexInlineTableValueEnd)
|
||||
}
|
||||
return lx.errorPrevLine(errLexInlineTableNL{})
|
||||
case r == '#':
|
||||
lx.push(lexInlineTableValueEnd)
|
||||
|
@ -643,6 +656,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
|
|||
lx.ignore()
|
||||
lx.skip(isWhitespace)
|
||||
if lx.peek() == '}' {
|
||||
if lx.tomlNext {
|
||||
return lexInlineTableValueEnd
|
||||
}
|
||||
return lx.errorf("trailing comma not allowed in inline tables")
|
||||
}
|
||||
return lexInlineTableValue
|
||||
|
@ -682,7 +698,12 @@ func lexString(lx *lexer) stateFn {
|
|||
return lexStringEscape
|
||||
case r == '"':
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
if lx.esc {
|
||||
lx.esc = false
|
||||
lx.emit(itemStringEsc)
|
||||
} else {
|
||||
lx.emit(itemString)
|
||||
}
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
|
@ -711,7 +732,17 @@ func lexMultilineString(lx *lexer) stateFn {
|
|||
if lx.peek() == '"' {
|
||||
/// Check if we already lexed 5 's; if so we have 6 now, and
|
||||
/// that's just too many man!
|
||||
if strings.HasSuffix(lx.current(), `"""""`) {
|
||||
///
|
||||
/// Second check is for the edge case:
|
||||
///
|
||||
/// two quotes allowed.
|
||||
/// vv
|
||||
/// """lol \""""""
|
||||
/// ^^ ^^^---- closing three
|
||||
/// escaped
|
||||
///
|
||||
/// Ugly, but it works.
|
||||
if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) {
|
||||
return lx.errorf(`unexpected '""""""'`)
|
||||
}
|
||||
lx.backup()
|
||||
|
@ -722,6 +753,7 @@ func lexMultilineString(lx *lexer) stateFn {
|
|||
lx.backup() /// backup: don't include the """ in the item.
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.esc = false
|
||||
lx.emit(itemMultilineString)
|
||||
lx.next() /// Read over ''' again and discard it.
|
||||
lx.next()
|
||||
|
@ -755,8 +787,8 @@ func lexRawString(lx *lexer) stateFn {
|
|||
}
|
||||
}
|
||||
|
||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
|
||||
// a string. It assumes that the beginning "'''" has already been consumed and
|
||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a
|
||||
// string. It assumes that the beginning triple-' has already been consumed and
|
||||
// ignored.
|
||||
func lexMultilineRawString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
|
@ -802,8 +834,7 @@ func lexMultilineRawString(lx *lexer) stateFn {
|
|||
// lexMultilineStringEscape consumes an escaped character. It assumes that the
|
||||
// preceding '\\' has already been consumed.
|
||||
func lexMultilineStringEscape(lx *lexer) stateFn {
|
||||
// Handle the special case first:
|
||||
if isNL(lx.next()) {
|
||||
if isNL(lx.next()) { /// \ escaping newline.
|
||||
return lexMultilineString
|
||||
}
|
||||
lx.backup()
|
||||
|
@ -812,8 +843,14 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
|
|||
}
|
||||
|
||||
func lexStringEscape(lx *lexer) stateFn {
|
||||
lx.esc = true
|
||||
r := lx.next()
|
||||
switch r {
|
||||
case 'e':
|
||||
if !lx.tomlNext {
|
||||
return lx.error(errLexEscape{r})
|
||||
}
|
||||
fallthrough
|
||||
case 'b':
|
||||
fallthrough
|
||||
case 't':
|
||||
|
@ -832,6 +869,11 @@ func lexStringEscape(lx *lexer) stateFn {
|
|||
fallthrough
|
||||
case '\\':
|
||||
return lx.pop()
|
||||
case 'x':
|
||||
if !lx.tomlNext {
|
||||
return lx.error(errLexEscape{r})
|
||||
}
|
||||
return lexHexEscape
|
||||
case 'u':
|
||||
return lexShortUnicodeEscape
|
||||
case 'U':
|
||||
|
@ -840,14 +882,23 @@ func lexStringEscape(lx *lexer) stateFn {
|
|||
return lx.error(errLexEscape{r})
|
||||
}
|
||||
|
||||
func lexHexEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 2; i++ {
|
||||
r = lx.next()
|
||||
if !isHex(r) {
|
||||
return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
func lexShortUnicodeEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 4; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(
|
||||
`expected four hexadecimal digits after '\u', but got %q instead`,
|
||||
lx.current())
|
||||
if !isHex(r) {
|
||||
return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
|
@ -857,10 +908,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
|
|||
var r rune
|
||||
for i := 0; i < 8; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf(
|
||||
`expected eight hexadecimal digits after '\U', but got %q instead`,
|
||||
lx.current())
|
||||
if !isHex(r) {
|
||||
return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
|
@ -927,7 +976,7 @@ func lexDatetime(lx *lexer) stateFn {
|
|||
// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
|
||||
func lexHexInteger(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isHexadecimal(r) {
|
||||
if isHex(r) {
|
||||
return lexHexInteger
|
||||
}
|
||||
switch r {
|
||||
|
@ -1061,7 +1110,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn {
|
|||
return lexOctalInteger
|
||||
case 'x':
|
||||
r = lx.peek()
|
||||
if !isHexadecimal(r) {
|
||||
if !isHex(r) {
|
||||
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
|
||||
}
|
||||
return lexHexInteger
|
||||
|
@ -1159,7 +1208,7 @@ func (itype itemType) String() string {
|
|||
return "EOF"
|
||||
case itemText:
|
||||
return "Text"
|
||||
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
|
||||
case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString:
|
||||
return "String"
|
||||
case itemBool:
|
||||
return "Bool"
|
||||
|
@ -1192,7 +1241,7 @@ func (itype itemType) String() string {
|
|||
}
|
||||
|
||||
func (item item) String() string {
|
||||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
|
||||
return fmt.Sprintf("(%s, %s)", item.typ, item.val)
|
||||
}
|
||||
|
||||
func isWhitespace(r rune) bool { return r == '\t' || r == ' ' }
|
||||
|
@ -1208,10 +1257,23 @@ func isControl(r rune) bool { // Control characters except \t, \r, \n
|
|||
func isDigit(r rune) bool { return r >= '0' && r <= '9' }
|
||||
func isBinary(r rune) bool { return r == '0' || r == '1' }
|
||||
func isOctal(r rune) bool { return r >= '0' && r <= '7' }
|
||||
func isHexadecimal(r rune) bool {
|
||||
return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F')
|
||||
}
|
||||
func isBareKeyChar(r rune) bool {
|
||||
func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') }
|
||||
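// OR-ing a rune with 0x20 sets the ASCII lower-case bit, mapping 'A'..'F'
// onto 'a'..'f', so a single range check covers both cases of hex digit.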
func isBareKeyChar(r rune, tomlNext bool) bool {
|
||||
if tomlNext {
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' || r == '-' ||
|
||||
r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) ||
|
||||
(r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) ||
|
||||
(r >= 0x037f && r <= 0x1fff) ||
|
||||
(r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) ||
|
||||
(r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) ||
|
||||
(r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) ||
|
||||
(r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) ||
|
||||
(r >= 0x10000 && r <= 0xeffff)
|
||||
}
|
||||
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
|
|
|
@ -12,10 +12,11 @@ import (
|
|||
type MetaData struct {
|
||||
context Key // Used only during decoding.
|
||||
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
keyInfo map[string]keyInfo
|
||||
mapping map[string]any
|
||||
keys []Key
|
||||
decoded map[string]struct{}
|
||||
data []byte // Input file; for errors.
|
||||
}
|
||||
|
||||
// IsDefined reports if the key exists in the TOML data.
|
||||
|
@ -30,12 +31,12 @@ func (md *MetaData) IsDefined(key ...string) bool {
|
|||
}
|
||||
|
||||
var (
|
||||
hash map[string]interface{}
|
||||
hash map[string]any
|
||||
ok bool
|
||||
hashOrVal interface{} = md.mapping
|
||||
hashOrVal any = md.mapping
|
||||
)
|
||||
for _, k := range key {
|
||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
||||
if hash, ok = hashOrVal.(map[string]any); !ok {
|
||||
return false
|
||||
}
|
||||
if hashOrVal, ok = hash[k]; !ok {
|
||||
|
@ -50,8 +51,8 @@ func (md *MetaData) IsDefined(key ...string) bool {
|
|||
// Type will return the empty string if given an empty key or a key that does
|
||||
// not exist. Keys are case sensitive.
|
||||
func (md *MetaData) Type(key ...string) string {
|
||||
if typ, ok := md.types[Key(key).String()]; ok {
|
||||
return typ.typeString()
|
||||
if ki, ok := md.keyInfo[Key(key).String()]; ok {
|
||||
return ki.tomlType.typeString()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
@ -70,7 +71,7 @@ func (md *MetaData) Keys() []Key {
|
|||
// Undecoded returns all keys that have not been decoded in the order in which
|
||||
// they appear in the original TOML document.
|
||||
//
|
||||
// This includes keys that haven't been decoded because of a Primitive value.
|
||||
// This includes keys that haven't been decoded because of a [Primitive] value.
|
||||
// Once the Primitive value is decoded, the keys will be considered decoded.
|
||||
//
|
||||
// Also note that decoding into an empty interface will result in no decoding,
|
||||
|
@ -88,33 +89,60 @@ func (md *MetaData) Undecoded() []Key {
|
|||
return undecoded
|
||||
}
|
||||
|
||||
// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
|
||||
// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get
|
||||
// values of this type.
|
||||
type Key []string
|
||||
|
||||
func (k Key) String() string {
|
||||
ss := make([]string, len(k))
|
||||
for i := range k {
|
||||
ss[i] = k.maybeQuoted(i)
|
||||
// This is called quite often, so it trades a little clarity for speed.
|
||||
var b strings.Builder
|
||||
b.Grow(len(k) * 25)
|
||||
outer:
|
||||
for i, kk := range k {
|
||||
if i > 0 {
|
||||
b.WriteByte('.')
|
||||
}
|
||||
if kk == "" {
|
||||
b.WriteString(`""`)
|
||||
} else {
|
||||
for _, r := range kk {
|
||||
// "Inline" isBareKeyChar
|
||||
if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') {
|
||||
b.WriteByte('"')
|
||||
b.WriteString(dblQuotedReplacer.Replace(kk))
|
||||
b.WriteByte('"')
|
||||
continue outer
|
||||
}
|
||||
}
|
||||
b.WriteString(kk)
|
||||
}
|
||||
}
|
||||
return strings.Join(ss, ".")
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (k Key) maybeQuoted(i int) string {
|
||||
if k[i] == "" {
|
||||
return `""`
|
||||
}
|
||||
for _, c := range k[i] {
|
||||
if !isBareKeyChar(c) {
|
||||
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
|
||||
for _, r := range k[i] {
|
||||
if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' {
|
||||
continue
|
||||
}
|
||||
return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
|
||||
}
|
||||
return k[i]
|
||||
}
|
||||
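A hedged sketch of the quoting behaviour; the expected output is inferred from the rules above:

	package main

	import (
		"fmt"

		"github.com/BurntSushi/toml"
	)

	func main() {
		// Bare-key characters pass through, anything else is quoted, and an
		// empty part renders as "".
		fmt.Println(toml.Key{"fruit", "name with space", ""}.String())
		// fruit."name with space".""
	}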
|
||||
// Like append(), but only increase the cap by 1.
|
||||
func (k Key) add(piece string) Key {
|
||||
if cap(k) > len(k) {
|
||||
return append(k, piece)
|
||||
}
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
return newKey
|
||||
}
|
||||
|
||||
func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece.
|
||||
func (k Key) last() string { return k[len(k)-1] } // last piece of this key.
|
||||
|
|
|
@ -2,6 +2,8 @@ package toml
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -15,14 +17,23 @@ type parser struct {
|
|||
context Key // Full key for the current hash in scope.
|
||||
currentKey string // Base key name for everything except hashes.
|
||||
pos Position // Current position in the TOML file.
|
||||
tomlNext bool
|
||||
|
||||
ordered []Key // List of keys in the order that they appear in the TOML data.
|
||||
mapping map[string]interface{} // Map keyname → key value.
|
||||
types map[string]tomlType // Map keyname → TOML type.
|
||||
implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
|
||||
ordered []Key // List of keys in the order that they appear in the TOML data.
|
||||
|
||||
keyInfo map[string]keyInfo // Map keyname → info about the TOML key.
|
||||
mapping map[string]any // Map keyname → key value.
|
||||
implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
|
||||
}
|
||||
|
||||
type keyInfo struct {
|
||||
pos Position
|
||||
tomlType tomlType
|
||||
}
|
||||
|
||||
func parse(data string) (p *parser, err error) {
|
||||
_, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110")
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if pErr, ok := r.(ParseError); ok {
|
||||
|
@ -35,9 +46,13 @@ func parse(data string) (p *parser, err error) {
|
|||
}()
|
||||
|
||||
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
|
||||
// which mangles stuff.
|
||||
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
|
||||
// which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add
|
||||
// it anyway.
|
||||
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
|
||||
data = data[2:]
|
||||
//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
|
||||
} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
|
||||
data = data[3:]
|
||||
}
|
||||
|
||||
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
|
||||
|
@ -57,11 +72,12 @@ func parse(data string) (p *parser, err error) {
|
|||
}
|
||||
|
||||
p = &parser{
|
||||
mapping: make(map[string]interface{}),
|
||||
types: make(map[string]tomlType),
|
||||
lx: lex(data),
|
||||
keyInfo: make(map[string]keyInfo),
|
||||
mapping: make(map[string]any),
|
||||
lx: lex(data, tomlNext),
|
||||
ordered: make([]Key, 0),
|
||||
implicits: make(map[string]struct{}),
|
||||
tomlNext: tomlNext,
|
||||
}
|
||||
for {
|
||||
item := p.next()
|
||||
|
@ -74,7 +90,16 @@ func parse(data string) (p *parser, err error) {
|
|||
return p, nil
|
||||
}
|
||||
|
||||
func (p *parser) panicItemf(it item, format string, v ...interface{}) {
|
||||
func (p *parser) panicErr(it item, err error) {
|
||||
panic(ParseError{
|
||||
err: err,
|
||||
Position: it.pos,
|
||||
Line: it.pos.Len,
|
||||
LastKey: p.current(),
|
||||
})
|
||||
}
|
||||
|
||||
func (p *parser) panicItemf(it item, format string, v ...any) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: it.pos,
|
||||
|
@ -83,7 +108,7 @@ func (p *parser) panicItemf(it item, format string, v ...interface{}) {
|
|||
})
|
||||
}
|
||||
|
||||
func (p *parser) panicf(format string, v ...interface{}) {
|
||||
func (p *parser) panicf(format string, v ...any) {
|
||||
panic(ParseError{
|
||||
Message: fmt.Sprintf(format, v...),
|
||||
Position: p.pos,
|
||||
|
@ -94,7 +119,7 @@ func (p *parser) panicf(format string, v ...interface{}) {
|
|||
|
||||
func (p *parser) next() item {
|
||||
it := p.lx.nextItem()
|
||||
//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
|
||||
//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val)
|
||||
if it.typ == itemError {
|
||||
if it.err != nil {
|
||||
panic(ParseError{
|
||||
|
@ -116,7 +141,7 @@ func (p *parser) nextPos() item {
|
|||
return it
|
||||
}
|
||||
|
||||
func (p *parser) bug(format string, v ...interface{}) {
|
||||
func (p *parser) bug(format string, v ...any) {
|
||||
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
|
||||
}
|
||||
|
||||
|
@ -146,7 +171,7 @@ func (p *parser) topLevel(item item) {
|
|||
p.assertEqual(itemTableEnd, name.typ)
|
||||
|
||||
p.addContext(key, false)
|
||||
p.setType("", tomlHash)
|
||||
p.setType("", tomlHash, item.pos)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemArrayTableStart: // [[ .. ]]
|
||||
name := p.nextPos()
|
||||
|
@ -158,7 +183,7 @@ func (p *parser) topLevel(item item) {
|
|||
p.assertEqual(itemArrayTableEnd, name.typ)
|
||||
|
||||
p.addContext(key, true)
|
||||
p.setType("", tomlArrayHash)
|
||||
p.setType("", tomlArrayHash, item.pos)
|
||||
p.ordered = append(p.ordered, key)
|
||||
case itemKeyStart: // key = ..
|
||||
outerContext := p.context
|
||||
|
@ -171,19 +196,21 @@ func (p *parser) topLevel(item item) {
|
|||
p.assertEqual(itemKeyEnd, k.typ)
|
||||
|
||||
/// The current key is the last part.
|
||||
p.currentKey = key[len(key)-1]
|
||||
p.currentKey = key.last()
|
||||
|
||||
/// All the other parts (if any) are the context; need to set each part
|
||||
/// as implicit.
|
||||
context := key[:len(key)-1]
|
||||
context := key.parent()
|
||||
for i := range context {
|
||||
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
||||
}
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
|
||||
/// Set value.
|
||||
val, typ := p.value(p.next(), false)
|
||||
p.set(p.currentKey, val, typ)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
vItem := p.next()
|
||||
val, typ := p.value(vItem, false)
|
||||
p.setValue(p.currentKey, val)
|
||||
p.setType(p.currentKey, typ, vItem.pos)
|
||||
|
||||
/// Remove the context we added (preserving any context from [tbl] lines).
|
||||
p.context = outerContext
|
||||
|
@ -198,7 +225,7 @@ func (p *parser) keyString(it item) string {
|
|||
switch it.typ {
|
||||
case itemText:
|
||||
return it.val
|
||||
case itemString, itemMultilineString,
|
||||
case itemString, itemStringEsc, itemMultilineString,
|
||||
itemRawString, itemRawMultilineString:
|
||||
s, _ := p.value(it, false)
|
||||
return s.(string)
|
||||
|
@ -215,12 +242,14 @@ var datetimeRepl = strings.NewReplacer(
|
|||
|
||||
// value translates an expected value from the lexer into a Go value wrapped
|
||||
// as an empty interface.
|
||||
func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
|
||||
func (p *parser) value(it item, parentIsArray bool) (any, tomlType) {
|
||||
switch it.typ {
|
||||
case itemString:
|
||||
return it.val, p.typeOfPrimitive(it)
|
||||
case itemStringEsc:
|
||||
return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it)
|
||||
case itemMultilineString:
|
||||
return p.replaceEscapes(it, stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
|
||||
return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it)
|
||||
case itemRawString:
|
||||
return it.val, p.typeOfPrimitive(it)
|
||||
case itemRawMultilineString:
|
||||
|
@ -250,7 +279,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
|
|||
panic("unreachable")
|
||||
}
|
||||
|
||||
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
|
||||
func (p *parser) valueInteger(it item) (any, tomlType) {
|
||||
if !numUnderscoresOK(it.val) {
|
||||
p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val)
|
||||
}
|
||||
|
@ -266,7 +295,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
|
|||
// So mark the former as a bug but the latter as a legitimate user
|
||||
// error.
|
||||
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
|
||||
p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val)
|
||||
p.panicErr(it, errParseRange{i: it.val, size: "int64"})
|
||||
} else {
|
||||
p.bug("Expected integer value, but got '%s'.", it.val)
|
||||
}
|
||||
|
@ -274,7 +303,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) {
|
|||
return num, p.typeOfPrimitive(it)
|
||||
}
|
||||
|
||||
func (p *parser) valueFloat(it item) (interface{}, tomlType) {
|
||||
func (p *parser) valueFloat(it item) (any, tomlType) {
|
||||
parts := strings.FieldsFunc(it.val, func(r rune) bool {
|
||||
switch r {
|
||||
case '.', 'e', 'E':
|
||||
|
@ -298,31 +327,42 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) {
|
|||
p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val)
|
||||
}
|
||||
val := strings.Replace(it.val, "_", "", -1)
|
||||
if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
|
||||
signbit := false
|
||||
if val == "+nan" || val == "-nan" {
|
||||
signbit = val == "-nan"
|
||||
val = "nan"
|
||||
}
|
||||
num, err := strconv.ParseFloat(val, 64)
|
||||
if err != nil {
|
||||
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
|
||||
p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
|
||||
p.panicErr(it, errParseRange{i: it.val, size: "float64"})
|
||||
} else {
|
||||
p.panicItemf(it, "Invalid float value: %q", it.val)
|
||||
}
|
||||
}
|
||||
if signbit {
|
||||
num = math.Copysign(num, -1)
|
||||
}
|
||||
return num, p.typeOfPrimitive(it)
|
||||
}
|
||||
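The signbit handling is needed because Go's strconv does not accept a sign in front of "nan", while the TOML spec allows one; a small sketch:

	package main

	import (
		"fmt"
		"math"
		"strconv"
	)

	func main() {
		_, err := strconv.ParseFloat("-nan", 64)
		fmt.Println(err != nil) // true: only an unsigned "nan" parses

		f := math.Copysign(math.NaN(), -1)          // re-apply the sign bit, as above
		fmt.Println(math.IsNaN(f), math.Signbit(f)) // true true
	}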
|
||||
var dtTypes = []struct {
|
||||
fmt string
|
||||
zone *time.Location
|
||||
next bool
|
||||
}{
|
||||
{time.RFC3339Nano, time.Local},
|
||||
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
|
||||
{"2006-01-02", internal.LocalDate},
|
||||
{"15:04:05.999999999", internal.LocalTime},
|
||||
{time.RFC3339Nano, time.Local, false},
|
||||
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false},
|
||||
{"2006-01-02", internal.LocalDate, false},
|
||||
{"15:04:05.999999999", internal.LocalTime, false},
|
||||
|
||||
// tomlNext
|
||||
{"2006-01-02T15:04Z07:00", time.Local, true},
|
||||
{"2006-01-02T15:04", internal.LocalDatetime, true},
|
||||
{"15:04", internal.LocalTime, true},
|
||||
}
|
||||
|
||||
func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
|
||||
func (p *parser) valueDatetime(it item) (any, tomlType) {
|
||||
it.val = datetimeRepl.Replace(it.val)
|
||||
var (
|
||||
t time.Time
|
||||
|
@ -330,29 +370,49 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
|
|||
err error
|
||||
)
|
||||
for _, dt := range dtTypes {
|
||||
if dt.next && !p.tomlNext {
|
||||
continue
|
||||
}
|
||||
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
|
||||
if err == nil {
|
||||
if missingLeadingZero(it.val, dt.fmt) {
|
||||
p.panicErr(it, errParseDate{it.val})
|
||||
}
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val)
|
||||
p.panicErr(it, errParseDate{it.val})
|
||||
}
|
||||
return t, p.typeOfPrimitive(it)
|
||||
}
|
||||
|
||||
func (p *parser) valueArray(it item) (interface{}, tomlType) {
|
||||
p.setType(p.currentKey, tomlArray)
|
||||
// Go's time.Parse() will accept numbers without a leading zero; there isn't any
|
||||
// way to require it. https://github.com/golang/go/issues/29911
|
||||
//
|
||||
// Depend on the fact that the separators (- and :) should always be at the same
|
||||
// location.
|
||||
func missingLeadingZero(d, l string) bool {
|
||||
for i, c := range []byte(l) {
|
||||
if c == '.' || c == 'Z' {
|
||||
return false
|
||||
}
|
||||
if (c < '0' || c > '9') && d[i] != c {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
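// Within-package sketch: with layout "2006-01-02", the value "2021-7-01" is
// rejected because at the index where the layout has '-' the value still has
// a digit (the month lacks its leading zero), while "2021-07-01" lines up:
//
//	missingLeadingZero("2021-7-01", "2006-01-02")  // true
//	missingLeadingZero("2021-07-01", "2006-01-02") // false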
|
||||
func (p *parser) valueArray(it item) (any, tomlType) {
|
||||
p.setType(p.currentKey, tomlArray, it.pos)
|
||||
|
||||
// p.setType(p.currentKey, typ)
|
||||
var (
|
||||
types []tomlType
|
||||
|
||||
// Initialize to a non-nil empty slice. This makes it consistent with
|
||||
// how S = [] decodes into a non-nil slice inside something like struct
|
||||
// { S []string }. See #338
|
||||
array = []interface{}{}
|
||||
// Initialize to a non-nil slice to make it consistent with how S = []
|
||||
// decodes into a non-nil slice inside something like struct { S
|
||||
// []string }. See #338
|
||||
array = make([]any, 0, 2)
|
||||
)
|
||||
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
|
||||
if it.typ == itemCommentStart {
|
||||
|
@ -362,20 +422,20 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) {
|
|||
|
||||
val, typ := p.value(it, true)
|
||||
array = append(array, val)
|
||||
types = append(types, typ)
|
||||
|
||||
// XXX: types isn't used here, we need it to record the accurate type
|
||||
// XXX: type isn't used here, we need it to record the accurate type
|
||||
// information.
|
||||
//
|
||||
// Not entirely sure how to best store this; could use "key[0]",
|
||||
// "key[1]" notation, or maybe store it on the Array type?
|
||||
_ = typ
|
||||
}
|
||||
return array, tomlArray
|
||||
}
|
||||
|
||||
func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
|
||||
func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) {
|
||||
var (
|
||||
hash = make(map[string]interface{})
|
||||
topHash = make(map[string]any)
|
||||
outerContext = p.context
|
||||
outerKey = p.currentKey
|
||||
)
|
||||
|
@ -403,19 +463,33 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
|
|||
p.assertEqual(itemKeyEnd, k.typ)
|
||||
|
||||
/// The current key is the last part.
|
||||
p.currentKey = key[len(key)-1]
|
||||
p.currentKey = key.last()
|
||||
|
||||
/// All the other parts (if any) are the context; need to set each part
|
||||
/// as implicit.
|
||||
context := key[:len(key)-1]
|
||||
context := key.parent()
|
||||
for i := range context {
|
||||
p.addImplicitContext(append(p.context, context[i:i+1]...))
|
||||
}
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
|
||||
/// Set the value.
|
||||
val, typ := p.value(p.next(), false)
|
||||
p.set(p.currentKey, val, typ)
|
||||
p.ordered = append(p.ordered, p.context.add(p.currentKey))
|
||||
p.setValue(p.currentKey, val)
|
||||
p.setType(p.currentKey, typ, it.pos)
|
||||
|
||||
hash := topHash
|
||||
for _, c := range context {
|
||||
h, ok := hash[c]
|
||||
if !ok {
|
||||
h = make(map[string]any)
|
||||
hash[c] = h
|
||||
}
|
||||
hash, ok = h.(map[string]any)
|
||||
if !ok {
|
||||
p.panicf("%q is not a table", p.context)
|
||||
}
|
||||
}
|
||||
hash[p.currentKey] = val
|
||||
|
||||
/// Restore context.
|
||||
|
@ -423,7 +497,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
|
|||
}
|
||||
p.context = outerContext
|
||||
p.currentKey = outerKey
|
||||
return hash, tomlHash
|
||||
return topHash, tomlHash
|
||||
}
|
||||
|
||||
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
|
||||
|
@ -453,9 +527,9 @@ func numUnderscoresOK(s string) bool {
|
|||
}
|
||||
}
|
||||
|
||||
// isHexadecimal is a superset of all the permissable characters
|
||||
// surrounding an underscore.
|
||||
accept = isHexadecimal(r)
|
||||
// isHex is a superset of all the permissible characters surrounding an
|
||||
// underscore.
|
||||
accept = isHex(r)
|
||||
}
|
||||
return accept
|
||||
}
|
||||
|
@ -478,21 +552,19 @@ func numPeriodsOK(s string) bool {
|
|||
// Establishing the context also makes sure that the key isn't a duplicate, and
|
||||
// will create implicit hashes automatically.
|
||||
func (p *parser) addContext(key Key, array bool) {
|
||||
var ok bool
|
||||
|
||||
// Always start at the top level and drill down for our context.
|
||||
/// Always start at the top level and drill down for our context.
|
||||
hashContext := p.mapping
|
||||
keyContext := make(Key, 0)
|
||||
keyContext := make(Key, 0, len(key)-1)
|
||||
|
||||
// We only need implicit hashes for key[0:-1]
|
||||
for _, k := range key[0 : len(key)-1] {
|
||||
_, ok = hashContext[k]
|
||||
/// We only need implicit hashes for the parents.
|
||||
for _, k := range key.parent() {
|
||||
_, ok := hashContext[k]
|
||||
keyContext = append(keyContext, k)
|
||||
|
||||
// No key? Make an implicit hash and move on.
|
||||
if !ok {
|
||||
p.addImplicit(keyContext)
|
||||
hashContext[k] = make(map[string]interface{})
|
||||
hashContext[k] = make(map[string]any)
|
||||
}
|
||||
|
||||
// If the hash context is actually an array of tables, then set
|
||||
|
@ -501,9 +573,9 @@ func (p *parser) addContext(key Key, array bool) {
|
|||
// Otherwise, it better be a table, since this MUST be a key group (by
|
||||
// virtue of it not being the last element in a key).
|
||||
switch t := hashContext[k].(type) {
|
||||
case []map[string]interface{}:
|
||||
case []map[string]any:
|
||||
hashContext = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
case map[string]any:
|
||||
hashContext = t
|
||||
default:
|
||||
p.panicf("Key '%s' was already created as a hash.", keyContext)
|
||||
|
@ -514,39 +586,33 @@ func (p *parser) addContext(key Key, array bool) {
|
|||
if array {
|
||||
// If this is the first element for this array, then allocate a new
|
||||
// list of tables for it.
|
||||
k := key[len(key)-1]
|
||||
k := key.last()
|
||||
if _, ok := hashContext[k]; !ok {
|
||||
hashContext[k] = make([]map[string]interface{}, 0, 4)
|
||||
hashContext[k] = make([]map[string]any, 0, 4)
|
||||
}
|
||||
|
||||
// Add a new table. But make sure the key hasn't already been used
|
||||
// for something else.
|
||||
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
|
||||
hashContext[k] = append(hash, make(map[string]interface{}))
|
||||
if hash, ok := hashContext[k].([]map[string]any); ok {
|
||||
hashContext[k] = append(hash, make(map[string]any))
|
||||
} else {
|
||||
p.panicf("Key '%s' was already created and cannot be used as an array.", key)
|
||||
}
|
||||
} else {
|
||||
p.setValue(key[len(key)-1], make(map[string]interface{}))
|
||||
p.setValue(key.last(), make(map[string]any))
|
||||
}
|
||||
p.context = append(p.context, key[len(key)-1])
|
||||
}
|
||||
|
||||
// set calls setValue and setType.
|
||||
func (p *parser) set(key string, val interface{}, typ tomlType) {
|
||||
p.setValue(key, val)
|
||||
p.setType(key, typ)
|
||||
p.context = append(p.context, key.last())
|
||||
}
|
||||
|
||||
// setValue sets the given key to the given value in the current context.
|
||||
// It will make sure that the key hasn't already been defined, account for
|
||||
// implicit key groups.
|
||||
func (p *parser) setValue(key string, value interface{}) {
|
||||
func (p *parser) setValue(key string, value any) {
|
||||
var (
|
||||
tmpHash interface{}
|
||||
tmpHash any
|
||||
ok bool
|
||||
hash = p.mapping
|
||||
keyContext Key
|
||||
keyContext = make(Key, 0, len(p.context)+1)
|
||||
)
|
||||
for _, k := range p.context {
|
||||
keyContext = append(keyContext, k)
|
||||
|
@ -554,11 +620,11 @@ func (p *parser) setValue(key string, value interface{}) {
|
|||
p.bug("Context for key '%s' has not been established.", keyContext)
|
||||
}
|
||||
switch t := tmpHash.(type) {
|
||||
case []map[string]interface{}:
|
||||
case []map[string]any:
|
||||
// The context is a table of hashes. Pick the most recent table
|
||||
// defined as the current hash.
|
||||
hash = t[len(t)-1]
|
||||
case map[string]interface{}:
|
||||
case map[string]any:
|
||||
hash = t
|
||||
default:
|
||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||
|
@ -585,9 +651,8 @@ func (p *parser) setValue(key string, value interface{}) {
|
|||
p.removeImplicit(keyContext)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, we have a concrete key trying to override a previous
|
||||
// key, which is *always* wrong.
|
||||
// Otherwise, we have a concrete key trying to override a previous key,
|
||||
// which is *always* wrong.
|
||||
p.panicf("Key '%s' has already been defined.", keyContext)
|
||||
}
|
||||
|
||||
|
@ -599,7 +664,7 @@ func (p *parser) setValue(key string, value interface{}) {
|
|||
//
|
||||
// Note that if `key` is empty, then the type given will be applied to the
|
||||
// current context (which is either a table or an array of tables).
|
||||
func (p *parser) setType(key string, typ tomlType) {
|
||||
func (p *parser) setType(key string, typ tomlType, pos Position) {
|
||||
keyContext := make(Key, 0, len(p.context)+1)
|
||||
keyContext = append(keyContext, p.context...)
|
||||
if len(key) > 0 { // allow type setting for hashes
|
||||
|
@ -611,19 +676,16 @@ func (p *parser) setType(key string, typ tomlType) {
|
|||
if len(keyContext) == 0 {
|
||||
keyContext = Key{""}
|
||||
}
|
||||
p.types[keyContext.String()] = typ
|
||||
p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos}
|
||||
}
|
||||
|
||||
// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
|
||||
// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
|
||||
func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
|
||||
func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
|
||||
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
|
||||
func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
|
||||
func (p *parser) addImplicitContext(key Key) {
|
||||
p.addImplicit(key)
|
||||
p.addContext(key, false)
|
||||
}
|
||||
func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} }
|
||||
func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) }
|
||||
func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok }
|
||||
func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray }
|
||||
func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) }
|
||||
|
||||
// current returns the full key name of the current context.
|
||||
func (p *parser) current() string {
|
||||
|
@ -646,112 +708,131 @@ func stripFirstNewline(s string) string {
|
|||
return s
|
||||
}
|
||||
|
||||
// Remove newlines inside triple-quoted strings if a line ends with "\".
|
||||
func stripEscapedNewlines(s string) string {
|
||||
split := strings.Split(s, "\n")
|
||||
if len(split) < 1 {
|
||||
return s
|
||||
}
|
||||
// stripEscapedNewlines removes whitespace after line-ending backslashes in
|
||||
// multiline strings.
|
||||
//
|
||||
// A line-ending backslash is an unescaped \ followed only by whitespace until
|
||||
// the next newline. After a line-ending backslash, all whitespace is removed
|
||||
// until the next non-whitespace character.
|
||||
func (p *parser) stripEscapedNewlines(s string) string {
|
||||
var (
|
||||
b strings.Builder
|
||||
i int
|
||||
)
|
||||
b.Grow(len(s))
|
||||
for {
|
||||
ix := strings.Index(s[i:], `\`)
|
||||
if ix < 0 {
|
||||
b.WriteString(s)
|
||||
return b.String()
|
||||
}
|
||||
i += ix
|
||||
|
||||
escNL := false // Keep track of the last non-blank line was escaped.
|
||||
for i, line := range split {
|
||||
line = strings.TrimRight(line, " \t\r")
|
||||
|
||||
if len(line) == 0 || line[len(line)-1] != '\\' {
|
||||
split[i] = strings.TrimRight(split[i], "\r")
|
||||
if !escNL && i != len(split)-1 {
|
||||
split[i] += "\n"
|
||||
if len(s) > i+1 && s[i+1] == '\\' {
|
||||
// Escaped backslash.
|
||||
i += 2
|
||||
continue
|
||||
}
|
||||
// Scan until the next non-whitespace.
|
||||
j := i + 1
|
||||
whitespaceLoop:
|
||||
for ; j < len(s); j++ {
|
||||
switch s[j] {
|
||||
case ' ', '\t', '\r', '\n':
|
||||
default:
|
||||
break whitespaceLoop
|
||||
}
|
||||
}
|
||||
if j == i+1 {
|
||||
// Not a whitespace escape.
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
escBS := true
|
||||
for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
|
||||
escBS = !escBS
|
||||
}
|
||||
if escNL {
|
||||
line = strings.TrimLeft(line, " \t\r")
|
||||
}
|
||||
escNL = !escBS
|
||||
|
||||
if escBS {
|
||||
split[i] += "\n"
|
||||
if !strings.Contains(s[i:j], "\n") {
|
||||
// This is not a line-ending backslash. (It's a bad escape sequence,
|
||||
// but we can let replaceEscapes catch it.)
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
split[i] = line[:len(line)-1] // Remove \
|
||||
if len(split)-1 > i {
|
||||
split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
|
||||
}
|
||||
b.WriteString(s[:i])
|
||||
s = s[j:]
|
||||
i = 0
|
||||
}
|
||||
return strings.Join(split, "")
|
||||
}
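Note: the rewritten stripEscapedNewlines above implements the TOML rule that an unescaped backslash at the end of a line in a multi-line basic string removes the newline and all whitespace up to the next non-whitespace character. A minimal sketch of the observable behavior through the public API (illustrative example, not part of this diff):

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	// A line-ending backslash in a multi-line basic string swallows the
	// newline and the indentation of the following line.
	doc := "s = \"\"\"one \\\n      two\"\"\"\n"

	var v struct{ S string }
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.S) // "one two"
}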
|
||||
|
||||
func (p *parser) replaceEscapes(it item, str string) string {
|
||||
replaced := make([]rune, 0, len(str))
|
||||
s := []byte(str)
|
||||
r := 0
|
||||
for r < len(s) {
|
||||
if s[r] != '\\' {
|
||||
c, size := utf8.DecodeRune(s[r:])
|
||||
r += size
|
||||
replaced = append(replaced, c)
|
||||
var (
|
||||
b strings.Builder
|
||||
skip = 0
|
||||
)
|
||||
b.Grow(len(str))
|
||||
for i, c := range str {
|
||||
if skip > 0 {
|
||||
skip--
|
||||
continue
|
||||
}
|
||||
r += 1
|
||||
if r >= len(s) {
|
||||
if c != '\\' {
|
||||
b.WriteRune(c)
|
||||
continue
|
||||
}
|
||||
|
||||
if i >= len(str) {
|
||||
p.bug("Escape sequence at end of string.")
|
||||
return ""
|
||||
}
|
||||
switch s[r] {
|
||||
switch str[i+1] {
|
||||
default:
|
||||
p.bug("Expected valid escape code after \\, but got %q.", s[r])
|
||||
return ""
|
||||
p.bug("Expected valid escape code after \\, but got %q.", str[i+1])
|
||||
case ' ', '\t':
|
||||
p.panicItemf(it, "invalid escape: '\\%c'", s[r])
|
||||
return ""
|
||||
p.panicItemf(it, "invalid escape: '\\%c'", str[i+1])
|
||||
case 'b':
|
||||
replaced = append(replaced, rune(0x0008))
|
||||
r += 1
|
||||
b.WriteByte(0x08)
|
||||
skip = 1
|
||||
case 't':
|
||||
replaced = append(replaced, rune(0x0009))
|
||||
r += 1
|
||||
b.WriteByte(0x09)
|
||||
skip = 1
|
||||
case 'n':
|
||||
replaced = append(replaced, rune(0x000A))
|
||||
r += 1
|
||||
b.WriteByte(0x0a)
|
||||
skip = 1
|
||||
case 'f':
|
||||
replaced = append(replaced, rune(0x000C))
|
||||
r += 1
|
||||
b.WriteByte(0x0c)
|
||||
skip = 1
|
||||
case 'r':
|
||||
replaced = append(replaced, rune(0x000D))
|
||||
r += 1
|
||||
b.WriteByte(0x0d)
|
||||
skip = 1
|
||||
case 'e':
|
||||
if p.tomlNext {
|
||||
b.WriteByte(0x1b)
|
||||
skip = 1
|
||||
}
|
||||
case '"':
|
||||
replaced = append(replaced, rune(0x0022))
|
||||
r += 1
|
||||
b.WriteByte(0x22)
|
||||
skip = 1
|
||||
case '\\':
|
||||
replaced = append(replaced, rune(0x005C))
|
||||
r += 1
|
||||
b.WriteByte(0x5c)
|
||||
skip = 1
|
||||
// The lexer guarantees the correct number of characters are present;
|
||||
// don't need to check here.
|
||||
case 'x':
|
||||
if p.tomlNext {
|
||||
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4])
|
||||
b.WriteRune(escaped)
|
||||
skip = 3
|
||||
}
|
||||
case 'u':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 5
|
||||
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6])
|
||||
b.WriteRune(escaped)
|
||||
skip = 5
|
||||
case 'U':
|
||||
// At this point, we know we have a Unicode escape of the form
|
||||
// `uXXXX` at [r, r+9). (Because the lexer guarantees this
|
||||
// for us.)
|
||||
escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9])
|
||||
replaced = append(replaced, escaped)
|
||||
r += 9
|
||||
escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10])
|
||||
b.WriteRune(escaped)
|
||||
skip = 9
|
||||
}
|
||||
}
|
||||
return string(replaced)
|
||||
return b.String()
|
||||
}
|
||||
|
||||
func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune {
|
||||
s := string(bs)
|
||||
func (p *parser) asciiEscapeToUnicode(it item, s string) rune {
|
||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
||||
if err != nil {
|
||||
p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err)
|
||||
|
|
|
@ -25,10 +25,8 @@ type field struct {
|
|||
// breaking ties with index sequence.
|
||||
type byName []field
|
||||
|
||||
func (x byName) Len() int { return len(x) }
|
||||
|
||||
func (x byName) Len() int { return len(x) }
|
||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byName) Less(i, j int) bool {
|
||||
if x[i].name != x[j].name {
|
||||
return x[i].name < x[j].name
|
||||
|
@ -45,10 +43,8 @@ func (x byName) Less(i, j int) bool {
|
|||
// byIndex sorts field by index sequence.
|
||||
type byIndex []field
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
|
||||
func (x byIndex) Len() int { return len(x) }
|
||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
||||
|
||||
func (x byIndex) Less(i, j int) bool {
|
||||
for k, xik := range x[i].index {
|
||||
if k >= len(x[j].index) {
|
||||
|
|
|
@ -22,13 +22,8 @@ func typeIsTable(t tomlType) bool {
|
|||
|
||||
type tomlBaseType string
|
||||
|
||||
func (btype tomlBaseType) typeString() string {
|
||||
return string(btype)
|
||||
}
|
||||
|
||||
func (btype tomlBaseType) String() string {
|
||||
return btype.typeString()
|
||||
}
|
||||
func (btype tomlBaseType) typeString() string { return string(btype) }
|
||||
func (btype tomlBaseType) String() string { return btype.typeString() }
|
||||
|
||||
var (
|
||||
tomlInteger tomlBaseType = "Integer"
|
||||
|
@ -54,7 +49,7 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
|
|||
return tomlFloat
|
||||
case itemDatetime:
|
||||
return tomlDatetime
|
||||
case itemString:
|
||||
case itemString, itemStringEsc:
|
||||
return tomlString
|
||||
case itemMultilineString:
|
||||
return tomlString
|
||||
|
|
|
@ -6,25 +6,20 @@ linters:
|
|||
disable-all: true
|
||||
enable:
|
||||
- bodyclose
|
||||
- deadcode
|
||||
- depguard
|
||||
- dogsled
|
||||
- goconst
|
||||
- gocritic
|
||||
- gofmt
|
||||
- goimports
|
||||
- golint
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
- interfacer
|
||||
- maligned
|
||||
- misspell
|
||||
- prealloc
|
||||
- scopelint
|
||||
- exportloopref
|
||||
- revive
|
||||
- staticcheck
|
||||
- structcheck
|
||||
- stylecheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
|
|
|
@ -9,18 +9,39 @@ before:
|
|||
builds:
|
||||
- env:
|
||||
- CGO_ENABLED=0
|
||||
- >-
|
||||
{{- if eq .Os "darwin" }}
|
||||
{{- if eq .Arch "amd64"}}CC=o64-clang{{- end }}
|
||||
{{- if eq .Arch "arm64"}}CC=aarch64-apple-darwin20.2-clang{{- end }}
|
||||
{{- end }}
|
||||
{{- if eq .Os "windows" }}
|
||||
{{- if eq .Arch "amd64" }}CC=x86_64-w64-mingw32-gcc{{- end }}
|
||||
{{- end }}
|
||||
main: ./cmd/escargs
|
||||
goos:
|
||||
- linux
|
||||
- windows
|
||||
- darwin
|
||||
archives:
|
||||
- replacements:
|
||||
darwin: Darwin
|
||||
linux: Linux
|
||||
windows: Windows
|
||||
386: i386
|
||||
amd64: x86_64
|
||||
- freebsd
|
||||
goarch:
|
||||
- amd64
|
||||
- arm64
|
||||
- arm
|
||||
goarm:
|
||||
- 6
|
||||
- 7
|
||||
goamd64:
|
||||
- v2
|
||||
- v3
|
||||
ignore:
|
||||
- goos: darwin
|
||||
goarch: 386
|
||||
- goos: linux
|
||||
goarch: arm
|
||||
goarm: 7
|
||||
- goarm: mips64
|
||||
- gomips: hardfloat
|
||||
- goamd64: v4
|
||||
checksum:
|
||||
name_template: 'checksums.txt'
|
||||
snapshot:
|
||||
|
|
|
@ -25,9 +25,9 @@ and [much faster][v2-bench]. If you only need reading and writing TOML documents
|
|||
(majority of cases), those features are implemented and the API unlikely to
|
||||
change.
|
||||
|
||||
The remaining features (Document structure editing and tooling) will be added
|
||||
shortly. While pull-requests are welcome on v1, no active development is
|
||||
expected on it. When v2.0.0 is released, v1 will be deprecated.
|
||||
The remaining features will be added shortly. While pull-requests are welcome on
|
||||
v1, no active development is expected on it. When v2.0.0 is released, v1 will be
|
||||
deprecated.
|
||||
|
||||
👉 [go-toml v2][v2]
|
||||
|
||||
|
|
|
@ -0,0 +1,19 @@
|
|||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
Use this section to tell people about which versions of your project are
|
||||
currently being supported with security updates.
|
||||
|
||||
| Version | Supported |
|
||||
| ---------- | ------------------ |
|
||||
| Latest 2.x | :white_check_mark: |
|
||||
| All 1.x | :x: |
|
||||
| All 0.x | :x: |
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
Email a vulnerability report to `security@pelletier.codes`. Make sure to include
|
||||
as many details as possible to reproduce the vulnerability. This is a
|
||||
side-project: I will try to get back to you as quickly as possible, time
|
||||
permitting in my personal life. Providing a working patch helps very much!
|
|
@ -1113,7 +1113,7 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref
|
|||
return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String())
|
||||
}
|
||||
|
||||
if val.Convert(reflect.TypeOf(int(1))).Int() < 0 {
|
||||
if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 {
|
||||
return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String())
|
||||
}
|
||||
if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) {
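The added reflect.Uint64 guard above matters because converting a uint64 larger than math.MaxInt64 to int makes it look negative, so the old check rejected values that actually fit the unsigned target type. A small illustration of that behavior with the standard reflect package (illustrative, not code from this diff):

package main

import (
	"fmt"
	"math"
	"reflect"
)

func main() {
	val := reflect.ValueOf(uint64(math.MaxUint64))

	// Reinterpreted as a signed int, the value appears negative...
	asInt := val.Convert(reflect.TypeOf(int(1))).Int()
	fmt.Println(asInt < 0) // true

	// ...but as an unsigned value it is perfectly valid for a uint64 field.
	fmt.Println(val.Uint() == math.MaxUint64) // true
}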
|
||||
|
|
|
@ -293,42 +293,41 @@ func (p *tomlParser) parseRvalue() interface{} {
|
|||
return math.NaN()
|
||||
case tokenInteger:
|
||||
cleanedVal := cleanupNumberToken(tok.val)
|
||||
var err error
|
||||
var val int64
|
||||
base := 10
|
||||
s := cleanedVal
|
||||
checkInvalidUnderscore := numberContainsInvalidUnderscore
|
||||
if len(cleanedVal) >= 3 && cleanedVal[0] == '0' {
|
||||
switch cleanedVal[1] {
|
||||
case 'x':
|
||||
err = hexNumberContainsInvalidUnderscore(tok.val)
|
||||
if err != nil {
|
||||
p.raiseError(tok, "%s", err)
|
||||
}
|
||||
val, err = strconv.ParseInt(cleanedVal[2:], 16, 64)
|
||||
checkInvalidUnderscore = hexNumberContainsInvalidUnderscore
|
||||
base = 16
|
||||
case 'o':
|
||||
err = numberContainsInvalidUnderscore(tok.val)
|
||||
if err != nil {
|
||||
p.raiseError(tok, "%s", err)
|
||||
}
|
||||
val, err = strconv.ParseInt(cleanedVal[2:], 8, 64)
|
||||
base = 8
|
||||
case 'b':
|
||||
err = numberContainsInvalidUnderscore(tok.val)
|
||||
if err != nil {
|
||||
p.raiseError(tok, "%s", err)
|
||||
}
|
||||
val, err = strconv.ParseInt(cleanedVal[2:], 2, 64)
|
||||
base = 2
|
||||
default:
|
||||
panic("invalid base") // the lexer should catch this first
|
||||
}
|
||||
} else {
|
||||
err = numberContainsInvalidUnderscore(tok.val)
|
||||
if err != nil {
|
||||
p.raiseError(tok, "%s", err)
|
||||
}
|
||||
val, err = strconv.ParseInt(cleanedVal, 10, 64)
|
||||
s = cleanedVal[2:]
|
||||
}
|
||||
|
||||
err := checkInvalidUnderscore(tok.val)
|
||||
if err != nil {
|
||||
p.raiseError(tok, "%s", err)
|
||||
}
|
||||
return val
|
||||
|
||||
var val interface{}
|
||||
val, err = strconv.ParseInt(s, base, 64)
|
||||
if err == nil {
|
||||
return val
|
||||
}
|
||||
|
||||
if s[0] != '-' {
|
||||
if val, err = strconv.ParseUint(s, base, 64); err == nil {
|
||||
return val
|
||||
}
|
||||
}
|
||||
p.raiseError(tok, "%s", err)
|
||||
case tokenFloat:
|
||||
err := numberContainsInvalidUnderscore(tok.val)
|
||||
if err != nil {
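The reworked tokenInteger branch above parses with strconv.ParseInt for the detected base and, for non-negative literals that overflow int64, falls back to strconv.ParseUint, so values up to the uint64 maximum are preserved. A hedged sketch of that fallback in isolation (illustrative helper, not the function go-toml uses):

package main

import (
	"fmt"
	"strconv"
)

// parseTOMLInt mirrors the fallback idea: signed first, then unsigned for
// positive literals that overflow int64.
func parseTOMLInt(s string, base int) (interface{}, error) {
	if v, err := strconv.ParseInt(s, base, 64); err == nil {
		return v, nil
	}
	if s[0] != '-' {
		if v, err := strconv.ParseUint(s, base, 64); err == nil {
			return v, nil
		}
	}
	return nil, fmt.Errorf("value %q does not fit in int64 or uint64", s)
}

func main() {
	v1, _ := parseTOMLInt("9223372036854775807", 10)  // max int64
	v2, _ := parseTOMLInt("18446744073709551615", 10) // max uint64
	fmt.Printf("%T %v\n", v1, v1) // int64 9223372036854775807
	fmt.Printf("%T %v\n", v2, v2) // uint64 18446744073709551615
}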
|
||||
|
|
|
@ -471,7 +471,7 @@ func LoadBytes(b []byte) (tree *Tree, err error) {
|
|||
if _, ok := r.(runtime.Error); ok {
|
||||
panic(r)
|
||||
}
|
||||
err = errors.New(r.(string))
|
||||
err = fmt.Errorf("%s", r)
|
||||
}
|
||||
}()
|
||||
|
||||
|
|
|
@ -2,8 +2,8 @@
|
|||
## explicit; go 1.16
|
||||
github.com/Azure/go-ansiterm
|
||||
github.com/Azure/go-ansiterm/winterm
|
||||
# github.com/BurntSushi/toml v1.0.0
|
||||
## explicit; go 1.16
|
||||
# github.com/BurntSushi/toml v1.4.0
|
||||
## explicit; go 1.18
|
||||
github.com/BurntSushi/toml
|
||||
github.com/BurntSushi/toml/internal
|
||||
# github.com/MakeNowJust/heredoc v1.0.0
|
||||
|
@ -15,7 +15,7 @@ github.com/NYTimes/gziphandler
|
|||
# github.com/adhocore/gronx v1.6.3
|
||||
## explicit; go 1.13
|
||||
github.com/adhocore/gronx
|
||||
# github.com/alessio/shellescape v1.4.1
|
||||
# github.com/alessio/shellescape v1.4.2
|
||||
## explicit; go 1.14
|
||||
github.com/alessio/shellescape
|
||||
# github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df
|
||||
|
@ -366,7 +366,7 @@ github.com/opensearch-project/opensearch-go/internal/version
|
|||
github.com/opensearch-project/opensearch-go/opensearchapi
|
||||
github.com/opensearch-project/opensearch-go/opensearchtransport
|
||||
github.com/opensearch-project/opensearch-go/signer
|
||||
# github.com/pelletier/go-toml v1.9.4
|
||||
# github.com/pelletier/go-toml v1.9.5
|
||||
## explicit; go 1.12
|
||||
github.com/pelletier/go-toml
|
||||
# github.com/pelletier/go-toml/v2 v2.1.0
|
||||
|
@ -1847,8 +1847,8 @@ sigs.k8s.io/custom-metrics-apiserver/pkg/registry/external_metrics
|
|||
## explicit; go 1.18
|
||||
sigs.k8s.io/json
|
||||
sigs.k8s.io/json/internal/golang/encoding/json
|
||||
# sigs.k8s.io/kind v0.22.0
|
||||
## explicit; go 1.16
|
||||
# sigs.k8s.io/kind v0.24.0
|
||||
## explicit; go 1.17
|
||||
sigs.k8s.io/kind/pkg/apis/config/defaults
|
||||
sigs.k8s.io/kind/pkg/apis/config/v1alpha4
|
||||
sigs.k8s.io/kind/pkg/cluster
|
||||
|
@ -1871,6 +1871,7 @@ sigs.k8s.io/kind/pkg/cluster/internal/logs
|
|||
sigs.k8s.io/kind/pkg/cluster/internal/providers
|
||||
sigs.k8s.io/kind/pkg/cluster/internal/providers/common
|
||||
sigs.k8s.io/kind/pkg/cluster/internal/providers/docker
|
||||
sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl
|
||||
sigs.k8s.io/kind/pkg/cluster/internal/providers/podman
|
||||
sigs.k8s.io/kind/pkg/cluster/nodes
|
||||
sigs.k8s.io/kind/pkg/cluster/nodeutils
|
||||
|
|
|
@ -18,4 +18,4 @@ limitations under the License.
|
|||
package defaults
|
||||
|
||||
// Image is the default for the Config.Image field, aka the default node image.
|
||||
const Image = "kindest/node:v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245"
|
||||
const Image = "kindest/node:v1.31.0@sha256:53df588e04085fd41ae12de0c3fe4c72f7013bba32a20e7325357a1ac94ba865"
|
||||
|
|
|
@ -186,7 +186,7 @@ type Networking struct {
|
|||
// If DisableDefaultCNI is true, kind will not install the default CNI setup.
|
||||
// Instead the user should install their own CNI after creating the cluster.
|
||||
DisableDefaultCNI bool `yaml:"disableDefaultCNI,omitempty" json:"disableDefaultCNI,omitempty"`
|
||||
// KubeProxyMode defines if kube-proxy should operate in iptables or ipvs mode
|
||||
// KubeProxyMode defines if kube-proxy should operate in iptables, ipvs or nftables mode
|
||||
// Defaults to 'iptables' mode
|
||||
KubeProxyMode ProxyMode `yaml:"kubeProxyMode,omitempty" json:"kubeProxyMode,omitempty"`
|
||||
// DNSSearch defines the DNS search domain to use for nodes. If not set, this will be inherited from the host.
|
||||
|
@ -213,6 +213,8 @@ const (
|
|||
IPTablesProxyMode ProxyMode = "iptables"
|
||||
// IPVSProxyMode sets ProxyMode to ipvs
|
||||
IPVSProxyMode ProxyMode = "ipvs"
|
||||
// NFTablesProxyMode sets ProxyMode to nftables
|
||||
NFTablesProxyMode ProxyMode = "nftables"
|
||||
)
|
||||
|
||||
// PatchJSON6902 represents an inline kustomize json 6902 patch
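The new NFTablesProxyMode constant above means a kind cluster configuration can now request the nftables kube-proxy backend alongside iptables and ipvs. A minimal sketch using the public v1alpha4 types (field names taken from this diff; treat as illustrative):

package main

import (
	"fmt"

	"sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
)

func main() {
	// Ask kube-proxy to run in nftables mode; "iptables" remains the default.
	cfg := v1alpha4.Cluster{
		Networking: v1alpha4.Networking{
			KubeProxyMode: v1alpha4.NFTablesProxyMode,
		},
	}
	fmt.Println(cfg.Networking.KubeProxyMode) // nftables
}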
|
||||
|
|
34
vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go
generated
vendored
34
vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go
generated
vendored
|
@ -60,24 +60,38 @@ func (a *action) Execute(ctx *actions.ActionContext) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// skip preflight checks, as these have undesirable side effects
|
||||
// and don't tell us much. requires kubeadm 1.13+
|
||||
skipPhases := "preflight"
|
||||
if a.skipKubeProxy {
|
||||
skipPhases += ",addon/kube-proxy"
|
||||
kubeVersionStr, err := nodeutils.KubeVersion(node)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get kubernetes version from node")
|
||||
}
|
||||
kubeVersion, err := version.ParseGeneric(kubeVersionStr)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse kubernetes version %q", kubeVersionStr)
|
||||
}
|
||||
|
||||
// run kubeadm
|
||||
cmd := node.Command(
|
||||
args := []string{
|
||||
// init because this is the control plane node
|
||||
"kubeadm", "init",
|
||||
"--skip-phases="+skipPhases,
|
||||
"init",
|
||||
// specify our generated config file
|
||||
"--config=/kind/kubeadm.conf",
|
||||
"--skip-token-print",
|
||||
// increase verbosity for debugging
|
||||
"--v=6",
|
||||
)
|
||||
}
|
||||
|
||||
// Newer versions set this in the config file.
|
||||
if kubeVersion.LessThan(version.MustParseSemantic("v1.23.0")) {
|
||||
// Skip preflight to avoid pulling images.
|
||||
// Kind pre-pulls images and preflight may conflict with that.
|
||||
skipPhases := "preflight"
|
||||
if a.skipKubeProxy {
|
||||
skipPhases += ",addon/kube-proxy"
|
||||
}
|
||||
args = append(args, "--skip-phases="+skipPhases)
|
||||
}
|
||||
|
||||
// run kubeadm
|
||||
cmd := node.Command("kubeadm", args...)
|
||||
lines, err := exec.CombinedOutputLines(cmd)
|
||||
ctx.Logger.V(3).Info(strings.Join(lines, "\n"))
|
||||
if err != nil {
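For context, the version gate added above only passes --skip-phases on the kubeadm command line for node images older than v1.23; newer images get skipPhases from the generated kubeadm config instead. A small sketch of the comparison using the apimachinery version utilities that kind's internal version package mirrors (assumed equivalent, illustrative only):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/version"
)

func main() {
	cutoff := version.MustParseSemantic("v1.23.0")

	for _, raw := range []string{"v1.22.17", "v1.31.0"} {
		v, err := version.ParseGeneric(raw)
		if err != nil {
			panic(err)
		}
		// true for v1.22.x (CLI flag path), false for v1.31.0 (config file path).
		fmt.Println(raw, "needs --skip-phases flag:", v.LessThan(cutoff))
	}
}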
|
||||
|
|
30
vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go
generated
vendored
30
vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go
generated
vendored
|
@ -24,6 +24,7 @@ import (
|
|||
"sigs.k8s.io/kind/pkg/cluster/nodes"
|
||||
"sigs.k8s.io/kind/pkg/errors"
|
||||
"sigs.k8s.io/kind/pkg/exec"
|
||||
"sigs.k8s.io/kind/pkg/internal/version"
|
||||
"sigs.k8s.io/kind/pkg/log"
|
||||
|
||||
"sigs.k8s.io/kind/pkg/cluster/nodeutils"
|
||||
|
@ -117,18 +118,31 @@ func joinWorkers(
|
|||
|
||||
// runKubeadmJoin executes kubeadm join command
|
||||
func runKubeadmJoin(logger log.Logger, node nodes.Node) error {
|
||||
// run kubeadm join
|
||||
// TODO(bentheelder): this should be using the config file
|
||||
cmd := node.Command(
|
||||
"kubeadm", "join",
|
||||
kubeVersionStr, err := nodeutils.KubeVersion(node)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get kubernetes version from node")
|
||||
}
|
||||
kubeVersion, err := version.ParseGeneric(kubeVersionStr)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse kubernetes version %q", kubeVersionStr)
|
||||
}
|
||||
|
||||
args := []string{
|
||||
"join",
|
||||
// the join command uses the config file generated in a well known location
|
||||
"--config", "/kind/kubeadm.conf",
|
||||
// skip preflight checks, as these have undesirable side effects
|
||||
// and don't tell us much. requires kubeadm 1.13+
|
||||
"--skip-phases=preflight",
|
||||
// increase verbosity for debugging
|
||||
"--v=6",
|
||||
)
|
||||
}
|
||||
// Newer versions set this in the config file.
|
||||
if kubeVersion.LessThan(version.MustParseSemantic("v1.23.0")) {
|
||||
// Skip preflight to avoid pulling images.
|
||||
// Kind pre-pulls images and preflight may conflict with that.
|
||||
args = append(args, "--skip-phases=preflight")
|
||||
}
|
||||
|
||||
// run kubeadm join
|
||||
cmd := node.Command("kubeadm", args...)
|
||||
lines, err := exec.CombinedOutputLines(cmd)
|
||||
logger.V(3).Info(strings.Join(lines, "\n"))
|
||||
if err != nil {
|
||||
|
|
|
@ -57,7 +57,7 @@ type ConfigData struct {
|
|||
// The Token for TLS bootstrap
|
||||
Token string
|
||||
|
||||
// KubeProxyMode defines the kube-proxy mode between iptables or ipvs
|
||||
// KubeProxyMode defines the kube-proxy mode between iptables, ipvs or nftables
|
||||
KubeProxyMode string
|
||||
// The subnet used for pods
|
||||
PodSubnet string
|
||||
|
@ -79,10 +79,6 @@ type ConfigData struct {
|
|||
// RootlessProvider is true if kind is running with rootless mode
|
||||
RootlessProvider bool
|
||||
|
||||
// DisableLocalStorageCapacityIsolation is typically set true based on RootlessProvider
|
||||
// based on the Kubernetes version, if true kubelet localStorageCapacityIsolation is set false
|
||||
DisableLocalStorageCapacityIsolation bool
|
||||
|
||||
// DerivedConfigData contains fields computed from the other fields for use
|
||||
// in the config templates and should only be populated by calling Derive()
|
||||
DerivedConfigData
|
||||
|
@ -107,6 +103,10 @@ type DerivedConfigData struct {
|
|||
IPv6 bool
|
||||
// kubelet cgroup driver, based on kubernetes version
|
||||
CgroupDriver string
|
||||
// JoinSkipPhases are the skipPhases values for the JoinConfiguration.
|
||||
JoinSkipPhases []string
|
||||
// InitSkipPhases are the skipPhases values for the InitConfiguration.
|
||||
InitSkipPhases []string
|
||||
}
|
||||
|
||||
type FeatureGate struct {
|
||||
|
@ -166,6 +166,15 @@ func (c *ConfigData) Derive() {
|
|||
runtimeConfig = append(runtimeConfig, fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
c.RuntimeConfigString = strings.Join(runtimeConfig, ",")
|
||||
|
||||
// Skip preflight to avoid pulling images.
|
||||
// Kind pre-pulls images and preflight may conflict with that.
|
||||
// requires kubeadm 1.22+
|
||||
c.JoinSkipPhases = []string{"preflight"}
|
||||
c.InitSkipPhases = []string{"preflight"}
|
||||
if c.KubeProxyMode == string(config.NoneProxyMode) {
|
||||
c.InitSkipPhases = append(c.InitSkipPhases, "addon/kube-proxy")
|
||||
}
|
||||
}
|
||||
|
||||
// See docs for these APIs at:
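The InitSkipPhases/JoinSkipPhases fields populated in Derive() above are rendered into the kubeadm InitConfiguration and JoinConfiguration by the template additions further down in this file. A simplified, self-contained sketch of how such a skipPhases block renders (this is not kind's actual template, just the same idea):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Simplified stand-in for the skipPhases section of kind's kubeadm template.
	const tpl = `skipPhases:
{{- range . }}
  - "{{ . }}"
{{- end }}
`
	phases := []string{"preflight", "addon/kube-proxy"}
	t := template.Must(template.New("skip").Parse(tpl))
	_ = t.Execute(os.Stdout, phases)
	// Output:
	// skipPhases:
	//   - "preflight"
	//   - "addon/kube-proxy"
}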
|
||||
|
@ -285,7 +294,7 @@ evictionHard:
|
|||
{{ range $index, $gate := .SortedFeatureGates }}
|
||||
"{{ (StructuralData $gate.Name) }}": {{ $gate.Value }}
|
||||
{{end}}{{end}}
|
||||
{{if ne .KubeProxyMode "None"}}
|
||||
{{if ne .KubeProxyMode "none"}}
|
||||
---
|
||||
apiVersion: kubeproxy.config.k8s.io/v1alpha1
|
||||
kind: KubeProxyConfiguration
|
||||
|
@ -302,6 +311,12 @@ conntrack:
|
|||
# Skip setting sysctl value "net.netfilter.nf_conntrack_max"
|
||||
# It is a global variable that affects other namespaces
|
||||
maxPerCore: 0
|
||||
# Set sysctl value "net.netfilter.nf_conntrack_tcp_be_liberal"
|
||||
# for nftables proxy (theoretically for kernels older than 6.1)
|
||||
# xref: https://github.com/kubernetes/kubernetes/issues/117924
|
||||
{{if and (eq .KubeProxyMode "nftables") (not .RootlessProvider)}}
|
||||
tcpBeLiberal: true
|
||||
{{end}}
|
||||
{{if .RootlessProvider}}
|
||||
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
|
||||
tcpEstablishedTimeout: 0s
|
||||
|
@ -374,6 +389,12 @@ nodeRegistration:
|
|||
node-ip: "{{ .NodeAddress }}"
|
||||
provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}"
|
||||
node-labels: "{{ .NodeLabels }}"
|
||||
{{ if .InitSkipPhases -}}
|
||||
skipPhases:
|
||||
{{- range $phase := .InitSkipPhases }}
|
||||
- "{{ $phase }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
---
|
||||
# no-op entry that exists solely so it can be patched
|
||||
apiVersion: kubeadm.k8s.io/v1beta3
|
||||
|
@ -397,6 +418,12 @@ discovery:
|
|||
apiServerEndpoint: "{{ .ControlPlaneEndpoint }}"
|
||||
token: "{{ .Token }}"
|
||||
unsafeSkipCAVerification: true
|
||||
{{ if .JoinSkipPhases -}}
|
||||
skipPhases:
|
||||
{{ range $phase := .JoinSkipPhases -}}
|
||||
- "{{ $phase }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
---
|
||||
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
kind: KubeletConfiguration
|
||||
|
@ -422,8 +449,7 @@ evictionHard:
|
|||
{{ range $index, $gate := .SortedFeatureGates }}
|
||||
"{{ (StructuralData $gate.Name) }}": {{ $gate.Value }}
|
||||
{{end}}{{end}}
|
||||
{{if .DisableLocalStorageCapacityIsolation}}localStorageCapacityIsolation: false{{end}}
|
||||
{{if ne .KubeProxyMode "None"}}
|
||||
{{if ne .KubeProxyMode "none"}}
|
||||
---
|
||||
apiVersion: kubeproxy.config.k8s.io/v1alpha1
|
||||
kind: KubeProxyConfiguration
|
||||
|
@ -440,6 +466,12 @@ conntrack:
|
|||
# Skip setting sysctl value "net.netfilter.nf_conntrack_max"
|
||||
# It is a global variable that affects other namespaces
|
||||
maxPerCore: 0
|
||||
# Set sysctl value "net.netfilter.nf_conntrack_tcp_be_liberal"
|
||||
# for nftables proxy (theoretically for kernels older than 6.1)
|
||||
# xref: https://github.com/kubernetes/kubernetes/issues/117924
|
||||
{{if and (eq .KubeProxyMode "nftables") (not .RootlessProvider)}}
|
||||
tcpBeLiberal: true
|
||||
{{end}}
|
||||
{{if .RootlessProvider}}
|
||||
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
|
||||
tcpEstablishedTimeout: 0s
|
||||
|
@ -468,16 +500,6 @@ func Config(data ConfigData) (config string, err error) {
|
|||
return "", errors.Errorf("version %q is not compatible with rootless provider (hint: kind v0.11.x may work with this version)", ver)
|
||||
}
|
||||
data.FeatureGates["KubeletInUserNamespace"] = true
|
||||
|
||||
// For avoiding err="failed to get rootfs info: failed to get device for dir \"/var/lib/kubelet\": could not find device with major: 0, minor: 41 in cached partitions map"
|
||||
// https://github.com/kubernetes-sigs/kind/issues/2524
|
||||
if ver.LessThan(version.MustParseSemantic("v1.25.0-alpha.3.440+0064010cddfa00")) {
|
||||
// this feature gate was removed in v1.25 and replaced by an opt-out to disable
|
||||
data.FeatureGates["LocalStorageCapacityIsolation"] = false
|
||||
} else {
|
||||
// added in v1.25 https://github.com/kubernetes/kubernetes/pull/111513
|
||||
data.DisableLocalStorageCapacityIsolation = true
|
||||
}
|
||||
}
|
||||
|
||||
// assume the latest API version, then fallback if the k8s version is too low
|
||||
|
|
|
@ -403,10 +403,7 @@ func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings .
|
|||
}
|
||||
|
||||
func createContainer(name string, args []string) error {
|
||||
if err := exec.Command("docker", append([]string{"run", "--name", name}, args...)...).Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return exec.Command("docker", append([]string{"run", "--name", name}, args...)...).Run()
|
||||
}
|
||||
|
||||
func createContainerWithWaitUntilSystemdReachesMultiUserSystem(name string, args []string) error {
|
||||
|
|
2
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/OWNERS
generated
vendored
Normal file
2
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/OWNERS
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
labels:
|
||||
- area/provider/nerdctl
|
24
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/constants.go
generated
vendored
Normal file
24
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/constants.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nerdctl
|
||||
|
||||
// clusterLabelKey is applied to each "node" container for identification
|
||||
const clusterLabelKey = "io.x-k8s.kind.cluster"
|
||||
|
||||
// nodeRoleLabelKey is applied to each "node" container for categorization
|
||||
// of nodes by role
|
||||
const nodeRoleLabelKey = "io.x-k8s.kind.role"
|
91
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/images.go
generated
vendored
Normal file
91
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/images.go
generated
vendored
Normal file
|
@ -0,0 +1,91 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nerdctl
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/kind/pkg/errors"
|
||||
"sigs.k8s.io/kind/pkg/exec"
|
||||
"sigs.k8s.io/kind/pkg/log"
|
||||
|
||||
"sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
|
||||
"sigs.k8s.io/kind/pkg/internal/apis/config"
|
||||
"sigs.k8s.io/kind/pkg/internal/cli"
|
||||
)
|
||||
|
||||
// ensureNodeImages ensures that the node images used by the create
|
||||
// configuration are present
|
||||
func ensureNodeImages(logger log.Logger, status *cli.Status, cfg *config.Cluster, binaryName string) error {
|
||||
// pull each required image
|
||||
for _, image := range common.RequiredNodeImages(cfg).List() {
|
||||
// prints user friendly message
|
||||
friendlyImageName, image := sanitizeImage(image)
|
||||
status.Start(fmt.Sprintf("Ensuring node image (%s) 🖼", friendlyImageName))
|
||||
if _, err := pullIfNotPresent(logger, image, 4, binaryName); err != nil {
|
||||
status.End(false)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// pullIfNotPresent will pull an image if it is not present locally
|
||||
// retrying up to retries times
|
||||
// it returns true if it attempted to pull, and any errors from pulling
|
||||
func pullIfNotPresent(logger log.Logger, image string, retries int, binaryName string) (pulled bool, err error) {
|
||||
// TODO(bentheelder): switch most (all) of the logging here to debug level
|
||||
// once we have configurable log levels
|
||||
// if this did not return an error, then the image exists locally
|
||||
cmd := exec.Command(binaryName, "inspect", "--type=image", image)
|
||||
if err := cmd.Run(); err == nil {
|
||||
logger.V(1).Infof("Image: %s present locally", image)
|
||||
return false, nil
|
||||
}
|
||||
// otherwise try to pull it
|
||||
return true, pull(logger, image, retries, binaryName)
|
||||
}
|
||||
|
||||
// pull pulls an image, retrying up to retries times
|
||||
func pull(logger log.Logger, image string, retries int, binaryName string) error {
|
||||
logger.V(1).Infof("Pulling image: %s ...", image)
|
||||
err := exec.Command(binaryName, "pull", image).Run()
|
||||
// retry pulling up to retries times if necessary
|
||||
if err != nil {
|
||||
for i := 0; i < retries; i++ {
|
||||
time.Sleep(time.Second * time.Duration(i+1))
|
||||
logger.V(1).Infof("Trying again to pull image: %q ... %v", image, err)
|
||||
// TODO(bentheelder): add some backoff / sleep?
|
||||
err = exec.Command(binaryName, "pull", image).Run()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors.Wrapf(err, "failed to pull image %q", image)
|
||||
}
|
||||
|
||||
// sanitizeImage is a helper to return human readable image name and
|
||||
// the docker pullable image name from the provided image
|
||||
func sanitizeImage(image string) (string, string) {
|
||||
if strings.Contains(image, "@sha256:") {
|
||||
return strings.Split(image, "@sha256:")[0], image
|
||||
}
|
||||
return image, image
|
||||
}
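sanitizeImage above simply splits a digest-pinned reference into a human-readable name and the pullable reference. For example, with the new default node image from this bump (illustrative sketch duplicating the same splitting rule):

package main

import (
	"fmt"
	"strings"
)

// Same rule as sanitizeImage above: show the tag to the user, keep the
// digest-pinned reference for pulling.
func sanitizeImage(image string) (friendly, pullable string) {
	if strings.Contains(image, "@sha256:") {
		return strings.Split(image, "@sha256:")[0], image
	}
	return image, image
}

func main() {
	img := "kindest/node:v1.31.0@sha256:53df588e04085fd41ae12de0c3fe4c72f7013bba32a20e7325357a1ac94ba865"
	friendly, pullable := sanitizeImage(img)
	fmt.Println(friendly)        // kindest/node:v1.31.0
	fmt.Println(pullable == img) // true
}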
|
187
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/network.go
generated
vendored
Normal file
187
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/network.go
generated
vendored
Normal file
|
@ -0,0 +1,187 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nerdctl
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"sigs.k8s.io/kind/pkg/errors"
|
||||
"sigs.k8s.io/kind/pkg/exec"
|
||||
)
|
||||
|
||||
// This may be overridden by KIND_EXPERIMENTAL_DOCKER_NETWORK env,
|
||||
// experimentally...
|
||||
//
|
||||
// By default currently picking a single network is equivalent to the previous
|
||||
// behavior *except* that we moved from the default bridge to a user defined
|
||||
// network because the default bridge is actually special versus any other
|
||||
// docker network and lacks the embedded DNS
|
||||
//
|
||||
// For now this also makes it easier for apps to join the same network, and
|
||||
// leaves users with complex networking desires to create and manage their own
|
||||
// networks.
|
||||
const fixedNetworkName = "kind"
|
||||
|
||||
// ensureNetwork checks if docker network by name exists, if not it creates it
|
||||
func ensureNetwork(name, binaryName string) error {
|
||||
// check if network exists already and remove any duplicate networks
|
||||
exists, err := checkIfNetworkExists(name, binaryName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// network already exists, we're good
|
||||
// TODO: the network might already exist and not have ipv6 ... :|
|
||||
// discussion: https://github.com/kubernetes-sigs/kind/pull/1508#discussion_r414594198
|
||||
if exists {
|
||||
return nil
|
||||
}
|
||||
|
||||
subnet := generateULASubnetFromName(name, 0)
|
||||
mtu := getDefaultNetworkMTU(binaryName)
|
||||
err = createNetwork(name, subnet, mtu, binaryName)
|
||||
if err == nil {
|
||||
// Success!
|
||||
return nil
|
||||
}
|
||||
|
||||
// On the first try check if ipv6 fails entirely on this machine
|
||||
// https://github.com/kubernetes-sigs/kind/issues/1544
|
||||
// Otherwise if it's not a pool overlap error, fail
|
||||
// If it is, make more attempts below
|
||||
if isIPv6UnavailableError(err) {
|
||||
// only one attempt, IPAM is automatic in ipv4 only
|
||||
return createNetwork(name, "", mtu, binaryName)
|
||||
}
|
||||
if isPoolOverlapError(err) {
|
||||
// pool overlap suggests perhaps another process created the network
|
||||
// check if network exists already and remove any duplicate networks
|
||||
exists, err := checkIfNetworkExists(name, binaryName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists {
|
||||
return nil
|
||||
}
|
||||
// otherwise we'll start trying with different subnets
|
||||
} else {
|
||||
// unknown error ...
|
||||
return err
|
||||
}
|
||||
|
||||
// keep trying for ipv6 subnets
|
||||
const maxAttempts = 5
|
||||
for attempt := int32(1); attempt < maxAttempts; attempt++ {
|
||||
subnet := generateULASubnetFromName(name, attempt)
|
||||
err = createNetwork(name, subnet, mtu, binaryName)
|
||||
if err == nil {
|
||||
// success!
|
||||
return nil
|
||||
}
|
||||
if isPoolOverlapError(err) {
|
||||
// pool overlap suggests perhaps another process created the network
|
||||
// check if network exists already and remove any duplicate networks
|
||||
exists, err := checkIfNetworkExists(name, binaryName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists {
|
||||
return nil
|
||||
}
|
||||
// otherwise we'll try again
|
||||
continue
|
||||
}
|
||||
// unknown error ...
|
||||
return err
|
||||
}
|
||||
return errors.New("exhausted attempts trying to find a non-overlapping subnet")
|
||||
}
|
||||
|
||||
func createNetwork(name, ipv6Subnet string, mtu int, binaryName string) error {
|
||||
args := []string{"network", "create", "-d=bridge"}
|
||||
// TODO: Not supported in nerdctl yet
|
||||
// "-o", "com.docker.network.bridge.enable_ip_masquerade=true",
|
||||
if mtu > 0 {
|
||||
args = append(args, "-o", fmt.Sprintf("com.docker.network.driver.mtu=%d", mtu))
|
||||
}
|
||||
if ipv6Subnet != "" {
|
||||
args = append(args, "--ipv6", "--subnet", ipv6Subnet)
|
||||
}
|
||||
args = append(args, name)
|
||||
return exec.Command(binaryName, args...).Run()
|
||||
}
|
||||
|
||||
// getDefaultNetworkMTU obtains the MTU from the docker default network
|
||||
func getDefaultNetworkMTU(binaryName string) int {
|
||||
cmd := exec.Command(binaryName, "network", "inspect", "bridge",
|
||||
"-f", `{{ index .Options "com.docker.network.driver.mtu" }}`)
|
||||
lines, err := exec.OutputLines(cmd)
|
||||
if err != nil || len(lines) != 1 {
|
||||
return 0
|
||||
}
|
||||
mtu, err := strconv.Atoi(lines[0])
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return mtu
|
||||
}
|
||||
|
||||
func checkIfNetworkExists(name, binaryName string) (bool, error) {
|
||||
out, err := exec.Output(exec.Command(
|
||||
binaryName, "network", "inspect",
|
||||
name, "--format={{.Name}}",
|
||||
))
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return strings.HasPrefix(string(out), name), err
|
||||
}
|
||||
|
||||
func isIPv6UnavailableError(err error) bool {
|
||||
rerr := exec.RunErrorForError(err)
|
||||
return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Cannot read IPv6 setup for bridge")
|
||||
}
|
||||
|
||||
func isPoolOverlapError(err error) bool {
|
||||
rerr := exec.RunErrorForError(err)
|
||||
return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Pool overlaps with other one on this address space") || strings.Contains(string(rerr.Output), "networks have overlapping")
|
||||
}
|
||||
|
||||
// generateULASubnetFromName generate an IPv6 subnet based on the
|
||||
// name and Nth probing attempt
|
||||
func generateULASubnetFromName(name string, attempt int32) string {
|
||||
ip := make([]byte, 16)
|
||||
ip[0] = 0xfc
|
||||
ip[1] = 0x00
|
||||
h := sha1.New()
|
||||
_, _ = h.Write([]byte(name))
|
||||
_ = binary.Write(h, binary.LittleEndian, attempt)
|
||||
bs := h.Sum(nil)
|
||||
for i := 2; i < 8; i++ {
|
||||
ip[i] = bs[i]
|
||||
}
|
||||
subnet := &net.IPNet{
|
||||
IP: net.IP(ip),
|
||||
Mask: net.CIDRMask(64, 128),
|
||||
}
|
||||
return subnet.String()
|
||||
}
|
175
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/node.go
generated
vendored
Normal file
175
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/node.go
generated
vendored
Normal file
|
@ -0,0 +1,175 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nerdctl
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"sigs.k8s.io/kind/pkg/errors"
|
||||
"sigs.k8s.io/kind/pkg/exec"
|
||||
)
|
||||
|
||||
// nodes.Node implementation for the docker provider
|
||||
type node struct {
|
||||
name string
|
||||
binaryName string
|
||||
}
|
||||
|
||||
func (n *node) String() string {
|
||||
return n.name
|
||||
}
|
||||
|
||||
func (n *node) Role() (string, error) {
|
||||
cmd := exec.Command(n.binaryName, "inspect",
|
||||
"--format", fmt.Sprintf(`{{ index .Config.Labels "%s"}}`, nodeRoleLabelKey),
|
||||
n.name,
|
||||
)
|
||||
lines, err := exec.OutputLines(cmd)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get role for node")
|
||||
}
|
||||
if len(lines) != 1 {
|
||||
return "", errors.Errorf("failed to get role for node: output lines %d != 1", len(lines))
|
||||
}
|
||||
return lines[0], nil
|
||||
}
|
||||
|
||||
func (n *node) IP() (ipv4 string, ipv6 string, err error) {
|
||||
// retrieve the IP address of the node using docker inspect
|
||||
cmd := exec.Command(n.binaryName, "inspect",
|
||||
"-f", "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}",
|
||||
n.name, // ... against the "node" container
|
||||
)
|
||||
lines, err := exec.OutputLines(cmd)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrap(err, "failed to get container details")
|
||||
}
|
||||
if len(lines) != 1 {
|
||||
return "", "", errors.Errorf("file should only be one line, got %d lines", len(lines))
|
||||
}
|
||||
ips := strings.Split(lines[0], ",")
|
||||
if len(ips) != 2 {
|
||||
return "", "", errors.Errorf("container addresses should have 2 values, got %d values", len(ips))
|
||||
}
|
||||
return ips[0], ips[1], nil
|
||||
}
|
||||
|
||||
func (n *node) Command(command string, args ...string) exec.Cmd {
|
||||
return &nodeCmd{
|
||||
binaryName: n.binaryName,
|
||||
nameOrID: n.name,
|
||||
command: command,
|
||||
args: args,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *node) CommandContext(ctx context.Context, command string, args ...string) exec.Cmd {
|
||||
return &nodeCmd{
|
||||
binaryName: n.binaryName,
|
||||
nameOrID: n.name,
|
||||
command: command,
|
||||
args: args,
|
||||
ctx: ctx,
|
||||
}
|
||||
}
|
||||
|
||||
// nodeCmd implements exec.Cmd for docker nodes
|
||||
type nodeCmd struct {
|
||||
binaryName string
|
||||
nameOrID string // the container name or ID
|
||||
command string
|
||||
args []string
|
||||
env []string
|
||||
stdin io.Reader
|
||||
stdout io.Writer
|
||||
stderr io.Writer
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func (c *nodeCmd) Run() error {
|
||||
args := []string{
|
||||
"exec",
|
||||
// run with privileges so we can remount etc..
|
||||
// this might not make sense in the most general sense, but it is
|
||||
// important to many kind commands
|
||||
"--privileged",
|
||||
}
|
||||
if c.stdin != nil {
|
||||
args = append(args,
|
||||
"-i", // interactive so we can supply input
|
||||
)
|
||||
}
|
||||
// set env
|
||||
for _, env := range c.env {
|
||||
args = append(args, "-e", env)
|
||||
}
|
||||
// specify the container and command, after this everything will be
|
||||
// args the command in the container rather than to docker
|
||||
args = append(
|
||||
args,
|
||||
c.nameOrID, // ... against the container
|
||||
c.command, // with the command specified
|
||||
)
|
||||
args = append(
|
||||
args,
|
||||
// finally, with the caller args
|
||||
c.args...,
|
||||
)
|
||||
var cmd exec.Cmd
|
||||
if c.ctx != nil {
|
||||
cmd = exec.CommandContext(c.ctx, c.binaryName, args...)
|
||||
} else {
|
||||
cmd = exec.Command(c.binaryName, args...)
|
||||
}
|
||||
if c.stdin != nil {
|
||||
cmd.SetStdin(c.stdin)
|
||||
}
|
||||
if c.stderr != nil {
|
||||
cmd.SetStderr(c.stderr)
|
||||
}
|
||||
if c.stdout != nil {
|
||||
cmd.SetStdout(c.stdout)
|
||||
}
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func (c *nodeCmd) SetEnv(env ...string) exec.Cmd {
|
||||
c.env = env
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *nodeCmd) SetStdin(r io.Reader) exec.Cmd {
|
||||
c.stdin = r
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *nodeCmd) SetStdout(w io.Writer) exec.Cmd {
|
||||
c.stdout = w
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *nodeCmd) SetStderr(w io.Writer) exec.Cmd {
|
||||
c.stderr = w
|
||||
return c
|
||||
}
|
||||
|
||||
func (n *node) SerialLogs(w io.Writer) error {
|
||||
return exec.Command(n.binaryName, "logs", n.name).SetStdout(w).SetStderr(w).Run()
|
||||
}
|
392
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/provider.go
generated
vendored
Normal file
392
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/provider.go
generated
vendored
Normal file
|
@ -0,0 +1,392 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nerdctl
|
||||
|
||||
import (
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
osexec "os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"sigs.k8s.io/kind/pkg/cluster/nodes"
|
||||
"sigs.k8s.io/kind/pkg/errors"
|
||||
"sigs.k8s.io/kind/pkg/exec"
|
||||
"sigs.k8s.io/kind/pkg/log"
|
||||
|
||||
internallogs "sigs.k8s.io/kind/pkg/cluster/internal/logs"
|
||||
"sigs.k8s.io/kind/pkg/cluster/internal/providers"
|
||||
"sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
|
||||
"sigs.k8s.io/kind/pkg/cluster/nodeutils"
|
||||
"sigs.k8s.io/kind/pkg/internal/apis/config"
|
||||
"sigs.k8s.io/kind/pkg/internal/cli"
|
||||
"sigs.k8s.io/kind/pkg/internal/sets"
|
||||
)
|
||||
|
||||
// NewProvider returns a new provider based on executing `nerdctl ...`
|
||||
func NewProvider(logger log.Logger, binaryName string) providers.Provider {
|
||||
// if binaryName is unset, do a lookup; we may be here via a
|
||||
// library call to provider.DetectNodeProvider(), which returns
|
||||
// true from nerdctl.IsAvailable() by checking for both finch
|
||||
// and nerdctl. If we don't redo the lookup here, then a finch
|
||||
// install that triggered IsAvailable() to be true would fail
|
||||
// to be used if we default to nerdctl when unset.
|
||||
if binaryName == "" {
|
||||
// default to "nerdctl"; but look for "finch" if
|
||||
// nerdctl binary lookup fails
|
||||
binaryName = "nerdctl"
|
||||
if _, err := osexec.LookPath("nerdctl"); err != nil {
|
||||
if _, err := osexec.LookPath("finch"); err == nil {
|
||||
binaryName = "finch"
|
||||
}
|
||||
}
|
||||
}
|
||||
return &provider{
|
||||
logger: logger,
|
||||
binaryName: binaryName,
|
||||
}
|
||||
}
|
||||
|
||||
// Provider implements provider.Provider
|
||||
// see NewProvider
|
||||
type provider struct {
|
||||
logger log.Logger
|
||||
binaryName string
|
||||
info *providers.ProviderInfo
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer
|
||||
// NOTE: the value of this should not currently be relied upon for anything!
|
||||
// This is only used for setting the Node's providerID
|
||||
func (p *provider) String() string {
|
||||
return "nerdctl"
|
||||
}
|
||||
|
||||
func (p *provider) Binary() string {
|
||||
return p.binaryName
|
||||
}
|
||||
|
||||
// Provision is part of the providers.Provider interface
|
||||
func (p *provider) Provision(status *cli.Status, cfg *config.Cluster) (err error) {
|
||||
// TODO: validate cfg
|
||||
// ensure node images are pulled before actually provisioning
|
||||
if err := ensureNodeImages(p.logger, status, cfg, p.Binary()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// ensure the pre-requisite network exists
|
||||
if err := ensureNetwork(fixedNetworkName, p.Binary()); err != nil {
|
||||
return errors.Wrap(err, "failed to ensure nerdctl network")
|
||||
}
|
||||
|
||||
// actually provision the cluster
|
||||
icons := strings.Repeat("📦 ", len(cfg.Nodes))
|
||||
status.Start(fmt.Sprintf("Preparing nodes %s", icons))
|
||||
defer func() { status.End(err == nil) }()
|
||||
|
||||
// plan creating the containers
|
||||
createContainerFuncs, err := planCreation(cfg, fixedNetworkName, p.Binary())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// actually create nodes
|
||||
// TODO: remove once nerdctl handles concurrency better
|
||||
// xref: https://github.com/containerd/nerdctl/issues/2908
|
||||
for _, f := range createContainerFuncs {
|
||||
if err := f(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListClusters is part of the providers.Provider interface
|
||||
func (p *provider) ListClusters() ([]string, error) {
|
||||
cmd := exec.Command(p.Binary(),
|
||||
"ps",
|
||||
"-a", // show stopped nodes
|
||||
// filter for nodes with the cluster label
|
||||
"--filter", "label="+clusterLabelKey,
|
||||
// format to include the cluster name
|
||||
"--format", fmt.Sprintf(`{{index .Labels "%s"}}`, clusterLabelKey),
|
||||
)
|
||||
lines, err := exec.OutputLines(cmd)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to list clusters")
|
||||
}
|
||||
return sets.NewString(lines...).List(), nil
|
||||
}
|
||||
|
||||
// ListNodes is part of the providers.Provider interface
|
||||
func (p *provider) ListNodes(cluster string) ([]nodes.Node, error) {
|
||||
cmd := exec.Command(p.Binary(),
|
||||
"ps",
|
||||
"-a", // show stopped nodes
|
||||
// filter for nodes with the cluster label
|
||||
"--filter", fmt.Sprintf("label=%s=%s", clusterLabelKey, cluster),
|
||||
// format to include the cluster name
|
||||
"--format", `{{.Names}}`,
|
||||
)
|
||||
lines, err := exec.OutputLines(cmd)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to list nodes")
|
||||
}
|
||||
length := len(lines)
|
||||
// convert names to node handles
|
||||
ret := make([]nodes.Node, 0, length)
|
||||
for _, name := range lines {
|
||||
if name != "" {
|
||||
ret = append(ret, p.node(name))
|
||||
}
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
// DeleteNodes is part of the providers.Provider interface
|
||||
func (p *provider) DeleteNodes(n []nodes.Node) error {
|
||||
if len(n) == 0 {
|
||||
return nil
|
||||
}
|
||||
argsNoRestart := make([]string, 0, len(n)+2)
|
||||
argsNoRestart = append(argsNoRestart,
|
||||
"update",
|
||||
"--restart=no",
|
||||
)
|
||||
argsStop := make([]string, 0, len(n)+1)
|
||||
argsStop = append(argsStop, "stop")
|
||||
argsWait := make([]string, 0, len(n)+1)
|
||||
argsWait = append(argsWait, "wait")
|
||||
|
||||
argsRm := make([]string, 0, len(n)+3) // allocate once
|
||||
argsRm = append(argsRm,
|
||||
"rm",
|
||||
"-f",
|
||||
"-v", // delete volumes
|
||||
)
|
||||
for _, node := range n {
|
||||
argsRm = append(argsRm, node.String())
|
||||
argsStop = append(argsStop, node.String())
|
||||
argsWait = append(argsWait, node.String())
|
||||
argsNoRestart = append(argsNoRestart, node.String())
|
||||
}
|
||||
if err := exec.Command(p.Binary(), argsNoRestart...).Run(); err != nil {
|
||||
return errors.Wrap(err, "failed to update restart policy to 'no'")
|
||||
}
|
||||
if err := exec.Command(p.Binary(), argsStop...).Run(); err != nil {
|
||||
return errors.Wrap(err, "failed to stop nodes")
|
||||
}
|
||||
if err := exec.Command(p.Binary(), argsWait...).Run(); err != nil {
|
||||
return errors.Wrap(err, "failed to wait for node exit")
|
||||
}
|
||||
if err := exec.Command(p.Binary(), argsRm...).Run(); err != nil {
|
||||
return errors.Wrap(err, "failed to delete nodes")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAPIServerEndpoint is part of the providers.Provider interface
|
||||
func (p *provider) GetAPIServerEndpoint(cluster string) (string, error) {
|
||||
// locate the node that hosts this
|
||||
allNodes, err := p.ListNodes(cluster)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to list nodes")
|
||||
}
|
||||
n, err := nodeutils.APIServerEndpointNode(allNodes)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get api server endpoint")
|
||||
}
|
||||
|
||||
// if the 'desktop.docker.io/ports/<PORT>/tcp' label is present,
|
||||
// defer to its value for the api server endpoint
|
||||
//
|
||||
// For example:
|
||||
// "Labels": {
|
||||
// "desktop.docker.io/ports/6443/tcp": "10.0.1.7:6443",
|
||||
// }
|
||||
cmd := exec.Command(
|
||||
p.Binary(), "inspect",
|
||||
"--format", fmt.Sprintf(
|
||||
"{{ index .Config.Labels \"desktop.docker.io/ports/%d/tcp\" }}", common.APIServerInternalPort,
|
||||
),
|
||||
n.String(),
|
||||
)
|
||||
lines, err := exec.OutputLines(cmd)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get api server port")
|
||||
}
|
||||
if len(lines) == 1 && lines[0] != "" {
|
||||
return lines[0], nil
|
||||
}
|
||||
|
||||
// else, retrieve the specific port mapping via NetworkSettings.Ports
|
||||
cmd = exec.Command(
|
||||
p.Binary(), "inspect",
|
||||
"--format", fmt.Sprintf(
|
||||
"{{ with (index (index .NetworkSettings.Ports \"%d/tcp\") 0) }}{{ printf \"%%s\t%%s\" .HostIp .HostPort }}{{ end }}", common.APIServerInternalPort,
|
||||
),
|
||||
n.String(),
|
||||
)
|
||||
lines, err = exec.OutputLines(cmd)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get api server port")
|
||||
}
|
||||
if len(lines) != 1 {
|
||||
return "", errors.Errorf("network details should only be one line, got %d lines", len(lines))
|
||||
}
|
||||
parts := strings.Split(lines[0], "\t")
|
||||
if len(parts) != 2 {
|
||||
return "", errors.Errorf("network details should only be two parts, got %d", len(parts))
|
||||
}
|
||||
|
||||
// join host and port
|
||||
return net.JoinHostPort(parts[0], parts[1]), nil
|
||||
}
|
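A minimal sketch of the fallback step above: when the desktop.docker.io port label is absent, the endpoint is assembled from the tab-separated HostIp/HostPort pair printed by the NetworkSettings.Ports template. The sample value below is hypothetical:

package main

import (
	"fmt"
	"net"
	"strings"
)

func main() {
	// hypothetical template output for container port 6443/tcp
	line := "127.0.0.1\t38641"
	parts := strings.Split(line, "\t")
	if len(parts) != 2 {
		panic("expected exactly HostIp and HostPort")
	}
	// same joining step as above
	fmt.Println(net.JoinHostPort(parts[0], parts[1])) // 127.0.0.1:38641
}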
||||
|
||||
// GetAPIServerInternalEndpoint is part of the providers.Provider interface
|
||||
func (p *provider) GetAPIServerInternalEndpoint(cluster string) (string, error) {
|
||||
// locate the node that hosts this
|
||||
allNodes, err := p.ListNodes(cluster)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to list nodes")
|
||||
}
|
||||
n, err := nodeutils.APIServerEndpointNode(allNodes)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to get api server endpoint")
|
||||
}
|
||||
// NOTE: we're using the nodes' hostnames, which are their names
|
||||
return net.JoinHostPort(n.String(), fmt.Sprintf("%d", common.APIServerInternalPort)), nil
|
||||
}
|
||||
|
||||
// node returns a new node handle for this provider
|
||||
func (p *provider) node(name string) nodes.Node {
|
||||
return &node{
|
||||
binaryName: p.binaryName,
|
||||
name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// CollectLogs will populate dir with cluster logs and other debug files
|
||||
func (p *provider) CollectLogs(dir string, nodes []nodes.Node) error {
|
||||
execToPathFn := func(cmd exec.Cmd, path string) func() error {
|
||||
return func() error {
|
||||
f, err := common.FileOnHost(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
return cmd.SetStdout(f).SetStderr(f).Run()
|
||||
}
|
||||
}
|
||||
// construct a slice of methods to collect logs
|
||||
fns := []func() error{
|
||||
// record info about the host nerdctl
|
||||
execToPathFn(
|
||||
exec.Command(p.Binary(), "info"),
|
||||
filepath.Join(dir, "docker-info.txt"),
|
||||
),
|
||||
}
|
||||
|
||||
// collect /var/log for each node and plan collecting more logs
|
||||
var errs []error
|
||||
for _, n := range nodes {
|
||||
node := n // https://golang.org/doc/faq#closures_and_goroutines
|
||||
name := node.String()
|
||||
path := filepath.Join(dir, name)
|
||||
if err := internallogs.DumpDir(p.logger, node, "/var/log", path); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
fns = append(fns,
|
||||
func() error { return common.CollectLogs(node, path) },
|
||||
execToPathFn(exec.Command(p.Binary(), "inspect", name), filepath.Join(path, "inspect.json")),
|
||||
func() error {
|
||||
f, err := common.FileOnHost(filepath.Join(path, "serial.log"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
return node.SerialLogs(f)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// run and collect up all errors
|
||||
errs = append(errs, errors.AggregateConcurrent(fns))
|
||||
return errors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
// Info returns the provider info.
|
||||
// The info is cached after the first execution.
|
||||
func (p *provider) Info() (*providers.ProviderInfo, error) {
|
||||
var err error
|
||||
if p.info == nil {
|
||||
p.info, err = info(p.Binary())
|
||||
}
|
||||
return p.info, err
|
||||
}
|
||||
|
||||
// dockerInfo corresponds to `docker info --format '{{json .}}'`
|
||||
type dockerInfo struct {
|
||||
CgroupDriver string `json:"CgroupDriver"` // "systemd", "cgroupfs", "none"
|
||||
CgroupVersion string `json:"CgroupVersion"` // e.g. "2"
|
||||
MemoryLimit bool `json:"MemoryLimit"`
|
||||
PidsLimit bool `json:"PidsLimit"`
|
||||
CPUShares bool `json:"CPUShares"`
|
||||
SecurityOptions []string `json:"SecurityOptions"`
|
||||
}
|
||||
|
||||
func info(binaryName string) (*providers.ProviderInfo, error) {
|
||||
cmd := exec.Command(binaryName, "info", "--format", "{{json .}}")
|
||||
out, err := exec.Output(cmd)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get nerdctl info")
|
||||
}
|
||||
var dInfo dockerInfo
|
||||
if err := json.Unmarshal(out, &dInfo); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
info := providers.ProviderInfo{
|
||||
Cgroup2: dInfo.CgroupVersion == "2",
|
||||
}
|
||||
// When CgroupDriver == "none", the MemoryLimit/PidsLimit/CPUShares
|
||||
// values are meaningless and need to be considered false.
|
||||
// https://github.com/moby/moby/issues/42151
|
||||
if dInfo.CgroupDriver != "none" {
|
||||
info.SupportsMemoryLimit = dInfo.MemoryLimit
|
||||
info.SupportsPidsLimit = dInfo.PidsLimit
|
||||
info.SupportsCPUShares = dInfo.CPUShares
|
||||
}
|
||||
for _, o := range dInfo.SecurityOptions {
|
||||
// o is like "name=seccomp,profile=default", or "name=rootless",
|
||||
csvReader := csv.NewReader(strings.NewReader(o))
|
||||
sliceSlice, err := csvReader.ReadAll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, f := range sliceSlice {
|
||||
for _, ff := range f {
|
||||
if ff == "name=rootless" {
|
||||
info.Rootless = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return &info, nil
|
||||
}
|
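To make the capability detection above concrete, here is a small self-contained sketch: the payload is a made-up example of `nerdctl info --format '{{json .}}'` output, the local struct mirrors the vendored dockerInfo type, and the security-options parsing is simplified to a substring check instead of the CSV reader used above:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type dockerInfo struct {
	CgroupDriver    string   `json:"CgroupDriver"`
	CgroupVersion   string   `json:"CgroupVersion"`
	MemoryLimit     bool     `json:"MemoryLimit"`
	PidsLimit       bool     `json:"PidsLimit"`
	CPUShares       bool     `json:"CPUShares"`
	SecurityOptions []string `json:"SecurityOptions"`
}

func main() {
	// hypothetical payload from a rootless runtime on a cgroup v2 host
	payload := `{"CgroupDriver":"systemd","CgroupVersion":"2","MemoryLimit":true,"PidsLimit":true,"CPUShares":true,"SecurityOptions":["name=seccomp,profile=default","name=rootless"]}`
	var d dockerInfo
	if err := json.Unmarshal([]byte(payload), &d); err != nil {
		panic(err)
	}
	cgroup2 := d.CgroupVersion == "2"
	// limits are only trusted when a cgroup driver is actually in use
	supportsMemoryLimit := d.CgroupDriver != "none" && d.MemoryLimit
	rootless := false
	for _, o := range d.SecurityOptions {
		if strings.Contains(o, "name=rootless") {
			rootless = true
		}
	}
	fmt.Println(cgroup2, supportsMemoryLimit, rootless) // true true true
}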
388
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/provision.go
generated
vendored
Normal file
|
@ -0,0 +1,388 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nerdctl
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"sigs.k8s.io/kind/pkg/cluster/constants"
|
||||
"sigs.k8s.io/kind/pkg/errors"
|
||||
"sigs.k8s.io/kind/pkg/exec"
|
||||
"sigs.k8s.io/kind/pkg/fs"
|
||||
|
||||
"sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer"
|
||||
"sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
|
||||
"sigs.k8s.io/kind/pkg/internal/apis/config"
|
||||
)
|
||||
|
||||
// planCreation creates a slice of funcs that will create the containers
|
||||
func planCreation(cfg *config.Cluster, networkName, binaryName string) (createContainerFuncs []func() error, err error) {
|
||||
// we need to know all the names for NO_PROXY
|
||||
// compute the names first before any actual node details
|
||||
nodeNamer := common.MakeNodeNamer(cfg.Name)
|
||||
names := make([]string, len(cfg.Nodes))
|
||||
for i, node := range cfg.Nodes {
|
||||
name := nodeNamer(string(node.Role)) // name the node
|
||||
names[i] = name
|
||||
}
|
||||
haveLoadbalancer := config.ClusterHasImplicitLoadBalancer(cfg)
|
||||
if haveLoadbalancer {
|
||||
names = append(names, nodeNamer(constants.ExternalLoadBalancerNodeRoleValue))
|
||||
}
|
||||
|
||||
// these apply to all container creation
|
||||
genericArgs, err := commonArgs(cfg.Name, cfg, networkName, names, binaryName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// only the external LB should reflect the port if we have multiple control planes
|
||||
apiServerPort := cfg.Networking.APIServerPort
|
||||
apiServerAddress := cfg.Networking.APIServerAddress
|
||||
if haveLoadbalancer {
|
||||
// TODO: picking ports locally is less than ideal with remote docker
|
||||
// but this is supposed to be an implementation detail and NOT picking
|
||||
// them breaks host reboot ...
|
||||
// For now remote docker + multi control plane is not supported
|
||||
apiServerPort = 0 // replaced with random ports
|
||||
apiServerAddress = "127.0.0.1" // only the LB needs to be non-local
|
||||
// only for IPv6 only clusters
|
||||
if cfg.Networking.IPFamily == config.IPv6Family {
|
||||
apiServerAddress = "::1" // only the LB needs to be non-local
|
||||
}
|
||||
// plan loadbalancer node
|
||||
name := names[len(names)-1]
|
||||
createContainerFuncs = append(createContainerFuncs, func() error {
|
||||
args, err := runArgsForLoadBalancer(cfg, name, genericArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return createContainer(name, args, binaryName)
|
||||
})
|
||||
}
|
||||
|
||||
// plan normal nodes
|
||||
for i, node := range cfg.Nodes {
|
||||
node := node.DeepCopy() // copy so we can modify
|
||||
name := names[i]
|
||||
|
||||
// fixup relative paths, docker can only handle absolute paths
|
||||
for m := range node.ExtraMounts {
|
||||
hostPath := node.ExtraMounts[m].HostPath
|
||||
if !fs.IsAbs(hostPath) {
|
||||
absHostPath, err := filepath.Abs(hostPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "unable to resolve absolute path for hostPath: %q", hostPath)
|
||||
}
|
||||
node.ExtraMounts[m].HostPath = absHostPath
|
||||
}
|
||||
}
|
||||
|
||||
// plan actual creation based on role
|
||||
switch node.Role {
|
||||
case config.ControlPlaneRole:
|
||||
createContainerFuncs = append(createContainerFuncs, func() error {
|
||||
node.ExtraPortMappings = append(node.ExtraPortMappings,
|
||||
config.PortMapping{
|
||||
ListenAddress: apiServerAddress,
|
||||
HostPort: apiServerPort,
|
||||
ContainerPort: common.APIServerInternalPort,
|
||||
},
|
||||
)
|
||||
args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args, binaryName)
|
||||
})
|
||||
case config.WorkerRole:
|
||||
createContainerFuncs = append(createContainerFuncs, func() error {
|
||||
args, err := runArgsForNode(node, cfg.Networking.IPFamily, name, genericArgs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args, binaryName)
|
||||
})
|
||||
default:
|
||||
return nil, errors.Errorf("unknown node role: %q", node.Role)
|
||||
}
|
||||
}
|
||||
return createContainerFuncs, nil
|
||||
}
|
||||
|
||||
// commonArgs computes static arguments that apply to all containers
|
||||
func commonArgs(cluster string, cfg *config.Cluster, networkName string, nodeNames []string, binaryName string) ([]string, error) {
|
||||
// standard arguments all nodes containers need, computed once
|
||||
args := []string{
|
||||
"--detach", // run the container detached
|
||||
"--tty", // allocate a tty for entrypoint logs
|
||||
// label the node with the cluster ID
|
||||
"--label", fmt.Sprintf("%s=%s", clusterLabelKey, cluster),
|
||||
// use a user defined network so we get embedded DNS
|
||||
"--net", networkName,
|
||||
// containerd supports the following restart modes:
|
||||
// - no
|
||||
// - on-failure[:max-retries]
|
||||
// - unless-stopped
|
||||
// - always
|
||||
//
|
||||
// What we desire is:
|
||||
// - restart on host / container runtime reboot
|
||||
// - don't restart for any other reason
|
||||
//
|
||||
"--restart=on-failure:1",
|
||||
// this can be enabled by default in docker daemon.json, so we explicitly
|
||||
// disable it, we want our entrypoint to be PID1, not docker-init / tini
|
||||
"--init=false",
|
||||
}
|
||||
|
||||
// enable IPv6 if necessary
|
||||
if config.ClusterHasIPv6(cfg) {
|
||||
args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1")
|
||||
}
|
||||
|
||||
// pass proxy environment variables
|
||||
proxyEnv, err := getProxyEnv(cfg, networkName, nodeNames, binaryName)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "proxy setup error")
|
||||
}
|
||||
for key, val := range proxyEnv {
|
||||
args = append(args, "-e", fmt.Sprintf("%s=%s", key, val))
|
||||
}
|
||||
|
||||
// enable /dev/fuse explicitly for fuse-overlayfs
|
||||
// (Rootless Docker does not automatically mount /dev/fuse with --privileged)
|
||||
if mountFuse(binaryName) {
|
||||
args = append(args, "--device", "/dev/fuse")
|
||||
}
|
||||
|
||||
if cfg.Networking.DNSSearch != nil {
|
||||
args = append(args, "-e", "KIND_DNS_SEARCH="+strings.Join(*cfg.Networking.DNSSearch, " "))
|
||||
}
|
||||
|
||||
return args, nil
|
||||
}
|
||||
|
||||
func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, name string, args []string) ([]string, error) {
|
||||
args = append([]string{
|
||||
"--hostname", name, // make hostname match container name
|
||||
// label the node with the role ID
|
||||
"--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role),
|
||||
// running containers in a container requires privileged
|
||||
// NOTE: we could try to replicate this with --cap-add, and use less
|
||||
// privileges, but this flag also changes some mounts that are necessary
|
||||
// including some ones docker would otherwise do by default.
|
||||
// for now this is what we want. in the future we may revisit this.
|
||||
"--privileged",
|
||||
"--security-opt", "seccomp=unconfined", // also ignore seccomp
|
||||
"--security-opt", "apparmor=unconfined", // also ignore apparmor
|
||||
// runtime temporary storage
|
||||
"--tmpfs", "/tmp", // various things depend on working /tmp
|
||||
"--tmpfs", "/run", // systemd wants a writable /run
|
||||
// runtime persistent storage
|
||||
// this ensures that E.G. pods, logs etc. are not on the container
|
||||
// filesystem, which is not only better for performance, but allows
|
||||
// running kind in kind for "party tricks"
|
||||
// (please don't depend on doing this though!)
|
||||
"--volume", "/var",
|
||||
// some k8s things want to read /lib/modules
|
||||
"--volume", "/lib/modules:/lib/modules:ro",
|
||||
// propagate KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER to the entrypoint script
|
||||
"-e", "KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER",
|
||||
},
|
||||
args...,
|
||||
)
|
||||
|
||||
// convert mounts and port mappings to container run args
|
||||
args = append(args, generateMountBindings(node.ExtraMounts...)...)
|
||||
mappingArgs, err := generatePortMappings(clusterIPFamily, node.ExtraPortMappings...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
args = append(args, mappingArgs...)
|
||||
|
||||
switch node.Role {
|
||||
case config.ControlPlaneRole:
|
||||
args = append(args, "-e", "KUBECONFIG=/etc/kubernetes/admin.conf")
|
||||
}
|
||||
|
||||
// finally, specify the image to run
|
||||
return append(args, node.Image), nil
|
||||
}
|
||||
|
||||
func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) {
|
||||
args = append([]string{
|
||||
"--hostname", name, // make hostname match container name
|
||||
// label the node with the role ID
|
||||
"--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue),
|
||||
},
|
||||
args...,
|
||||
)
|
||||
|
||||
// load balancer port mapping
|
||||
mappingArgs, err := generatePortMappings(cfg.Networking.IPFamily,
|
||||
config.PortMapping{
|
||||
ListenAddress: cfg.Networking.APIServerAddress,
|
||||
HostPort: cfg.Networking.APIServerPort,
|
||||
ContainerPort: common.APIServerInternalPort,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
args = append(args, mappingArgs...)
|
||||
|
||||
// finally, specify the image to run
|
||||
return append(args, loadbalancer.Image), nil
|
||||
}
|
||||
|
||||
func getProxyEnv(cfg *config.Cluster, networkName string, nodeNames []string, binaryName string) (map[string]string, error) {
|
||||
envs := common.GetProxyEnvs(cfg)
|
||||
// Specifically add the docker network subnets to NO_PROXY if we are using a proxy
|
||||
if len(envs) > 0 {
|
||||
subnets, err := getSubnets(networkName, binaryName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
noProxyList := append(subnets, envs[common.NOProxy])
|
||||
noProxyList = append(noProxyList, nodeNames...)
|
||||
// Add pod and service dns names to no_proxy to allow in cluster
|
||||
// Note: this is best effort based on the default CoreDNS spec
|
||||
// https://github.com/kubernetes/dns/blob/master/docs/specification.md
|
||||
// Any user created pod/service hostnames, namespaces, custom DNS services
|
||||
// are expected to be no-proxied by the user explicitly.
|
||||
noProxyList = append(noProxyList, ".svc", ".svc.cluster", ".svc.cluster.local")
|
||||
noProxyJoined := strings.Join(noProxyList, ",")
|
||||
envs[common.NOProxy] = noProxyJoined
|
||||
envs[strings.ToLower(common.NOProxy)] = noProxyJoined
|
||||
}
|
||||
return envs, nil
|
||||
}
|
||||
|
||||
func getSubnets(networkName, binaryName string) ([]string, error) {
|
||||
format := `{{range (index (index . "IPAM") "Config")}}{{index . "Subnet"}} {{end}}`
|
||||
cmd := exec.Command(binaryName, "network", "inspect", "-f", format, networkName)
|
||||
lines, err := exec.OutputLines(cmd)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get subnets")
|
||||
}
|
||||
return strings.Split(strings.TrimSpace(lines[0]), " "), nil
|
||||
}
|
||||
|
||||
// generateMountBindings converts the mount list to a list of args for docker
|
||||
// '<HostPath>:<ContainerPath>[:options]', where 'options'
|
||||
// is a comma-separated list of the following strings:
|
||||
// 'ro', if the path is read only
|
||||
// 'Z', if the volume requires SELinux relabeling
|
||||
func generateMountBindings(mounts ...config.Mount) []string {
|
||||
args := make([]string, 0, len(mounts))
|
||||
for _, m := range mounts {
|
||||
bind := fmt.Sprintf("%s:%s", m.HostPath, m.ContainerPath)
|
||||
var attrs []string
|
||||
if m.Readonly {
|
||||
attrs = append(attrs, "ro")
|
||||
}
|
||||
// Only request relabeling if the pod provides an SELinux context. If the pod
|
||||
// does not provide an SELinux context relabeling will label the volume with
|
||||
// the container's randomly allocated MCS label. This would restrict access
|
||||
// to the volume to the container which mounts it first.
|
||||
if m.SelinuxRelabel {
|
||||
attrs = append(attrs, "Z")
|
||||
}
|
||||
switch m.Propagation {
|
||||
case config.MountPropagationNone:
|
||||
// noop, private is default
|
||||
case config.MountPropagationBidirectional:
|
||||
attrs = append(attrs, "rshared")
|
||||
case config.MountPropagationHostToContainer:
|
||||
attrs = append(attrs, "rslave")
|
||||
default: // Falls back to "private"
|
||||
}
|
||||
if len(attrs) > 0 {
|
||||
bind = fmt.Sprintf("%s:%s", bind, strings.Join(attrs, ","))
|
||||
}
|
||||
args = append(args, fmt.Sprintf("--volume=%s", bind))
|
||||
}
|
||||
return args
|
||||
}
|
||||
|
||||
// generatePortMappings converts the portMappings list to a list of args for docker
|
||||
func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings ...config.PortMapping) ([]string, error) {
|
||||
args := make([]string, 0, len(portMappings))
|
||||
for _, pm := range portMappings {
|
||||
// do provider internal defaulting
|
||||
// in a future API revision we will handle this at the API level and remove this
|
||||
if pm.ListenAddress == "" {
|
||||
switch clusterIPFamily {
|
||||
case config.IPv4Family, config.DualStackFamily:
|
||||
pm.ListenAddress = "0.0.0.0" // this is the docker default anyhow
|
||||
case config.IPv6Family:
|
||||
pm.ListenAddress = "::"
|
||||
default:
|
||||
return nil, errors.Errorf("unknown cluster IP family: %v", clusterIPFamily)
|
||||
}
|
||||
}
|
||||
if string(pm.Protocol) == "" {
|
||||
pm.Protocol = config.PortMappingProtocolTCP // TCP is the default
|
||||
}
|
||||
|
||||
// validate that the provider can handle this binding
|
||||
switch pm.Protocol {
|
||||
case config.PortMappingProtocolTCP:
|
||||
case config.PortMappingProtocolUDP:
|
||||
case config.PortMappingProtocolSCTP:
|
||||
default:
|
||||
return nil, errors.Errorf("unknown port mapping protocol: %v", pm.Protocol)
|
||||
}
|
||||
|
||||
// get a random port if necessary (port = 0)
|
||||
hostPort, releaseHostPortFn, err := common.PortOrGetFreePort(pm.HostPort, pm.ListenAddress)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get random host port for port mapping")
|
||||
}
|
||||
if releaseHostPortFn != nil {
|
||||
defer releaseHostPortFn()
|
||||
}
|
||||
|
||||
// generate the actual mapping arg
|
||||
protocol := string(pm.Protocol)
|
||||
hostPortBinding := net.JoinHostPort(pm.ListenAddress, fmt.Sprintf("%d", hostPort))
|
||||
args = append(args, fmt.Sprintf("--publish=%s:%d/%s", hostPortBinding, pm.ContainerPort, protocol))
|
||||
}
|
||||
return args, nil
|
||||
}
|
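As a quick illustration of the flag shapes produced by generateMountBindings and generatePortMappings above; the paths, addresses and ports below are hypothetical:

package main

import (
	"fmt"
	"net"
)

func main() {
	// a read-only bind mount becomes --volume=<hostPath>:<containerPath>:ro
	bind := fmt.Sprintf("%s:%s", "/var/lib/audit", "/audit")
	bind = fmt.Sprintf("%s:%s", bind, "ro")
	fmt.Println("--volume=" + bind) // --volume=/var/lib/audit:/audit:ro

	// a TCP port mapping becomes --publish=<listenAddress>:<hostPort>:<containerPort>/<protocol>
	hostPortBinding := net.JoinHostPort("127.0.0.1", fmt.Sprintf("%d", 38641))
	fmt.Printf("--publish=%s:%d/%s\n", hostPortBinding, 6443, "tcp") // --publish=127.0.0.1:38641:6443/tcp
}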
||||
|
||||
func createContainer(name string, args []string, binaryName string) error {
|
||||
return exec.Command(binaryName, append([]string{"run", "--name", name}, args...)...).Run()
|
||||
}
|
||||
|
||||
func createContainerWithWaitUntilSystemdReachesMultiUserSystem(name string, args []string, binaryName string) error {
|
||||
if err := exec.Command(binaryName, append([]string{"run", "--name", name}, args...)...).Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logCtx, logCancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
logCmd := exec.CommandContext(logCtx, binaryName, "logs", "-f", name)
|
||||
defer logCancel()
|
||||
return common.WaitUntilLogRegexpMatches(logCtx, logCmd, common.NodeReachedCgroupsReadyRegexp())
|
||||
}
|
52
vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/util.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package nerdctl
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"sigs.k8s.io/kind/pkg/exec"
|
||||
)
|
||||
|
||||
// IsAvailable checks if nerdctl (or finch) is available in the system
|
||||
func IsAvailable() bool {
|
||||
cmd := exec.Command("nerdctl", "-v")
|
||||
lines, err := exec.OutputLines(cmd)
|
||||
if err != nil || len(lines) != 1 {
|
||||
// check finch
|
||||
cmd = exec.Command("finch", "-v")
|
||||
lines, err = exec.OutputLines(cmd)
|
||||
if err != nil || len(lines) != 1 {
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(lines[0], "finch version")
|
||||
}
|
||||
return strings.HasPrefix(lines[0], "nerdctl version")
|
||||
}
|
||||
|
||||
// rootless: use fuse-overlayfs by default
|
||||
// https://github.com/kubernetes-sigs/kind/issues/2275
|
||||
func mountFuse(binaryName string) bool {
|
||||
i, err := info(binaryName)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if i != nil && i.Rootless {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -421,10 +421,7 @@ func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings .
|
|||
}
|
||||
|
||||
func createContainer(name string, args []string) error {
|
||||
if err := exec.Command("podman", append([]string{"run", "--name", name}, args...)...).Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return exec.Command("podman", append([]string{"run", "--name", name}, args...)...).Run()
|
||||
}
|
||||
|
||||
func createContainerWithWaitUntilSystemdReachesMultiUserSystem(name string, args []string) error {
|
||||
|
|
|
@ -34,6 +34,7 @@ import (
|
|||
"sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig"
|
||||
internalproviders "sigs.k8s.io/kind/pkg/cluster/internal/providers"
|
||||
"sigs.k8s.io/kind/pkg/cluster/internal/providers/docker"
|
||||
"sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl"
|
||||
"sigs.k8s.io/kind/pkg/cluster/internal/providers/podman"
|
||||
)
|
||||
|
||||
|
@ -102,8 +103,8 @@ var NoNodeProviderDetectedError = errors.NewWithoutStack("failed to detect any s
|
|||
// Pass the returned ProviderOption to NewProvider to pass the auto-detect Docker
|
||||
// or Podman option explicitly (in the future there will be more options)
|
||||
//
|
||||
// NOTE: The kind *cli* also checks `KIND_EXPERIMENTAL_PROVIDER` for "podman" or
|
||||
// "docker" currently and does not auto-detect / respects this if set.
|
||||
// NOTE: The kind *cli* also checks `KIND_EXPERIMENTAL_PROVIDER` for "podman",
|
||||
// "nerctl" or "docker" currently and does not auto-detect / respects this if set.
|
||||
//
|
||||
// This will be replaced with some other mechanism in the future (likely when
|
||||
// podman support is GA), in the meantime though your tool may wish to match this.
|
||||
|
@ -115,6 +116,9 @@ func DetectNodeProvider() (ProviderOption, error) {
|
|||
if docker.IsAvailable() {
|
||||
return ProviderWithDocker(), nil
|
||||
}
|
||||
if nerdctl.IsAvailable() {
|
||||
return ProviderWithNerdctl(""), nil
|
||||
}
|
||||
if podman.IsAvailable() {
|
||||
return ProviderWithPodman(), nil
|
||||
}
|
||||
|
@ -167,6 +171,13 @@ func ProviderWithPodman() ProviderOption {
|
|||
})
|
||||
}
|
||||
|
||||
// ProviderWithNerdctl configures the provider to use the nerdctl runtime
|
||||
func ProviderWithNerdctl(binaryName string) ProviderOption {
|
||||
return providerRuntimeOption(func(p *Provider) {
|
||||
p.provider = nerdctl.NewProvider(p.logger, binaryName)
|
||||
})
|
||||
}
|
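For consumers of the vendored module, a minimal sketch of wiring the new runtime in through kind's public cluster package, following the detection order shown above (docker, then nerdctl, then podman); the cluster name is arbitrary and error handling is abbreviated:

package main

import (
	"log"

	"sigs.k8s.io/kind/pkg/cluster"
)

func main() {
	// prefer whatever supported runtime is installed
	opt, err := cluster.DetectNodeProvider()
	if err != nil {
		// or select nerdctl explicitly; "" keeps the default binary name
		opt = cluster.ProviderWithNerdctl("")
	}
	provider := cluster.NewProvider(opt)
	if err := provider.Create("demo"); err != nil {
		log.Fatal(err)
	}
}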
||||
|
||||
// Create provisions and starts a kubernetes-in-docker cluster
|
||||
func (p *Provider) Create(name string, options ...CreateOption) error {
|
||||
// apply options
|
||||
|
|
|
@ -54,11 +54,11 @@ func DisplayVersion() string {
|
|||
}
|
||||
|
||||
// versionCore is the core portion of the kind CLI version per Semantic Versioning 2.0.0
|
||||
const versionCore = "0.22.0"
|
||||
const versionCore = "0.24.0"
|
||||
|
||||
// versionPreRelease is the base pre-release portion of the kind CLI version per
|
||||
// Semantic Versioning 2.0.0
|
||||
const versionPreRelease = ""
|
||||
var versionPreRelease = ""
|
||||
|
||||
// gitCommitCount count the commits since the last release.
|
||||
// It is injected at build time.
|
||||
|
|
|
@ -64,10 +64,10 @@ func Copy(src, dst string) error {
|
|||
return err
|
||||
}
|
||||
// do real copy work
|
||||
return copy(src, dst, info)
|
||||
return copyWithSrcInfo(src, dst, info)
|
||||
}
|
||||
|
||||
func copy(src, dst string, info os.FileInfo) error {
|
||||
func copyWithSrcInfo(src, dst string, info os.FileInfo) error {
|
||||
if info.Mode()&os.ModeSymlink != 0 {
|
||||
return copySymlink(src, dst)
|
||||
}
|
||||
|
@ -128,7 +128,7 @@ func copySymlink(src, dst string) error {
|
|||
return err
|
||||
}
|
||||
// copy the underlying contents
|
||||
return copy(realSrc, dst, info)
|
||||
return copyWithSrcInfo(realSrc, dst, info)
|
||||
}
|
||||
|
||||
func copyDir(src, dst string, info os.FileInfo) error {
|
||||
|
@ -148,7 +148,7 @@ func copyDir(src, dst string, info os.FileInfo) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := copy(entrySrc, entryDst, fileInfo); err != nil {
|
||||
if err := copyWithSrcInfo(entrySrc, entryDst, fileInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -148,7 +148,7 @@ type Networking struct {
|
|||
// If DisableDefaultCNI is true, kind will not install the default CNI setup.
|
||||
// Instead the user should install their own CNI after creating the cluster.
|
||||
DisableDefaultCNI bool
|
||||
// KubeProxyMode defines if kube-proxy should operate in iptables or ipvs mode
|
||||
// KubeProxyMode defines if kube-proxy should operate in iptables, ipvs or nftables mode
|
||||
KubeProxyMode ProxyMode
|
||||
// DNSSearch defines the DNS search domain to use for nodes. If not set, this will be inherited from the host.
|
||||
DNSSearch *[]string
|
||||
|
@ -174,6 +174,8 @@ const (
|
|||
IPTablesProxyMode ProxyMode = "iptables"
|
||||
// IPVSProxyMode sets ProxyMode to ipvs
|
||||
IPVSProxyMode ProxyMode = "ipvs"
|
||||
// NFTablesProxyMode sets ProxyMode to nftables
|
||||
NFTablesProxyMode ProxyMode = "nftables"
|
||||
// NoneProxyMode disables kube-proxy
|
||||
NoneProxyMode ProxyMode = "none"
|
||||
)
|
||||
|
|
|
@ -52,6 +52,11 @@ func (c *Cluster) Validate() error {
|
|||
}
|
||||
}
|
||||
|
||||
// ipFamily should be ipv4, ipv6, or dual
|
||||
if c.Networking.IPFamily != IPv4Family && c.Networking.IPFamily != IPv6Family && c.Networking.IPFamily != DualStackFamily {
|
||||
errs = append(errs, errors.Errorf("invalid ipFamily: %s", c.Networking.IPFamily))
|
||||
}
|
||||
|
||||
// podSubnet should be a valid CIDR
|
||||
if err := validateSubnets(c.Networking.PodSubnet, c.Networking.IPFamily); err != nil {
|
||||
errs = append(errs, errors.Errorf("invalid pod subnet %v", err))
|
||||
|
@ -64,7 +69,7 @@ func (c *Cluster) Validate() error {
|
|||
|
||||
// KubeProxyMode should be iptables or ipvs
|
||||
if c.Networking.KubeProxyMode != IPTablesProxyMode && c.Networking.KubeProxyMode != IPVSProxyMode &&
|
||||
c.Networking.KubeProxyMode != NoneProxyMode {
|
||||
c.Networking.KubeProxyMode != NoneProxyMode && c.Networking.KubeProxyMode != NFTablesProxyMode {
|
||||
errs = append(errs, errors.Errorf("invalid kubeProxyMode: %s", c.Networking.KubeProxyMode))
|
||||
}
|
||||
|
||||
|
|