Merge pull request #1492 from RainbowMango/pr_bump_kind

Bump kind version from v0.11.1 to v0.12.0
This commit is contained in:
karmada-bot 2022-03-16 09:16:31 +08:00 committed by GitHub
commit bd9ad392fc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
60 changed files with 2381 additions and 1015 deletions

6
go.mod
View File

@ -4,7 +4,7 @@ go 1.17
require ( require (
github.com/distribution/distribution/v3 v3.0.0-20210507173845-9329f6a62b67 github.com/distribution/distribution/v3 v3.0.0-20210507173845-9329f6a62b67
github.com/evanphx/json-patch/v5 v5.2.0 github.com/evanphx/json-patch/v5 v5.6.0
github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf v1.3.2
github.com/google/uuid v1.1.2 github.com/google/uuid v1.1.2
github.com/kr/pretty v0.3.0 github.com/kr/pretty v0.3.0
@ -36,7 +36,7 @@ require (
k8s.io/utils v0.0.0-20211116205334-6203023598ed k8s.io/utils v0.0.0-20211116205334-6203023598ed
sigs.k8s.io/cluster-api v1.0.1 sigs.k8s.io/cluster-api v1.0.1
sigs.k8s.io/controller-runtime v0.11.1 sigs.k8s.io/controller-runtime v0.11.1
sigs.k8s.io/kind v0.11.1 sigs.k8s.io/kind v0.12.0
sigs.k8s.io/mcs-api v0.1.0 sigs.k8s.io/mcs-api v0.1.0
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 sigs.k8s.io/structured-merge-diff/v4 v4.2.1
sigs.k8s.io/yaml v1.3.0 sigs.k8s.io/yaml v1.3.0
@ -44,7 +44,7 @@ require (
require ( require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/BurntSushi/toml v0.3.1 // indirect github.com/BurntSushi/toml v0.4.1 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect

19
go.sum
View File

@ -64,8 +64,9 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
@ -206,13 +207,12 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.0.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.0.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/evanphx/json-patch/v5 v5.2.0 h1:8ozOH5xxoMYDt5/u+yMTsVXydVCbTORFnOOoq2lumco= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
@ -415,7 +415,6 @@ github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pf
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
@ -623,7 +622,6 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
@ -711,7 +709,6 @@ github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
@ -1043,7 +1040,6 @@ golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1376,7 +1372,6 @@ k8s.io/apiextensions-apiserver v0.23.4 h1:AFDUEu/yEf0YnuZhqhIFhPLPhhcQQVuR1u3WCh
k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g= k8s.io/apiextensions-apiserver v0.23.4/go.mod h1:TWYAKymJx7nLMxWCgWm2RYGXHrGlVZnxIlGnvtfYu+g=
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apimachinery v0.18.4/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/apimachinery v0.18.4/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc=
k8s.io/apimachinery v0.23.4 h1:fhnuMd/xUL3Cjfl64j5ULKZ1/J9n8NuQEgNL+WXWfdM= k8s.io/apimachinery v0.23.4 h1:fhnuMd/xUL3Cjfl64j5ULKZ1/J9n8NuQEgNL+WXWfdM=
@ -1426,7 +1421,6 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
@ -1434,7 +1428,6 @@ k8s.io/kube-aggregator v0.23.4 h1:gLk78rGLVfUXCdD14NrKg/JFBmNNCZ8FEs3tYt+W6Zk=
k8s.io/kube-aggregator v0.23.4/go.mod h1:hpmPi4oaLBe014CkBCqzBYWok64H2C7Ka6FBLJvHgkg= k8s.io/kube-aggregator v0.23.4/go.mod h1:hpmPi4oaLBe014CkBCqzBYWok64H2C7Ka6FBLJvHgkg=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
@ -1468,8 +1461,8 @@ sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WG
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
sigs.k8s.io/kind v0.8.1/go.mod h1:oNKTxUVPYkV9lWzY6CVMNluVq8cBsyq+UgPJdvA3uu4= sigs.k8s.io/kind v0.8.1/go.mod h1:oNKTxUVPYkV9lWzY6CVMNluVq8cBsyq+UgPJdvA3uu4=
sigs.k8s.io/kind v0.11.1 h1:pVzOkhUwMBrCB0Q/WllQDO3v14Y+o2V0tFgjTqIUjwA= sigs.k8s.io/kind v0.12.0 h1:LFynXwQkH1MrWI8pM1FQty0oUwEKjU5EkMaVZaPld8E=
sigs.k8s.io/kind v0.11.1/go.mod h1:fRpgVhtqAWrtLB9ED7zQahUimpUXuG/iHT88xYqEGIA= sigs.k8s.io/kind v0.12.0/go.mod h1:EcgDSBVxz8Bvm19fx8xkioFrf9dC30fMJdOTXBSGNoM=
sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g= sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g=
sigs.k8s.io/kustomize/api v0.10.1 h1:KgU7hfYoscuqag84kxtzKdEC3mKMb99DPI3a0eaV1d0= sigs.k8s.io/kustomize/api v0.10.1 h1:KgU7hfYoscuqag84kxtzKdEC3mKMb99DPI3a0eaV1d0=
sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8= sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8=

View File

@ -11,7 +11,7 @@ function usage() {
echo "Example: hack/create-cluster.sh host /root/.kube/karmada.config" echo "Example: hack/create-cluster.sh host /root/.kube/karmada.config"
} }
CLUSTER_VERSION=${CLUSTER_VERSION:-"kindest/node:v1.22.0"} CLUSTER_VERSION=${CLUSTER_VERSION:-"kindest/node:v1.23.4"}
if [[ $# -lt 1 ]]; then if [[ $# -lt 1 ]]; then
usage usage

View File

@ -23,7 +23,7 @@ MEMBER_CLUSTER_2_NAME=${MEMBER_CLUSTER_2_NAME:-"member2"}
PULL_MODE_CLUSTER_NAME=${PULL_MODE_CLUSTER_NAME:-"member3"} PULL_MODE_CLUSTER_NAME=${PULL_MODE_CLUSTER_NAME:-"member3"}
HOST_IPADDRESS=${1:-} HOST_IPADDRESS=${1:-}
CLUSTER_VERSION=${CLUSTER_VERSION:-"kindest/node:v1.22.0"} CLUSTER_VERSION=${CLUSTER_VERSION:-"kindest/node:v1.23.4"}
KIND_LOG_FILE=${KIND_LOG_FILE:-"/tmp/karmada"} KIND_LOG_FILE=${KIND_LOG_FILE:-"/tmp/karmada"}
#step0: prepare #step0: prepare
@ -59,7 +59,7 @@ util::cmd_must_exist "go"
util::verify_go_version util::verify_go_version
# install kind and kubectl # install kind and kubectl
kind_version=v0.11.1 kind_version=v0.12.0
echo -n "Preparing: 'kind' existence check - " echo -n "Preparing: 'kind' existence check - "
if util::cmd_exist kind; then if util::cmd_exist kind; then
echo "passed" echo "passed"

View File

@ -1,5 +1,2 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test toml.test
/toml-test

View File

@ -1,15 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test

View File

@ -1,3 +1 @@
Compatible with TOML version Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)

View File

@ -1,19 +0,0 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master

View File

@ -6,27 +6,22 @@ packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data `encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.) representations. (There is an example of this below.)
Spec: https://github.com/toml-lang/toml Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
Compatible with TOML version Documentation: https://godocs.io/github.com/BurntSushi/toml
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: https://godoc.org/github.com/BurntSushi/toml See the [releases page](https://github.com/BurntSushi/toml/releases) for a
changelog; this information is also in the git tag annotations (e.g. `git show
v0.4.0`).
Installation: This library requires Go 1.13 or newer; install it with:
```bash $ go get github.com/BurntSushi/toml
go get github.com/BurntSushi/toml
```
Try the toml validator: It also comes with a TOML validator CLI tool:
```bash $ go get github.com/BurntSushi/toml/cmd/tomlv
go get github.com/BurntSushi/toml/cmd/tomlv $ tomlv some-toml-file.toml
tomlv some-toml-file.toml
```
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing ### Testing
@ -36,8 +31,8 @@ and the encoder.
### Examples ### Examples
This package works similarly to how the Go standard library handles `XML` This package works similarly to how the Go standard library handles XML and
and `JSON`. Namely, data is loaded into Go values via reflection. JSON. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys For the simplest example, consider some TOML file as just a list of keys
and values: and values:
@ -54,11 +49,11 @@ Which could be defined in Go as:
```go ```go
type Config struct { type Config struct {
Age int Age int
Cats []string Cats []string
Pi float64 Pi float64
Perfection []int Perfection []int
DOB time.Time // requires `import time` DOB time.Time // requires `import time`
} }
``` ```
@ -84,6 +79,9 @@ type TOML struct {
} }
``` ```
Beware that like other most other decoders **only exported fields** are
considered when encoding and decoding; private fields are silently ignored.
### Using the `encoding.TextUnmarshaler` interface ### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into Here's an example that automatically parses duration strings into
@ -103,19 +101,19 @@ Which can be decoded with:
```go ```go
type song struct { type song struct {
Name string Name string
Duration duration Duration duration
} }
type songs struct { type songs struct {
Song []song Song []song
} }
var favorites songs var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil { if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err) log.Fatal(err)
} }
for _, s := range favorites.Song { for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration) fmt.Printf("%s (%s)\n", s.Name, s.Duration)
} }
``` ```
@ -134,6 +132,9 @@ func (d *duration) UnmarshalText(text []byte) error {
} }
``` ```
To target TOML specifically you can implement `UnmarshalTOML` TOML interface in
a similar way.
### More complex usage ### More complex usage
Here's an example of how to load the example from the official spec page: Here's an example of how to load the example from the official spec page:
@ -180,23 +181,23 @@ And the corresponding Go types are:
```go ```go
type tomlConfig struct { type tomlConfig struct {
Title string Title string
Owner ownerInfo Owner ownerInfo
DB database `toml:"database"` DB database `toml:"database"`
Servers map[string]server Servers map[string]server
Clients clients Clients clients
} }
type ownerInfo struct { type ownerInfo struct {
Name string Name string
Org string `toml:"organization"` Org string `toml:"organization"`
Bio string Bio string
DOB time.Time DOB time.Time
} }
type database struct { type database struct {
Server string Server string
Ports []int Ports []int
ConnMax int `toml:"connection_max"` ConnMax int `toml:"connection_max"`
Enabled bool Enabled bool
} }
@ -207,7 +208,7 @@ type server struct {
} }
type clients struct { type clients struct {
Data [][]interface{} Data [][]interface{}
Hosts []string Hosts []string
} }
``` ```
@ -216,3 +217,4 @@ Note that a case insensitive match will be tried if an exact match can't be
found. found.
A working example of the above can be found in `_examples/example.{go,toml}`. A working example of the above can be found in `_examples/example.{go,toml}`.

View File

@ -1,19 +1,17 @@
package toml package toml
import ( import (
"encoding"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"math" "math"
"os"
"reflect" "reflect"
"strings" "strings"
"time" "time"
) )
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a // Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves. // TOML description of themselves.
type Unmarshaler interface { type Unmarshaler interface {
@ -27,30 +25,21 @@ func Unmarshal(p []byte, v interface{}) error {
} }
// Primitive is a TOML value that hasn't been decoded into a Go value. // Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
// //
// A `Primitive` value can be decoded using the `PrimitiveDecode` function. // This type can be used for any value, which will cause decoding to be delayed.
// You can use the PrimitiveDecode() function to "manually" decode these values.
// //
// The underlying representation of a `Primitive` value is subject to change. // NOTE: The underlying representation of a `Primitive` value is subject to
// Do not rely on it. // change. Do not rely on it.
// //
// N.B. Primitive values are still parsed, so using them will only avoid // NOTE: Primitive values are still parsed, so using them will only avoid the
// the overhead of reflection. They can be useful when you don't know the // overhead of reflection. They can be useful when you don't know the exact type
// exact type of TOML data until run time. // of TOML data until runtime.
type Primitive struct { type Primitive struct {
undecoded interface{} undecoded interface{}
context Key context Key
} }
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it // PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values // decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions, // can *only* be obtained from values filled by the decoder functions,
@ -68,43 +57,51 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
return md.unify(primValue.undecoded, rvalue(v)) return md.unify(primValue.undecoded, rvalue(v))
} }
// Decode will decode the contents of `data` in TOML format into a pointer // Decoder decodes TOML data.
// `v`.
// //
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be // TOML tables correspond to Go structs or maps (dealer's choice they can be
// used interchangeably.) // used interchangeably).
// //
// TOML arrays of tables correspond to either a slice of structs or a slice // TOML table arrays correspond to either a slice of structs or a slice of maps.
// of maps.
// //
// TOML datetimes correspond to Go `time.Time` values. // TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
// in the local timezone.
// //
// All other TOML types (float, string, int, bool and array) correspond // All other TOML types (float, string, int, bool and array) correspond to the
// to the obvious Go types. // obvious Go types.
// //
// An exception to the above rules is if a type implements the // An exception to the above rules is if a type implements the TextUnmarshaler
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value // interface, in which case any primitive TOML value (floats, strings, integers,
// (floats, strings, integers, booleans and datetimes) will be converted to // booleans, datetimes) will be converted to a []byte and given to the value's
// a byte string and given to the value's UnmarshalText method. See the // UnmarshalText method. See the Unmarshaler example for a demonstration with
// Unmarshaler example for a demonstration with time duration strings. // time duration strings.
// //
// Key mapping // Key mapping
// //
// TOML keys can map to either keys in a Go map or field names in a Go // TOML keys can map to either keys in a Go map or field names in a Go struct.
// struct. The special `toml` struct tag may be used to map TOML keys to // The special `toml` struct tag can be used to map TOML keys to struct fields
// struct fields that don't match the key name exactly. (See the example.) // that don't match the key name exactly (see the example). A case insensitive
// A case insensitive match to struct names will be tried if an exact match // match to struct names will be tried if an exact match can't be found.
// can't be found.
// //
// The mapping between TOML values and Go values is loose. That is, there // The mapping between TOML values and Go values is loose. That is, there may
// may exist TOML values that cannot be placed into your representation, and // exist TOML values that cannot be placed into your representation, and there
// there may be parts of your representation that do not correspond to // may be parts of your representation that do not correspond to TOML values.
// TOML values. This loose mapping can be made stricter by using the IsDefined // This loose mapping can be made stricter by using the IsDefined and/or
// and/or Undecoded methods on the MetaData returned. // Undecoded methods on the MetaData returned.
// //
// This decoder will not handle cyclic types. If a cyclic type is passed, // This decoder does not handle cyclic types. Decode will not terminate if a
// `Decode` will not terminate. // cyclic type is passed.
func Decode(data string, v interface{}) (MetaData, error) { type Decoder struct {
r io.Reader
}
// NewDecoder creates a new Decoder.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// Decode TOML data in to the pointer `v`.
func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v) rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr { if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v)) return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
@ -112,7 +109,15 @@ func Decode(data string, v interface{}) (MetaData, error) {
if rv.IsNil() { if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v)) return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
} }
p, err := parse(data)
// TODO: have parser should read from io.Reader? Or at the very least, make
// it read from []byte rather than string
data, err := ioutil.ReadAll(dec.r)
if err != nil {
return MetaData{}, err
}
p, err := parse(string(data))
if err != nil { if err != nil {
return MetaData{}, err return MetaData{}, err
} }
@ -123,24 +128,22 @@ func Decode(data string, v interface{}) (MetaData, error) {
return md, md.unify(p.mapping, indirect(rv)) return md, md.unify(p.mapping, indirect(rv))
} }
// DecodeFile is just like Decode, except it will automatically read the // Decode the TOML data in to the pointer v.
// contents of the file at `fpath` and decode it for you. //
func DecodeFile(fpath string, v interface{}) (MetaData, error) { // See the documentation on Decoder for a description of the decoding process.
bs, err := ioutil.ReadFile(fpath) func Decode(data string, v interface{}) (MetaData, error) {
if err != nil { return NewDecoder(strings.NewReader(data)).Decode(v)
return MetaData{}, err
}
return Decode(string(bs), v)
} }
// DecodeReader is just like Decode, except it will consume all bytes // DecodeFile is just like Decode, except it will automatically read the
// from the reader and decode it for you. // contents of the file at path and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { func DecodeFile(path string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r) fp, err := os.Open(path)
if err != nil { if err != nil {
return MetaData{}, err return MetaData{}, err
} }
return Decode(string(bs), v) defer fp.Close()
return NewDecoder(fp).Decode(v)
} }
// unify performs a sort of type unification based on the structure of `rv`, // unify performs a sort of type unification based on the structure of `rv`,
@ -149,8 +152,8 @@ func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
// Any type mismatch produces an error. Finding a type that we don't know // Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error. // how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error { func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value. // Special case. Look for a `Primitive` value.
// TODO: #76 would make this superfluous after implemented.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive // Save the undecoded data and the key context into the primitive
// value. // value.
@ -170,25 +173,17 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
} }
} }
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface. // Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok { if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return md.unifyText(data, v) return md.unifyText(data, v)
} }
// BUG(burntsushi) // TODO:
// The behavior here is incorrect whenever a Go type satisfies the // The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
// hash or array. In particular, the unmarshaler should only be applied // array. In particular, the unmarshaler should only be applied to primitive
// to primitive TOML values. But at this point, it will be applied to // TOML values. But at this point, it will be applied to all kinds of values
// all kinds of values and produce an incorrect error whenever those values // and produce an incorrect error whenever those values are hashes or arrays
// are hashes or arrays (including arrays of tables). // (including arrays of tables).
k := rv.Kind() k := rv.Kind()
@ -277,6 +272,12 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
} }
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
if k := rv.Type().Key().Kind(); k != reflect.String {
return fmt.Errorf(
"toml: cannot decode to a map with non-string key type (%s in %q)",
k, rv.Type())
}
tmap, ok := mapping.(map[string]interface{}) tmap, ok := mapping.(map[string]interface{})
if !ok { if !ok {
if tmap == nil { if tmap == nil {
@ -312,10 +313,8 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
} }
return badtype("slice", data) return badtype("slice", data)
} }
sliceLen := datav.Len() if l := datav.Len(); l != rv.Len() {
if sliceLen != rv.Len() { return e("expected array length %d; got TOML array of length %d", rv.Len(), l)
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
} }
return md.unifySliceArray(datav, rv) return md.unifySliceArray(datav, rv)
} }
@ -337,11 +336,10 @@ func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
} }
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len() l := data.Len()
for i := 0; i < sliceLen; i++ { for i := 0; i < l; i++ {
v := data.Index(i).Interface() err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i)))
sliceval := indirect(rv.Index(i)) if err != nil {
if err := md.unify(v, sliceval); err != nil {
return err return err
} }
} }
@ -439,7 +437,7 @@ func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
return nil return nil
} }
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error {
var s string var s string
switch sdata := data.(type) { switch sdata := data.(type) {
case TextMarshaler: case TextMarshaler:
@ -482,7 +480,7 @@ func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr { if v.Kind() != reflect.Ptr {
if v.CanSet() { if v.CanSet() {
pv := v.Addr() pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok { if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok {
return pv return pv
} }
} }
@ -498,12 +496,16 @@ func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() { if rv.CanSet() {
return true return true
} }
if _, ok := rv.Interface().(TextUnmarshaler); ok { if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
return true return true
} }
return false return false
} }
// e builds an error from a format string and arguments, adding the
// package-wide "toml: " message prefix. fmt.Errorf is used so that any
// %w verbs in format still wrap their argument errors.
func e(format string, args ...interface{}) error {
	const prefix = "toml: "
	return fmt.Errorf(prefix+format, args...)
}
func badtype(expected string, data interface{}) error { func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected) return e("cannot load TOML value of type %T into a Go %s", data, expected)
} }

18
vendor/github.com/BurntSushi/toml/decode_go116.go generated vendored Normal file
View File

@ -0,0 +1,18 @@
// +build go1.16
package toml
import (
"io/fs"
)
// DecodeFS is just like Decode, except it will automatically read the
// contents of the file at path from the given fs.FS instance.
func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
	file, err := fsys.Open(path)
	if err != nil {
		return MetaData{}, err
	}
	defer file.Close()

	dec := NewDecoder(file)
	return dec.Decode(v)
}

View File

@ -2,9 +2,9 @@ package toml
import "strings" import "strings"
// MetaData allows access to meta information about TOML data that may not // MetaData allows access to meta information about TOML data that may not be
// be inferrable via reflection. In particular, whether a key has been defined // inferable via reflection. In particular, whether a key has been defined and
// and the TOML type of a key. // the TOML type of a key.
type MetaData struct { type MetaData struct {
mapping map[string]interface{} mapping map[string]interface{}
types map[string]tomlType types map[string]tomlType
@ -13,10 +13,11 @@ type MetaData struct {
context Key // Used only during decoding. context Key // Used only during decoding.
} }
// IsDefined returns true if the key given exists in the TOML data. The key // IsDefined reports if the key exists in the TOML data.
// should be specified hierarchially. e.g., //
// The key should be specified hierarchically, for example to access the TOML
// key "a.b.c" you would use:
// //
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c") // IsDefined("a", "b", "c")
// //
// IsDefined will return false if an empty key given. Keys are case sensitive. // IsDefined will return false if an empty key given. Keys are case sensitive.
@ -41,8 +42,8 @@ func (md *MetaData) IsDefined(key ...string) bool {
// Type returns a string representation of the type of the key specified. // Type returns a string representation of the type of the key specified.
// //
// Type will return the empty string if given an empty key or a key that // Type will return the empty string if given an empty key or a key that does
// does not exist. Keys are case sensitive. // not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string { func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".") fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok { if typ, ok := md.types[fullkey]; ok {
@ -51,13 +52,11 @@ func (md *MetaData) Type(key ...string) string {
return "" return ""
} }
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys // Key represents any TOML key, including key groups. Use (MetaData).Keys to get
// to get values of this type. // values of this type.
type Key []string type Key []string
func (k Key) String() string { func (k Key) String() string { return strings.Join(k, ".") }
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string { func (k Key) maybeQuotedAll() string {
var ss []string var ss []string
@ -68,6 +67,9 @@ func (k Key) maybeQuotedAll() string {
} }
func (k Key) maybeQuoted(i int) string { func (k Key) maybeQuoted(i int) string {
if k[i] == "" {
return `""`
}
quote := false quote := false
for _, c := range k[i] { for _, c := range k[i] {
if !isBareKeyChar(c) { if !isBareKeyChar(c) {
@ -76,7 +78,7 @@ func (k Key) maybeQuoted(i int) string {
} }
} }
if quote { if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" return `"` + quotedReplacer.Replace(k[i]) + `"`
} }
return k[i] return k[i]
} }
@ -89,10 +91,10 @@ func (k Key) add(piece string) Key {
} }
// Keys returns a slice of every key in the TOML data, including key groups. // Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
// //
// The list will have the same order as the keys appeared in the TOML data. // Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific. The list will have the same
// order as the keys appeared in the TOML data.
// //
// All keys returned are non-empty. // All keys returned are non-empty.
func (md *MetaData) Keys() []Key { func (md *MetaData) Keys() []Key {

33
vendor/github.com/BurntSushi/toml/deprecated.go generated vendored Normal file
View File

@ -0,0 +1,33 @@
package toml
import (
"encoding"
"io"
)
// TextMarshaler is identical to encoding.TextMarshaler; it is defined here
// only to support Go 1.1 and older.
//
// Deprecated: use encoding.TextMarshaler instead.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is identical to encoding.TextUnmarshaler; it is defined
// here only to support Go 1.1 and older.
//
// Deprecated: use encoding.TextUnmarshaler instead.
type TextUnmarshaler encoding.TextUnmarshaler
// PrimitiveDecode unifies a previously captured Primitive value into v.
//
// Deprecated: use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
	meta := MetaData{decoded: make(map[string]bool)}
	return meta.unify(primValue.undecoded, rvalue(v))
}
// DecodeReader consumes all bytes from r and decodes them as TOML into v.
//
// Deprecated: use NewDecoder(reader).Decode(&v) instead.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
	dec := NewDecoder(r)
	return dec.Decode(v)
}

View File

@ -1,27 +1,13 @@
/* /*
Package toml provides facilities for decoding and encoding TOML configuration Package toml implements decoding and encoding of TOML files.
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/toml-lang/toml This package supports TOML v1.0.0, as listed on https://toml.io
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify There is also support for delaying decoding with the Primitive type, and
whether a file is a valid TOML document. It can also be used to print the querying the set of keys in a TOML document with the MetaData type.
type of each key in a TOML document.
Testing The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
and can be used to verify if TOML document is valid. It can also be used to
There are two important types of tests used for this package. The first is print the type of each key.
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/ */
package toml package toml

View File

@ -2,48 +2,92 @@ package toml
import ( import (
"bufio" "bufio"
"encoding"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math"
"reflect" "reflect"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"github.com/BurntSushi/toml/internal"
) )
type tomlEncodeError struct{ error } type tomlEncodeError struct{ error }
var ( var (
errArrayMixedElementTypes = errors.New( errArrayNilElement = errors.New("toml: cannot encode array with nil element")
"toml: cannot encode array with mixed element types") errNonString = errors.New("toml: cannot encode a map with non-string key type")
errArrayNilElement = errors.New( errAnonNonStruct = errors.New("toml: cannot encode an anonymous field that is not a struct")
"toml: cannot encode array with nil element") errNoKey = errors.New("toml: top-level values must be Go maps or structs")
errNonString = errors.New( errAnything = errors.New("") // used in testing
"toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
) )
var quotedReplacer = strings.NewReplacer( var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"", "\"", "\\\"",
"\\", "\\\\", "\\", "\\\\",
"\x00", `\u0000`,
"\x01", `\u0001`,
"\x02", `\u0002`,
"\x03", `\u0003`,
"\x04", `\u0004`,
"\x05", `\u0005`,
"\x06", `\u0006`,
"\x07", `\u0007`,
"\b", `\b`,
"\t", `\t`,
"\n", `\n`,
"\x0b", `\u000b`,
"\f", `\f`,
"\r", `\r`,
"\x0e", `\u000e`,
"\x0f", `\u000f`,
"\x10", `\u0010`,
"\x11", `\u0011`,
"\x12", `\u0012`,
"\x13", `\u0013`,
"\x14", `\u0014`,
"\x15", `\u0015`,
"\x16", `\u0016`,
"\x17", `\u0017`,
"\x18", `\u0018`,
"\x19", `\u0019`,
"\x1a", `\u001a`,
"\x1b", `\u001b`,
"\x1c", `\u001c`,
"\x1d", `\u001d`,
"\x1e", `\u001e`,
"\x1f", `\u001f`,
"\x7f", `\u007f`,
) )
// Encoder controls the encoding of Go values to a TOML document to some // Encoder encodes a Go to a TOML document.
// io.Writer.
// //
// The indentation level can be controlled with the Indent field. // The mapping between Go values and TOML values should be precisely the same as
// for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.
//
// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
// are encoded first.
//
// Go maps will be sorted alphabetically by key for deterministic output.
//
// Encoding Go values without a corresponding TOML representation will return an
// error. Examples of this includes maps with non-string keys, slices with nil
// elements, embedded non-struct types, and nested slices containing maps or
// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
// is okay, as is []map[string][]string).
//
// NOTE: Only exported keys are encoded due to the use of reflection. Unexported
// keys are silently discarded.
type Encoder struct { type Encoder struct {
// A single indentation level. By default it is two spaces. // The string to use for a single indentation level. The default is two
// spaces.
Indent string Indent string
// hasWritten is whether we have written any output to w yet. // hasWritten is whether we have written any output to w yet.
@ -51,8 +95,7 @@ type Encoder struct {
w *bufio.Writer w *bufio.Writer
} }
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer // NewEncoder create a new Encoder.
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder { func NewEncoder(w io.Writer) *Encoder {
return &Encoder{ return &Encoder{
w: bufio.NewWriter(w), w: bufio.NewWriter(w),
@ -60,29 +103,10 @@ func NewEncoder(w io.Writer) *Encoder {
} }
} }
// Encode writes a TOML representation of the Go value to the underlying // Encode writes a TOML representation of the Go value to the Encoder's writer.
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
// //
// The mapping between Go values and TOML values should be precisely the same // An error is returned if the value given cannot be encoded to a valid TOML
// as for the Decode* functions. Similarly, the TextMarshaler interface is // document.
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error { func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v)) rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil { if err := enc.safeEncode(Key([]string{}), rv); err != nil {
@ -110,9 +134,13 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. If we can marshal the type to text, then we used that. // Special case. If we can marshal the type to text, then we used that.
// Basically, this prevents the encoder for handling these types as // Basically, this prevents the encoder for handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is). // generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) { switch t := rv.Interface().(type) {
case time.Time, TextMarshaler: case time.Time, encoding.TextMarshaler:
enc.keyEqElement(key, rv) enc.writeKeyValue(key, rv, false)
return
// TODO: #76 would make this superfluous after implemented.
case Primitive:
enc.encode(key, reflect.ValueOf(t.undecoded))
return return
} }
@ -123,12 +151,12 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64, reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv) enc.writeKeyValue(key, rv, false)
case reflect.Array, reflect.Slice: case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv) enc.eArrayOfTables(key, rv)
} else { } else {
enc.keyEqElement(key, rv) enc.writeKeyValue(key, rv, false)
} }
case reflect.Interface: case reflect.Interface:
if rv.IsNil() { if rv.IsNil() {
@ -148,22 +176,32 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
case reflect.Struct: case reflect.Struct:
enc.eTable(key, rv) enc.eTable(key, rv)
default: default:
panic(e("unsupported type for key '%s': %s", key, k)) encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
} }
} }
// eElement encodes any value that can be an array element (primitives and // eElement encodes any value that can be an array element.
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) { func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) { switch v := rv.Interface().(type) {
case time.Time: case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
// Special case time.Time as a primitive. Has to come before format := time.RFC3339Nano
// TextMarshaler below because time.Time implements switch v.Location() {
// encoding.TextMarshaler, but we need to always use UTC. case internal.LocalDatetime:
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z")) format = "2006-01-02T15:04:05.999999999"
case internal.LocalDate:
format = "2006-01-02"
case internal.LocalTime:
format = "15:04:05.999999999"
}
switch v.Location() {
default:
enc.wf(v.Format(format))
case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
enc.wf(v.In(time.UTC).Format(format))
}
return return
case TextMarshaler: case encoding.TextMarshaler:
// Special case. Use text marshaler if it's available for this value. // Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil { if s, err := v.MarshalText(); err != nil {
encPanic(err) encPanic(err)
} else { } else {
@ -171,32 +209,49 @@ func (enc *Encoder) eElement(rv reflect.Value) {
} }
return return
} }
switch rv.Kind() { switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String: case reflect.String:
enc.writeQuoted(rv.String()) enc.writeQuoted(rv.String())
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
f := rv.Float()
if math.IsNaN(f) {
enc.wf("nan")
} else if math.IsInf(f, 0) {
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32)))
}
case reflect.Float64:
f := rv.Float()
if math.IsNaN(f) {
enc.wf("nan")
} else if math.IsInf(f, 0) {
enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)])
} else {
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64)))
}
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Struct:
enc.eStruct(nil, rv, true)
case reflect.Map:
enc.eMap(nil, rv, true)
case reflect.Interface:
enc.eElement(rv.Elem())
default: default:
panic(e("unexpected primitive type: %s", rv.Kind())) encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface()))
} }
} }
// By the TOML spec, all floats must have a decimal with at least one // By the TOML spec, all floats must have a decimal with at least one number on
// number on either side. // either side.
func floatAddDecimal(fstr string) string { func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") { if !strings.Contains(fstr, ".") {
return fstr + ".0" return fstr + ".0"
@ -230,16 +285,14 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if isNil(trv) { if isNil(trv) {
continue continue
} }
panicIfInvalidKey(key)
enc.newline() enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline() enc.newline()
enc.eMapOrStruct(key, trv) enc.eMapOrStruct(key, trv, false)
} }
} }
func (enc *Encoder) eTable(key Key, rv reflect.Value) { func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 { if len(key) == 1 {
// Output an extra newline between top-level tables. // Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.) // (The newline isn't written if nothing else has been written though.)
@ -249,21 +302,22 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline() enc.newline()
} }
enc.eMapOrStruct(key, rv) enc.eMapOrStruct(key, rv, false)
} }
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) {
switch rv := eindirect(rv); rv.Kind() { switch rv := eindirect(rv); rv.Kind() {
case reflect.Map: case reflect.Map:
enc.eMap(key, rv) enc.eMap(key, rv, inline)
case reflect.Struct: case reflect.Struct:
enc.eStruct(key, rv) enc.eStruct(key, rv, inline)
default: default:
// Should never happen?
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
} }
} }
func (enc *Encoder) eMap(key Key, rv reflect.Value) { func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) {
rt := rv.Type() rt := rv.Type()
if rt.Key().Kind() != reflect.String { if rt.Key().Kind() != reflect.String {
encPanic(errNonString) encPanic(errNonString)
@ -281,57 +335,76 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value) {
} }
} }
var writeMapKeys = func(mapKeys []string) { var writeMapKeys = func(mapKeys []string, trailC bool) {
sort.Strings(mapKeys) sort.Strings(mapKeys)
for _, mapKey := range mapKeys { for i, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey)) val := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) { if isNil(val) {
// Don't write anything for nil fields.
continue continue
} }
enc.encode(key.add(mapKey), mrv)
if inline {
enc.writeKeyValue(Key{mapKey}, val, true)
if trailC || i != len(mapKeys)-1 {
enc.wf(", ")
}
} else {
enc.encode(key.add(mapKey), val)
}
} }
} }
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub) if inline {
enc.wf("{")
}
writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
writeMapKeys(mapKeysSub, false)
if inline {
enc.wf("}")
}
} }
func (enc *Encoder) eStruct(key Key, rv reflect.Value) { func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
// Write keys for fields directly under this key first, because if we write // Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that // a field that creates a new table then all keys under it will be in that
// table (not the one we're writing here). // table (not the one we're writing here).
rt := rv.Type() //
var fieldsDirect, fieldsSub [][]int // Fields is a [][]int: for fieldsDirect this always has one entry (the
var addFields func(rt reflect.Type, rv reflect.Value, start []int) // struct index). For fieldsSub it contains two entries: the parent field
// index from tv, and the field indexes for the fields of the sub.
var (
rt = rv.Type()
fieldsDirect, fieldsSub [][]int
addFields func(rt reflect.Type, rv reflect.Value, start []int)
)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) { addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ { for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i) f := rt.Field(i)
// skip unexported fields if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
if f.PkgPath != "" && !f.Anonymous {
continue continue
} }
frv := rv.Field(i) frv := rv.Field(i)
// Treat anonymous struct fields with tag names as though they are
// not anonymous, like encoding/json does.
//
// Non-struct anonymous fields use the normal encoding logic.
if f.Anonymous { if f.Anonymous {
t := f.Type t := f.Type
switch t.Kind() { switch t.Kind() {
case reflect.Struct: case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" { if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index) addFields(t, frv, append(start, f.Index...))
continue continue
} }
case reflect.Ptr: case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct && if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
getOptions(f.Tag).name == "" {
if !frv.IsNil() { if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index) addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
} }
continue continue
} }
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
} }
} }
@ -344,35 +417,49 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
} }
addFields(rt, rv, nil) addFields(rt, rv, nil)
var writeFields = func(fields [][]int) { writeFields := func(fields [][]int) {
for _, fieldIndex := range fields { for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex) fieldType := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields. if isNil(fieldVal) { /// Don't write anything for nil fields.
continue continue
} }
opts := getOptions(sft.Tag) opts := getOptions(fieldType.Tag)
if opts.skip { if opts.skip {
continue continue
} }
keyName := sft.Name keyName := fieldType.Name
if opts.name != "" { if opts.name != "" {
keyName = opts.name keyName = opts.name
} }
if opts.omitempty && isEmpty(sf) { if opts.omitempty && isEmpty(fieldVal) {
continue continue
} }
if opts.omitzero && isZero(sf) { if opts.omitzero && isZero(fieldVal) {
continue continue
} }
enc.encode(key.add(keyName), sf) if inline {
enc.writeKeyValue(Key{keyName}, fieldVal, true)
if fieldIndex[0] != len(fields)-1 {
enc.wf(", ")
}
} else {
enc.encode(key.add(keyName), fieldVal)
}
} }
} }
if inline {
enc.wf("{")
}
writeFields(fieldsDirect) writeFields(fieldsDirect)
writeFields(fieldsSub) writeFields(fieldsSub)
if inline {
enc.wf("}")
}
} }
// tomlTypeName returns the TOML type name of the Go value's type. It is // tomlTypeName returns the TOML type name of the Go value's type. It is
@ -411,13 +498,26 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
switch rv.Interface().(type) { switch rv.Interface().(type) {
case time.Time: case time.Time:
return tomlDatetime return tomlDatetime
case TextMarshaler: case encoding.TextMarshaler:
return tomlString return tomlString
default: default:
// Someone used a pointer receiver: we can make it work for pointer
// values.
if rv.CanAddr() {
_, ok := rv.Addr().Interface().(encoding.TextMarshaler)
if ok {
return tomlString
}
}
return tomlHash return tomlHash
} }
default: default:
panic("unexpected reflect.Kind: " + rv.Kind().String()) _, ok := rv.Interface().(encoding.TextMarshaler)
if ok {
return tomlString
}
encPanic(errors.New("unsupported type: " + rv.Kind().String()))
panic("") // Need *some* return value
} }
} }
@ -430,30 +530,19 @@ func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil return nil
} }
/// Don't allow nil.
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
if tomlTypeOfGo(rv.Index(i)) == nil {
encPanic(errArrayNilElement)
}
}
firstType := tomlTypeOfGo(rv.Index(0)) firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil { if firstType == nil {
encPanic(errArrayNilElement) encPanic(errArrayNilElement)
} }
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType return firstType
} }
@ -511,14 +600,20 @@ func (enc *Encoder) newline() {
} }
} }
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { // Write a key/value pair:
//
// key = <any value>
//
// If inline is true it won't add a newline at the end.
func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
if len(key) == 0 { if len(key) == 0 {
encPanic(errNoKey) encPanic(errNoKey)
} }
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val) enc.eElement(val)
enc.newline() if !inline {
enc.newline()
}
} }
func (enc *Encoder) wf(format string, v ...interface{}) { func (enc *Encoder) wf(format string, v ...interface{}) {
@ -553,16 +648,3 @@ func isNil(rv reflect.Value) bool {
return false return false
} }
} }
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}

View File

@ -1,19 +0,0 @@
// +build go1.2
package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler

View File

@ -1,18 +0,0 @@
// +build !go1.2
package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}

36
vendor/github.com/BurntSushi/toml/internal/tz.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
package internal
import "time"
// Timezones used for local datetime, date, and time TOML types.
//
// The exact way times and dates without a timezone should be interpreted is not
// well-defined in the TOML specification and left to the implementation. These
// defaults to current local timezone offset of the computer, but this can be
// changed by changing these variables before decoding.
//
// TODO:
// Ideally we'd like to offer people the ability to configure the used timezone
// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
// tricky: the reason we use three different variables for this is to support
// round-tripping without these specific TZ names we wouldn't know which
// format to use.
//
// There isn't a good way to encode this right now though, and passing this sort
// of information also ties in to various related issues such as string format
// encoding, encoding of comments, etc.
//
// So, for the time being, just put this in internal until we can write a good
// comprehensive API for doing all of this.
//
// The reason they're exported is because they're referred from in e.g.
// internal/tag.
//
// Note that this behaviour is valid according to the TOML spec as the exact
// behaviour is left up to implementations.
var (
localOffset = func() int { _, o := time.Now().Zone(); return o }()
LocalDatetime = time.FixedZone("datetime-local", localOffset)
LocalDate = time.FixedZone("date-local", localOffset)
LocalTime = time.FixedZone("time-local", localOffset)
)

View File

@ -2,6 +2,8 @@ package toml
import ( import (
"fmt" "fmt"
"reflect"
"runtime"
"strings" "strings"
"unicode" "unicode"
"unicode/utf8" "unicode/utf8"
@ -29,6 +31,7 @@ const (
itemArrayTableStart itemArrayTableStart
itemArrayTableEnd itemArrayTableEnd
itemKeyStart itemKeyStart
itemKeyEnd
itemCommentStart itemCommentStart
itemInlineTableStart itemInlineTableStart
itemInlineTableEnd itemInlineTableEnd
@ -64,9 +67,9 @@ type lexer struct {
state stateFn state stateFn
items chan item items chan item
// Allow for backing up up to three runes. // Allow for backing up up to four runes.
// This is necessary because TOML contains 3-rune tokens (""" and '''). // This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int prevWidths [4]int
nprev int // how many of prevWidths are in use nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call // If we emit an eof, we can still back up, but it is not OK to call
// next again. // next again.
@ -93,6 +96,7 @@ func (lx *lexer) nextItem() item {
return item return item
default: default:
lx.state = lx.state(lx) lx.state = lx.state(lx)
//fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack)
} }
} }
} }
@ -137,7 +141,7 @@ func (lx *lexer) emitTrim(typ itemType) {
func (lx *lexer) next() (r rune) { func (lx *lexer) next() (r rune) {
if lx.atEOF { if lx.atEOF {
panic("next called after EOF") panic("BUG in lexer: next called after EOF")
} }
if lx.pos >= len(lx.input) { if lx.pos >= len(lx.input) {
lx.atEOF = true lx.atEOF = true
@ -147,12 +151,19 @@ func (lx *lexer) next() (r rune) {
if lx.input[lx.pos] == '\n' { if lx.input[lx.pos] == '\n' {
lx.line++ lx.line++
} }
lx.prevWidths[3] = lx.prevWidths[2]
lx.prevWidths[2] = lx.prevWidths[1] lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0] lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 { if lx.nprev < 4 {
lx.nprev++ lx.nprev++
} }
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
if r == utf8.RuneError {
lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos])
return utf8.RuneError
}
lx.prevWidths[0] = w lx.prevWidths[0] = w
lx.pos += w lx.pos += w
return r return r
@ -163,18 +174,19 @@ func (lx *lexer) ignore() {
lx.start = lx.pos lx.start = lx.pos
} }
// backup steps back one rune. Can be called only twice between calls to next. // backup steps back one rune. Can be called 4 times between calls to next.
func (lx *lexer) backup() { func (lx *lexer) backup() {
if lx.atEOF { if lx.atEOF {
lx.atEOF = false lx.atEOF = false
return return
} }
if lx.nprev < 1 { if lx.nprev < 1 {
panic("backed up too far") panic("BUG in lexer: backed up too far")
} }
w := lx.prevWidths[0] w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1] lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2] lx.prevWidths[1] = lx.prevWidths[2]
lx.prevWidths[2] = lx.prevWidths[3]
lx.nprev-- lx.nprev--
lx.pos -= w lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
@ -269,8 +281,9 @@ func lexTopEnd(lx *lexer) stateFn {
lx.emit(itemEOF) lx.emit(itemEOF)
return nil return nil
} }
return lx.errorf("expected a top-level item to end with a newline, "+ return lx.errorf(
"comment, or EOF, but got %q instead", r) "expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
r)
} }
// lexTable lexes the beginning of a table. Namely, it makes sure that // lexTable lexes the beginning of a table. Namely, it makes sure that
@ -297,8 +310,9 @@ func lexTableEnd(lx *lexer) stateFn {
func lexArrayTableEnd(lx *lexer) stateFn { func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd { if r := lx.next(); r != arrayTableEnd {
return lx.errorf("expected end of table array name delimiter %q, "+ return lx.errorf(
"but got %q instead", arrayTableEnd, r) "expected end of table array name delimiter %q, but got %q instead",
arrayTableEnd, r)
} }
lx.emit(itemArrayTableEnd) lx.emit(itemArrayTableEnd)
return lexTopEnd return lexTopEnd
@ -308,32 +322,19 @@ func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace) lx.skip(isWhitespace)
switch r := lx.peek(); { switch r := lx.peek(); {
case r == tableEnd || r == eof: case r == tableEnd || r == eof:
return lx.errorf("unexpected end of table name " + return lx.errorf("unexpected end of table name (table names cannot be empty)")
"(table names cannot be empty)")
case r == tableSep: case r == tableSep:
return lx.errorf("unexpected table separator " + return lx.errorf("unexpected table separator (table names cannot be empty)")
"(table names cannot be empty)")
case r == stringStart || r == rawStringStart: case r == stringStart || r == rawStringStart:
lx.ignore() lx.ignore()
lx.push(lexTableNameEnd) lx.push(lexTableNameEnd)
return lexValue // reuse string lexing return lexQuotedName
default: default:
return lexBareTableName lx.push(lexTableNameEnd)
return lexBareName
} }
} }
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
return lexBareTableName
}
lx.backup()
lx.emit(itemText)
return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally // lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace. // consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn { func lexTableNameEnd(lx *lexer) stateFn {
@ -347,63 +348,101 @@ func lexTableNameEnd(lx *lexer) stateFn {
case r == tableEnd: case r == tableEnd:
return lx.pop() return lx.pop()
default: default:
return lx.errorf("expected '.' or ']' to end table name, "+ return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
"but got %q instead", r)
} }
} }
// lexKeyStart consumes a key name up until the first non-whitespace character. // lexBareName lexes one part of a key or table.
// lexKeyStart will ignore whitespace. //
func lexKeyStart(lx *lexer) stateFn { // It assumes that at least one valid character for the table has already been
r := lx.peek() // read.
//
// Lexes only one part, e.g. only 'a' inside 'a.b'.
func lexBareName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
return lexBareName
}
lx.backup()
lx.emit(itemText)
return lx.pop()
}
// lexBareName lexes one part of a key or table.
//
// It assumes that at least one valid character for the table has already been
// read.
//
// Lexes only one part, e.g. only '"a"' inside '"a".b'.
func lexQuotedName(lx *lexer) stateFn {
r := lx.next()
switch { switch {
case r == keySep: case isWhitespace(r):
return lx.errorf("unexpected key separator %q", keySep) return lexSkip(lx, lexValue)
case isWhitespace(r) || isNL(r): case r == stringStart:
lx.next() lx.ignore() // ignore the '"'
return lexSkip(lx, lexKeyStart) return lexString
case r == rawStringStart:
lx.ignore() // ignore the "'"
return lexRawString
case r == eof:
return lx.errorf("unexpected EOF; expected value")
default:
return lx.errorf("expected value but found %q instead", r)
}
}
// lexKeyStart consumes all key parts until a '='.
func lexKeyStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == '=' || r == eof:
return lx.errorf("unexpected '=': key name appears blank")
case r == '.':
return lx.errorf("unexpected '.': keys cannot start with a '.'")
case r == stringStart || r == rawStringStart: case r == stringStart || r == rawStringStart:
lx.ignore() lx.ignore()
fallthrough
default: // Bare key
lx.emit(itemKeyStart) lx.emit(itemKeyStart)
lx.push(lexKeyEnd) return lexKeyNameStart
return lexValue // reuse string lexing
default:
lx.ignore()
lx.emit(itemKeyStart)
return lexBareKey
} }
} }
// lexBareKey consumes the text of a bare key. Assumes that the first character func lexKeyNameStart(lx *lexer) stateFn {
// (which is not whitespace) has not yet been consumed. lx.skip(isWhitespace)
func lexBareKey(lx *lexer) stateFn { switch r := lx.peek(); {
switch r := lx.next(); { case r == '=' || r == eof:
case isBareKeyChar(r): return lx.errorf("unexpected '='")
return lexBareKey case r == '.':
case isWhitespace(r): return lx.errorf("unexpected '.'")
lx.backup() case r == stringStart || r == rawStringStart:
lx.emit(itemText) lx.ignore()
return lexKeyEnd lx.push(lexKeyEnd)
case r == keySep: return lexQuotedName
lx.backup()
lx.emit(itemText)
return lexKeyEnd
default: default:
return lx.errorf("bare keys cannot contain %q", r) lx.push(lexKeyEnd)
return lexBareName
} }
} }
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key // lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator). // separator).
func lexKeyEnd(lx *lexer) stateFn { func lexKeyEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); { switch r := lx.next(); {
case r == keySep:
return lexSkip(lx, lexValue)
case isWhitespace(r): case isWhitespace(r):
return lexSkip(lx, lexKeyEnd) return lexSkip(lx, lexKeyEnd)
case r == eof:
return lx.errorf("unexpected EOF; expected key separator %q", keySep)
case r == '.':
lx.ignore()
return lexKeyNameStart
case r == '=':
lx.emit(itemKeyEnd)
return lexSkip(lx, lexValue)
default: default:
return lx.errorf("expected key separator %q, but got %q instead", return lx.errorf("expected '.' or '=', but got %q instead", r)
keySep, r)
} }
} }
@ -450,10 +489,15 @@ func lexValue(lx *lexer) stateFn {
} }
lx.ignore() // ignore the "'" lx.ignore() // ignore the "'"
return lexRawString return lexRawString
case '+', '-':
return lexNumberStart
case '.': // special error case, be kind to users case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'") return lx.errorf("floats must start with a digit, not '.'")
case 'i', 'n':
if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) {
lx.emit(itemFloat)
return lx.pop()
}
case '-', '+':
return lexDecimalNumberStart
} }
if unicode.IsLetter(r) { if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the // Be permissive here; lexBool will give a nice error if the
@ -463,6 +507,9 @@ func lexValue(lx *lexer) stateFn {
lx.backup() lx.backup()
return lexBool return lexBool
} }
if r == eof {
return lx.errorf("unexpected EOF; expected value")
}
return lx.errorf("expected value but found %q instead", r) return lx.errorf("expected value but found %q instead", r)
} }
@ -507,9 +554,8 @@ func lexArrayValueEnd(lx *lexer) stateFn {
return lexArrayEnd return lexArrayEnd
} }
return lx.errorf( return lx.errorf(
"expected a comma or array terminator %q, but got %q instead", "expected a comma or array terminator %q, but got %s instead",
arrayEnd, r, arrayEnd, runeOrEOF(r))
)
} }
// lexArrayEnd finishes the lexing of an array. // lexArrayEnd finishes the lexing of an array.
@ -546,8 +592,7 @@ func lexInlineTableValue(lx *lexer) stateFn {
// key/value pair and the next pair (or the end of the table): // key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'. // it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn { func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next() switch r := lx.next(); {
switch {
case isWhitespace(r): case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd) return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r): case isNL(r):
@ -557,12 +602,25 @@ func lexInlineTableValueEnd(lx *lexer) stateFn {
return lexCommentStart return lexCommentStart
case r == comma: case r == comma:
lx.ignore() lx.ignore()
lx.skip(isWhitespace)
if lx.peek() == '}' {
return lx.errorf("trailing comma not allowed in inline tables")
}
return lexInlineTableValue return lexInlineTableValue
case r == inlineTableEnd: case r == inlineTableEnd:
return lexInlineTableEnd return lexInlineTableEnd
default:
return lx.errorf(
"expected a comma or an inline table terminator %q, but got %s instead",
inlineTableEnd, runeOrEOF(r))
} }
return lx.errorf("expected a comma or an inline table terminator %q, "+ }
"but got %q instead", inlineTableEnd, r)
func runeOrEOF(r rune) string {
if r == eof {
return "end of file"
}
return "'" + string(r) + "'"
} }
// lexInlineTableEnd finishes the lexing of an inline table. // lexInlineTableEnd finishes the lexing of an inline table.
@ -579,7 +637,9 @@ func lexString(lx *lexer) stateFn {
r := lx.next() r := lx.next()
switch { switch {
case r == eof: case r == eof:
return lx.errorf("unexpected EOF") return lx.errorf(`unexpected EOF; expected '"'`)
case isControl(r) || r == '\r':
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r): case isNL(r):
return lx.errorf("strings cannot contain newlines") return lx.errorf("strings cannot contain newlines")
case r == '\\': case r == '\\':
@ -598,19 +658,40 @@ func lexString(lx *lexer) stateFn {
// lexMultilineString consumes the inner contents of a string. It assumes that // lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored. // the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn { func lexMultilineString(lx *lexer) stateFn {
switch lx.next() { r := lx.next()
switch r {
case eof: case eof:
return lx.errorf("unexpected EOF") return lx.errorf(`unexpected EOF; expected '"""'`)
case '\r':
if lx.peek() != '\n' {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineString
case '\\': case '\\':
return lexMultilineStringEscape return lexMultilineStringEscape
case stringEnd: case stringEnd:
/// Found " → try to read two more "".
if lx.accept(stringEnd) { if lx.accept(stringEnd) {
if lx.accept(stringEnd) { if lx.accept(stringEnd) {
lx.backup() /// Peek ahead: the string can contain " and "", including at the
/// end: """str"""""
/// 6 or more at the end, however, is an error.
if lx.peek() == stringEnd {
/// Check if we already lexed 5 's; if so we have 6 now, and
/// that's just too many man!
if strings.HasSuffix(lx.current(), `"""""`) {
return lx.errorf(`unexpected '""""""'`)
}
lx.backup()
lx.backup()
return lexMultilineString
}
lx.backup() /// backup: don't include the """ in the item.
lx.backup() lx.backup()
lx.backup() lx.backup()
lx.emit(itemMultilineString) lx.emit(itemMultilineString)
lx.next() lx.next() /// Read over ''' again and discard it.
lx.next() lx.next()
lx.next() lx.next()
lx.ignore() lx.ignore()
@ -619,6 +700,10 @@ func lexMultilineString(lx *lexer) stateFn {
lx.backup() lx.backup()
} }
} }
if isControl(r) {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineString return lexMultilineString
} }
@ -628,7 +713,9 @@ func lexRawString(lx *lexer) stateFn {
r := lx.next() r := lx.next()
switch { switch {
case r == eof: case r == eof:
return lx.errorf("unexpected EOF") return lx.errorf(`unexpected EOF; expected "'"`)
case isControl(r) || r == '\r':
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
case isNL(r): case isNL(r):
return lx.errorf("strings cannot contain newlines") return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd: case r == rawStringEnd:
@ -645,17 +732,38 @@ func lexRawString(lx *lexer) stateFn {
// a string. It assumes that the beginning "'''" has already been consumed and // a string. It assumes that the beginning "'''" has already been consumed and
// ignored. // ignored.
func lexMultilineRawString(lx *lexer) stateFn { func lexMultilineRawString(lx *lexer) stateFn {
switch lx.next() { r := lx.next()
switch r {
case eof: case eof:
return lx.errorf("unexpected EOF") return lx.errorf(`unexpected EOF; expected "'''"`)
case '\r':
if lx.peek() != '\n' {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineRawString
case rawStringEnd: case rawStringEnd:
/// Found ' → try to read two more ''.
if lx.accept(rawStringEnd) { if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) { if lx.accept(rawStringEnd) {
lx.backup() /// Peek ahead: the string can contain ' and '', including at the
/// end: '''str'''''
/// 6 or more at the end, however, is an error.
if lx.peek() == rawStringEnd {
/// Check if we already lexed 5 's; if so we have 6 now, and
/// that's just too many man!
if strings.HasSuffix(lx.current(), "'''''") {
return lx.errorf(`unexpected "''''''"`)
}
lx.backup()
lx.backup()
return lexMultilineRawString
}
lx.backup() /// backup: don't include the ''' in the item.
lx.backup() lx.backup()
lx.backup() lx.backup()
lx.emit(itemRawMultilineString) lx.emit(itemRawMultilineString)
lx.next() lx.next() /// Read over ''' again and discard it.
lx.next() lx.next()
lx.next() lx.next()
lx.ignore() lx.ignore()
@ -664,6 +772,10 @@ func lexMultilineRawString(lx *lexer) stateFn {
lx.backup() lx.backup()
} }
} }
if isControl(r) {
return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
}
return lexMultilineRawString return lexMultilineRawString
} }
@ -694,6 +806,10 @@ func lexStringEscape(lx *lexer) stateFn {
fallthrough fallthrough
case '"': case '"':
fallthrough fallthrough
case ' ', '\t':
// Inside """ .. """ strings you can use \ to escape newlines, and any
// amount of whitespace can be between the \ and \n.
fallthrough
case '\\': case '\\':
return lx.pop() return lx.pop()
case 'u': case 'u':
@ -701,8 +817,7 @@ func lexStringEscape(lx *lexer) stateFn {
case 'U': case 'U':
return lexLongUnicodeEscape return lexLongUnicodeEscape
} }
return lx.errorf("invalid escape character %q; only the following "+ return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+
"escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
} }
@ -711,8 +826,9 @@ func lexShortUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 4; i++ { for i := 0; i < 4; i++ {
r = lx.next() r = lx.next()
if !isHexadecimal(r) { if !isHexadecimal(r) {
return lx.errorf(`expected four hexadecimal digits after '\u', `+ return lx.errorf(
"but got %q instead", lx.current()) `expected four hexadecimal digits after '\u', but got %q instead`,
lx.current())
} }
} }
return lx.pop() return lx.pop()
@ -723,28 +839,33 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 8; i++ { for i := 0; i < 8; i++ {
r = lx.next() r = lx.next()
if !isHexadecimal(r) { if !isHexadecimal(r) {
return lx.errorf(`expected eight hexadecimal digits after '\U', `+ return lx.errorf(
"but got %q instead", lx.current()) `expected eight hexadecimal digits after '\U', but got %q instead`,
lx.current())
} }
} }
return lx.pop() return lx.pop()
} }
// lexNumberOrDateStart consumes either an integer, a float, or datetime. // lexNumberOrDateStart processes the first character of a value which begins
// with a digit. It exists to catch values starting with '0', so that
// lexBaseNumberOrDate can differentiate base prefixed integers from other
// types.
func lexNumberOrDateStart(lx *lexer) stateFn { func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next() r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r { switch r {
case '_': case '0':
return lexNumber return lexBaseNumberOrDate
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("floats must start with a digit, not '.'")
} }
return lx.errorf("expected a digit but got %q", r)
if !isDigit(r) {
// The only way to reach this state is if the value starts
// with a digit, so specifically treat anything else as an
// error.
return lx.errorf("expected a digit but got %q", r)
}
return lexNumberOrDate
} }
// lexNumberOrDate consumes either an integer, float or datetime. // lexNumberOrDate consumes either an integer, float or datetime.
@ -754,10 +875,10 @@ func lexNumberOrDate(lx *lexer) stateFn {
return lexNumberOrDate return lexNumberOrDate
} }
switch r { switch r {
case '-': case '-', ':':
return lexDatetime return lexDatetime
case '_': case '_':
return lexNumber return lexDecimalNumber
case '.', 'e', 'E': case '.', 'e', 'E':
return lexFloat return lexFloat
} }
@ -775,41 +896,156 @@ func lexDatetime(lx *lexer) stateFn {
return lexDatetime return lexDatetime
} }
switch r { switch r {
case '-', 'T', ':', '.', 'Z', '+': case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
return lexDatetime return lexDatetime
} }
lx.backup() lx.backup()
lx.emit(itemDatetime) lx.emitTrim(itemDatetime)
return lx.pop() return lx.pop()
} }
// lexNumberStart consumes either an integer or a float. It assumes that a sign // lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
// has already been read, but that *no* digits have been consumed. func lexHexInteger(lx *lexer) stateFn {
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
// We MUST see a digit. Even floats have to start with a digit.
r := lx.next() r := lx.next()
if !isDigit(r) { if isHexadecimal(r) {
if r == '.' { return lexHexInteger
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumber
} }
switch r { switch r {
case '_': case '_':
return lexNumber return lexHexInteger
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
func lexOctalInteger(lx *lexer) stateFn {
r := lx.next()
if isOctal(r) {
return lexOctalInteger
}
switch r {
case '_':
return lexOctalInteger
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
func lexBinaryInteger(lx *lexer) stateFn {
r := lx.next()
if isBinary(r) {
return lexBinaryInteger
}
switch r {
case '_':
return lexBinaryInteger
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDecimalNumber consumes a decimal float or integer.
func lexDecimalNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexDecimalNumber
}
switch r {
case '.', 'e', 'E': case '.', 'e', 'E':
return lexFloat return lexFloat
case '_':
return lexDecimalNumber
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDecimalNumber consumes the first digit of a number beginning with a sign.
// It assumes the sign has already been consumed. Values which start with a sign
// are only allowed to be decimal integers or floats.
//
// The special "nan" and "inf" values are also recognized.
func lexDecimalNumberStart(lx *lexer) stateFn {
r := lx.next()
// Special error cases to give users better error messages
switch r {
case 'i':
if !lx.accept('n') || !lx.accept('f') {
return lx.errorf("invalid float: '%s'", lx.current())
}
lx.emit(itemFloat)
return lx.pop()
case 'n':
if !lx.accept('a') || !lx.accept('n') {
return lx.errorf("invalid float: '%s'", lx.current())
}
lx.emit(itemFloat)
return lx.pop()
case '0':
p := lx.peek()
switch p {
case 'b', 'o', 'x':
return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
}
case '.':
return lx.errorf("floats must start with a digit, not '.'")
}
if isDigit(r) {
return lexDecimalNumber
}
return lx.errorf("expected a digit but got %q", r)
}
// lexBaseNumberOrDate differentiates between the possible values which
// start with '0'. It assumes that before reaching this state, the initial '0'
// has been consumed.
func lexBaseNumberOrDate(lx *lexer) stateFn {
r := lx.next()
// Note: All datetimes start with at least two digits, so we don't
// handle date characters (':', '-', etc.) here.
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '_':
// Can only be decimal, because there can't be an underscore
// between the '0' and the base designator, and dates can't
// contain underscores.
return lexDecimalNumber
case '.', 'e', 'E':
return lexFloat
case 'b':
r = lx.peek()
if !isBinary(r) {
lx.errorf("not a binary number: '%s%c'", lx.current(), r)
}
return lexBinaryInteger
case 'o':
r = lx.peek()
if !isOctal(r) {
lx.errorf("not an octal number: '%s%c'", lx.current(), r)
}
return lexOctalInteger
case 'x':
r = lx.peek()
if !isHexadecimal(r) {
lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r)
}
return lexHexInteger
} }
lx.backup() lx.backup()
@ -867,21 +1103,22 @@ func lexCommentStart(lx *lexer) stateFn {
// It will consume *up to* the first newline character, and pass control // It will consume *up to* the first newline character, and pass control
// back to the last state on the stack. // back to the last state on the stack.
func lexComment(lx *lexer) stateFn { func lexComment(lx *lexer) stateFn {
r := lx.peek() switch r := lx.next(); {
if isNL(r) || r == eof { case isNL(r) || r == eof:
lx.backup()
lx.emit(itemText) lx.emit(itemText)
return lx.pop() return lx.pop()
case isControl(r):
return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r)
default:
return lexComment
} }
lx.next()
return lexComment
} }
// lexSkip ignores all slurped input and moves on to the next state. // lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn { func lexSkip(lx *lexer, nextState stateFn) stateFn {
return func(lx *lexer) stateFn { lx.ignore()
lx.ignore() return nextState
return nextState
}
} }
// isWhitespace returns true if `r` is a whitespace character according // isWhitespace returns true if `r` is a whitespace character according
@ -894,6 +1131,16 @@ func isNL(r rune) bool {
return r == '\n' || r == '\r' return r == '\n' || r == '\r'
} }
// Control characters except \n, \t
func isControl(r rune) bool {
switch r {
case '\t', '\r', '\n':
return false
default:
return (r >= 0x00 && r <= 0x1f) || r == 0x7f
}
}
func isDigit(r rune) bool { func isDigit(r rune) bool {
return r >= '0' && r <= '9' return r >= '0' && r <= '9'
} }
@ -904,6 +1151,14 @@ func isHexadecimal(r rune) bool {
(r >= 'A' && r <= 'F') (r >= 'A' && r <= 'F')
} }
func isOctal(r rune) bool {
return r >= '0' && r <= '7'
}
func isBinary(r rune) bool {
return r == '0' || r == '1'
}
func isBareKeyChar(r rune) bool { func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') || return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') || (r >= 'a' && r <= 'z') ||
@ -912,6 +1167,17 @@ func isBareKeyChar(r rune) bool {
r == '-' r == '-'
} }
func (s stateFn) String() string {
name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
if i := strings.LastIndexByte(name, '.'); i > -1 {
name = name[i+1:]
}
if s == nil {
name = "<nil>"
}
return name + "()"
}
func (itype itemType) String() string { func (itype itemType) String() string {
switch itype { switch itype {
case itemError: case itemError:
@ -938,12 +1204,18 @@ func (itype itemType) String() string {
return "TableEnd" return "TableEnd"
case itemKeyStart: case itemKeyStart:
return "KeyStart" return "KeyStart"
case itemKeyEnd:
return "KeyEnd"
case itemArray: case itemArray:
return "Array" return "Array"
case itemArrayEnd: case itemArrayEnd:
return "ArrayEnd" return "ArrayEnd"
case itemCommentStart: case itemCommentStart:
return "CommentStart" return "CommentStart"
case itemInlineTableStart:
return "InlineTableStart"
case itemInlineTableEnd:
return "InlineTableEnd"
} }
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
} }

View File

@ -1,12 +1,14 @@
package toml package toml
import ( import (
"errors"
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
"time" "time"
"unicode"
"unicode/utf8" "unicode/utf8"
"github.com/BurntSushi/toml/internal"
) )
type parser struct { type parser struct {
@ -14,39 +16,54 @@ type parser struct {
types map[string]tomlType types map[string]tomlType
lx *lexer lx *lexer
// A list of keys in the order that they appear in the TOML data. ordered []Key // List of keys in the order that they appear in the TOML data.
ordered []Key context Key // Full key for the current hash in scope.
currentKey string // Base key name for everything except hashes.
// the full key for the current hash in scope approxLine int // Rough approximation of line number
context Key implicits map[string]bool // Record implied keys (e.g. 'key.group.names').
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
} }
type parseError string // ParseError is used when a file can't be parsed: for example invalid integer
// literals, duplicate keys, etc.
type ParseError struct {
Message string
Line int
LastKey string
}
func (pe parseError) Error() string { func (pe ParseError) Error() string {
return string(pe) return fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
pe.Line, pe.LastKey, pe.Message)
} }
func parse(data string) (p *parser, err error) { func parse(data string) (p *parser, err error) {
defer func() { defer func() {
if r := recover(); r != nil { if r := recover(); r != nil {
var ok bool var ok bool
if err, ok = r.(parseError); ok { if err, ok = r.(ParseError); ok {
return return
} }
panic(r) panic(r)
} }
}() }()
// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
// which mangles stuff.
if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
data = data[2:]
}
// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
// file (second byte in surrogate pair being NULL). Again, do this here to
// avoid having to deal with UTF-8/16 stuff in the lexer.
ex := 6
if len(data) < 6 {
ex = len(data)
}
if strings.ContainsRune(data[:ex], 0) {
return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8")
}
p = &parser{ p = &parser{
mapping: make(map[string]interface{}), mapping: make(map[string]interface{}),
types: make(map[string]tomlType), types: make(map[string]tomlType),
@ -66,13 +83,17 @@ func parse(data string) (p *parser, err error) {
} }
func (p *parser) panicf(format string, v ...interface{}) { func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", msg := fmt.Sprintf(format, v...)
p.approxLine, p.current(), fmt.Sprintf(format, v...)) panic(ParseError{
panic(parseError(msg)) Message: msg,
Line: p.approxLine,
LastKey: p.current(),
})
} }
func (p *parser) next() item { func (p *parser) next() item {
it := p.lx.nextItem() it := p.lx.nextItem()
//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
if it.typ == itemError { if it.typ == itemError {
p.panicf("%s", it.val) p.panicf("%s", it.val)
} }
@ -97,44 +118,63 @@ func (p *parser) assertEqual(expected, got itemType) {
func (p *parser) topLevel(item item) { func (p *parser) topLevel(item item) {
switch item.typ { switch item.typ {
case itemCommentStart: case itemCommentStart: // # ..
p.approxLine = item.line p.approxLine = item.line
p.expect(itemText) p.expect(itemText)
case itemTableStart: case itemTableStart: // [ .. ]
kg := p.next() name := p.next()
p.approxLine = kg.line p.approxLine = name.line
var key Key var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() {
key = append(key, p.keyString(kg)) key = append(key, p.keyString(name))
} }
p.assertEqual(itemTableEnd, kg.typ) p.assertEqual(itemTableEnd, name.typ)
p.establishContext(key, false) p.addContext(key, false)
p.setType("", tomlHash) p.setType("", tomlHash)
p.ordered = append(p.ordered, key) p.ordered = append(p.ordered, key)
case itemArrayTableStart: case itemArrayTableStart: // [[ .. ]]
kg := p.next() name := p.next()
p.approxLine = kg.line p.approxLine = name.line
var key Key var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() {
key = append(key, p.keyString(kg)) key = append(key, p.keyString(name))
} }
p.assertEqual(itemArrayTableEnd, kg.typ) p.assertEqual(itemArrayTableEnd, name.typ)
p.establishContext(key, true) p.addContext(key, true)
p.setType("", tomlArrayHash) p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key) p.ordered = append(p.ordered, key)
case itemKeyStart: case itemKeyStart: // key = ..
kname := p.next() outerContext := p.context
p.approxLine = kname.line /// Read all the key parts (e.g. 'a' and 'b' in 'a.b')
p.currentKey = p.keyString(kname) k := p.next()
p.approxLine = k.line
var key Key
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
key = append(key, p.keyString(k))
}
p.assertEqual(itemKeyEnd, k.typ)
val, typ := p.value(p.next()) /// The current key is the last part.
p.setValue(p.currentKey, val) p.currentKey = key[len(key)-1]
p.setType(p.currentKey, typ)
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
context := key[:len(key)-1]
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
/// Set value.
val, typ := p.value(p.next(), false)
p.set(p.currentKey, val, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey)) p.ordered = append(p.ordered, p.context.add(p.currentKey))
/// Remove the context we added (preserving any context from [tbl] lines).
p.context = outerContext
p.currentKey = "" p.currentKey = ""
default: default:
p.bug("Unexpected type at top level: %s", item.typ) p.bug("Unexpected type at top level: %s", item.typ)
@ -148,180 +188,253 @@ func (p *parser) keyString(it item) string {
return it.val return it.val
case itemString, itemMultilineString, case itemString, itemMultilineString,
itemRawString, itemRawMultilineString: itemRawString, itemRawMultilineString:
s, _ := p.value(it) s, _ := p.value(it, false)
return s.(string) return s.(string)
default: default:
p.bug("Unexpected key type: %s", it.typ) p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
} }
panic("unreachable")
} }
var datetimeRepl = strings.NewReplacer(
"z", "Z",
"t", "T",
" ", "T")
// value translates an expected value from the lexer into a Go value wrapped // value translates an expected value from the lexer into a Go value wrapped
// as an empty interface. // as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) { func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) {
switch it.typ { switch it.typ {
case itemString: case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it) return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString: case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it)
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString: case itemRawString:
return it.val, p.typeOfPrimitive(it) return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString: case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it) return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemInteger:
return p.valueInteger(it)
case itemFloat:
return p.valueFloat(it)
case itemBool: case itemBool:
switch it.val { switch it.val {
case "true": case "true":
return true, p.typeOfPrimitive(it) return true, p.typeOfPrimitive(it)
case "false": case "false":
return false, p.typeOfPrimitive(it) return false, p.typeOfPrimitive(it)
default:
p.bug("Expected boolean value, but got '%s'.", it.val)
} }
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime: case itemDatetime:
var t time.Time return p.valueDatetime(it)
var ok bool
var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray: case itemArray:
array := make([]interface{}, 0) return p.valueArray(it)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart: case itemInlineTableStart:
var ( return p.valueInlineTable(it, parentIsArray)
hash = make(map[string]interface{}) default:
outerContext = p.context p.bug("Unexpected value type: %s", it.typ)
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
} }
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable") panic("unreachable")
} }
func (p *parser) valueInteger(it item) (interface{}, tomlType) {
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val)
}
if numHasLeadingZero(it.val) {
p.panicf("Invalid integer %q: cannot have leading zeroes", it.val)
}
num, err := strconv.ParseInt(it.val, 0, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
}
func (p *parser) valueFloat(it item) (interface{}, tomlType) {
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val)
}
}
if len(parts) > 0 && numHasLeadingZero(parts[0]) {
p.panicf("Invalid float %q: cannot have leading zeroes", it.val)
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does.
val = "nan"
}
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
}
var dtTypes = []struct {
fmt string
zone *time.Location
}{
{time.RFC3339Nano, time.Local},
{"2006-01-02T15:04:05.999999999", internal.LocalDatetime},
{"2006-01-02", internal.LocalDate},
{"15:04:05.999999999", internal.LocalTime},
}
func (p *parser) valueDatetime(it item) (interface{}, tomlType) {
it.val = datetimeRepl.Replace(it.val)
var (
t time.Time
ok bool
err error
)
for _, dt := range dtTypes {
t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
}
func (p *parser) valueArray(it item) (interface{}, tomlType) {
p.setType(p.currentKey, tomlArray)
// p.setType(p.currentKey, typ)
var (
array []interface{}
types []tomlType
)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it, true)
array = append(array, val)
types = append(types, typ)
}
return array, tomlArray
}
func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
prevContext := p.context
p.currentKey = ""
p.addImplicit(p.context)
p.addContext(p.context, parentIsArray)
/// Loop over all table key/value pairs.
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
/// Read all key parts.
k := p.next()
p.approxLine = k.line
var key Key
for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() {
key = append(key, p.keyString(k))
}
p.assertEqual(itemKeyEnd, k.typ)
/// The current key is the last part.
p.currentKey = key[len(key)-1]
/// All the other parts (if any) are the context; need to set each part
/// as implicit.
context := key[:len(key)-1]
for i := range context {
p.addImplicitContext(append(p.context, context[i:i+1]...))
}
/// Set the value.
val, typ := p.value(p.next(), false)
p.set(p.currentKey, val, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[p.currentKey] = val
/// Restore context.
p.context = prevContext
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
// numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
// +/- signs, and base prefixes.
func numHasLeadingZero(s string) bool {
if len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])) { // >1 to allow "0" and isDigit to allow 0x
return true
}
if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' {
return true
}
return false
}
// numUnderscoresOK checks whether each underscore in s is surrounded by // numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores. // characters that are not underscores.
func numUnderscoresOK(s string) bool { func numUnderscoresOK(s string) bool {
switch s {
case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
return true
}
accept := false accept := false
for _, r := range s { for _, r := range s {
if r == '_' { if r == '_' {
if !accept { if !accept {
return false return false
} }
accept = false
continue
} }
accept = true
// isHexadecimal is a superset of all the permissable characters
// surrounding an underscore.
accept = isHexadecimal(r)
} }
return accept return accept
} }
@ -338,13 +451,12 @@ func numPeriodsOK(s string) bool {
return !period return !period
} }
// establishContext sets the current context of the parser, // Set the current context of the parser, where the context is either a hash or
// where the context is either a hash or an array of hashes. Which one is // an array of hashes, depending on the value of the `array` parameter.
// set depends on the value of the `array` parameter.
// //
// Establishing the context also makes sure that the key isn't a duplicate, and // Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically. // will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) { func (p *parser) addContext(key Key, array bool) {
var ok bool var ok bool
// Always start at the top level and drill down for our context. // Always start at the top level and drill down for our context.
@ -383,7 +495,7 @@ func (p *parser) establishContext(key Key, array bool) {
// list of tables for it. // list of tables for it.
k := key[len(key)-1] k := key[len(key)-1]
if _, ok := hashContext[k]; !ok { if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5) hashContext[k] = make([]map[string]interface{}, 0, 4)
} }
// Add a new table. But make sure the key hasn't already been used // Add a new table. But make sure the key hasn't already been used
@ -391,8 +503,7 @@ func (p *parser) establishContext(key Key, array bool) {
if hash, ok := hashContext[k].([]map[string]interface{}); ok { if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{})) hashContext[k] = append(hash, make(map[string]interface{}))
} else { } else {
p.panicf("Key '%s' was already created and cannot be used as "+ p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext)
"an array.", keyContext)
} }
} else { } else {
p.setValue(key[len(key)-1], make(map[string]interface{})) p.setValue(key[len(key)-1], make(map[string]interface{}))
@ -400,15 +511,22 @@ func (p *parser) establishContext(key Key, array bool) {
p.context = append(p.context, key[len(key)-1]) p.context = append(p.context, key[len(key)-1])
} }
// set calls setValue and setType.
func (p *parser) set(key string, val interface{}, typ tomlType) {
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
}
// setValue sets the given key to the given value in the current context. // setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for // It will make sure that the key hasn't already been defined, account for
// implicit key groups. // implicit key groups.
func (p *parser) setValue(key string, value interface{}) { func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{} var (
var ok bool tmpHash interface{}
ok bool
hash := p.mapping hash = p.mapping
keyContext := make(Key, 0) keyContext Key
)
for _, k := range p.context { for _, k := range p.context {
keyContext = append(keyContext, k) keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok { if tmpHash, ok = hash[k]; !ok {
@ -422,24 +540,26 @@ func (p *parser) setValue(key string, value interface{}) {
case map[string]interface{}: case map[string]interface{}:
hash = t hash = t
default: default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+ p.panicf("Key '%s' has already been defined.", keyContext)
"it has '%T' instead.", tmpHash)
} }
} }
keyContext = append(keyContext, key) keyContext = append(keyContext, key)
if _, ok := hash[key]; ok { if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have // Normally redefining keys isn't allowed, but the key could have been
// to raise an error since duplicate keys are disallowed. However, // defined implicitly and it's allowed to be redefined concretely. (See
// it's possible that a key was previously defined implicitly. In this // the `valid/implicit-and-explicit-after.toml` in toml-test)
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
// //
// But we have to make sure to stop marking it as an implicit. (So that // But we have to make sure to stop marking it as an implicit. (So that
// another redefinition provokes an error.) // another redefinition provokes an error.)
// //
// Note that since it has already been defined (as a hash), we don't // Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done. // want to overwrite it. So our business is done.
if p.isArray(keyContext) {
p.removeImplicit(keyContext)
hash[key] = value
return
}
if p.isImplicit(keyContext) { if p.isImplicit(keyContext) {
p.removeImplicit(keyContext) p.removeImplicit(keyContext)
return return
@ -449,6 +569,7 @@ func (p *parser) setValue(key string, value interface{}) {
// key, which is *always* wrong. // key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext) p.panicf("Key '%s' has already been defined.", keyContext)
} }
hash[key] = value hash[key] = value
} }
@ -468,21 +589,15 @@ func (p *parser) setType(key string, typ tomlType) {
p.types[keyContext.String()] = typ p.types[keyContext.String()] = typ
} }
// addImplicit sets the given Key as having been created implicitly. // Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
func (p *parser) addImplicit(key Key) { // "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
p.implicits[key.String()] = true func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true }
} func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false }
func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] }
// removeImplicit stops tagging the given key as having been implicitly func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray }
// created. func (p *parser) addImplicitContext(key Key) {
func (p *parser) removeImplicit(key Key) { p.addImplicit(key)
p.implicits[key.String()] = false p.addContext(key, false)
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
} }
// current returns the full key name of the current context. // current returns the full key name of the current context.
@ -497,20 +612,54 @@ func (p *parser) current() string {
} }
func stripFirstNewline(s string) string { func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' { if len(s) > 0 && s[0] == '\n' {
return s return s[1:]
} }
return s[1:] if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
return s[2:]
}
return s
} }
func stripEscapedWhitespace(s string) string { // Remove newlines inside triple-quoted strings if a line ends with "\".
esc := strings.Split(s, "\\\n") func stripEscapedNewlines(s string) string {
if len(esc) > 1 { split := strings.Split(s, "\n")
for i := 1; i < len(esc); i++ { if len(split) < 1 {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) return s
}
escNL := false // Keep track of the last non-blank line was escaped.
for i, line := range split {
line = strings.TrimRight(line, " \t\r")
if len(line) == 0 || line[len(line)-1] != '\\' {
split[i] = strings.TrimRight(split[i], "\r")
if !escNL && i != len(split)-1 {
split[i] += "\n"
}
continue
}
escBS := true
for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
escBS = !escBS
}
if escNL {
line = strings.TrimLeft(line, " \t\r")
}
escNL = !escBS
if escBS {
split[i] += "\n"
continue
}
split[i] = line[:len(line)-1] // Remove \
if len(split)-1 > i {
split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
} }
} }
return strings.Join(esc, "") return strings.Join(split, "")
} }
func (p *parser) replaceEscapes(str string) string { func (p *parser) replaceEscapes(str string) string {
@ -533,6 +682,9 @@ func (p *parser) replaceEscapes(str string) string {
default: default:
p.bug("Expected valid escape code after \\, but got %q.", s[r]) p.bug("Expected valid escape code after \\, but got %q.", s[r])
return "" return ""
case ' ', '\t':
p.panicf("invalid escape: '\\%c'", s[r])
return ""
case 'b': case 'b':
replaced = append(replaced, rune(0x0008)) replaced = append(replaced, rune(0x0008))
r += 1 r += 1
@ -585,8 +737,3 @@ func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
} }
return rune(hex) return rune(hex)
} }
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}

View File

@ -1 +0,0 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

View File

@ -68,24 +68,3 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType {
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable") panic("unreachable")
} }
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}

View File

@ -48,7 +48,9 @@ func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
cur, ok := doc.obj[k] cur, ok := doc.obj[k]
if !ok || cur == nil { if !ok || cur == nil {
pruneNulls(v) if !mergeMerge {
pruneNulls(v)
}
_ = doc.set(k, v, &ApplyOptions{}) _ = doc.set(k, v, &ApplyOptions{})
} else { } else {
_ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{}) _ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{})
@ -89,8 +91,8 @@ func pruneAryNulls(ary *partialArray) *partialArray {
for _, v := range *ary { for _, v := range *ary {
if v != nil { if v != nil {
pruneNulls(v) pruneNulls(v)
newAry = append(newAry, v)
} }
newAry = append(newAry, v)
} }
*ary = newAry *ary = newAry

View File

@ -27,7 +27,7 @@ var (
startObject = json.Delim('{') startObject = json.Delim('{')
endObject = json.Delim('}') endObject = json.Delim('}')
startArray = json.Delim('[') startArray = json.Delim('[')
endArray = json.Delim(']') endArray = json.Delim(']')
) )
var ( var (
@ -57,7 +57,7 @@ type Patch []Operation
type partialDoc struct { type partialDoc struct {
keys []string keys []string
obj map[string]*lazyNode obj map[string]*lazyNode
} }
type partialArray []*lazyNode type partialArray []*lazyNode
@ -766,9 +766,9 @@ func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
} }
} }
// Check if the next part is a numeric index. // Check if the next part is a numeric index or "-".
// If yes, then create an array, otherwise, create an object. // If yes, then create an array, otherwise, create an object.
if arrIndex, err = strconv.Atoi(parts[pi+1]); err == nil { if arrIndex, err = strconv.Atoi(parts[pi+1]); err == nil || parts[pi+1] == "-" {
if arrIndex < 0 { if arrIndex < 0 {
if !options.SupportNegativeIndices { if !options.SupportNegativeIndices {
@ -845,6 +845,29 @@ func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) erro
return errors.Wrapf(err, "replace operation failed to decode path") return errors.Wrapf(err, "replace operation failed to decode path")
} }
if path == "" {
val := op.value()
if val.which == eRaw {
if !val.tryDoc() {
if !val.tryAry() {
return errors.Wrapf(err, "replace operation value must be object or array")
}
}
}
switch val.which {
case eAry:
*doc = &val.ary
case eDoc:
*doc = val.doc
case eRaw:
return errors.Wrapf(err, "replace operation hit impossible case")
}
return nil
}
con, key := findObject(doc, path, options) con, key := findObject(doc, path, options)
if con == nil { if con == nil {
@ -911,6 +934,25 @@ func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
return errors.Wrapf(err, "test operation failed to decode path") return errors.Wrapf(err, "test operation failed to decode path")
} }
if path == "" {
var self lazyNode
switch sv := (*doc).(type) {
case *partialDoc:
self.doc = sv
self.which = eDoc
case *partialArray:
self.ary = *sv
self.which = eAry
}
if self.equal(op.value()) {
return nil
}
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
}
con, key := findObject(doc, path, options) con, key := findObject(doc, path, options)
if con == nil { if con == nil {
@ -1026,6 +1068,10 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
// ApplyIndentWithOptions mutates a JSON document according to the patch and the passed in ApplyOptions. // ApplyIndentWithOptions mutates a JSON document according to the patch and the passed in ApplyOptions.
// It returns the new document indented. // It returns the new document indented.
func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyOptions) ([]byte, error) { func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyOptions) ([]byte, error) {
if len(doc) == 0 {
return doc, nil
}
var pd container var pd container
if doc[0] == '[' { if doc[0] == '[' {
pd = &partialArray{} pd = &partialArray{}

12
vendor/modules.txt vendored
View File

@ -2,9 +2,10 @@
## explicit; go 1.16 ## explicit; go 1.16
github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm
github.com/Azure/go-ansiterm/winterm github.com/Azure/go-ansiterm/winterm
# github.com/BurntSushi/toml v0.3.1 # github.com/BurntSushi/toml v0.4.1
## explicit ## explicit; go 1.16
github.com/BurntSushi/toml github.com/BurntSushi/toml
github.com/BurntSushi/toml/internal
# github.com/MakeNowJust/heredoc v1.0.0 # github.com/MakeNowJust/heredoc v1.0.0
## explicit; go 1.12 ## explicit; go 1.12
github.com/MakeNowJust/heredoc github.com/MakeNowJust/heredoc
@ -56,7 +57,7 @@ github.com/emicklei/go-restful/log
# github.com/evanphx/json-patch v4.12.0+incompatible # github.com/evanphx/json-patch v4.12.0+incompatible
## explicit ## explicit
github.com/evanphx/json-patch github.com/evanphx/json-patch
# github.com/evanphx/json-patch/v5 v5.2.0 # github.com/evanphx/json-patch/v5 v5.6.0
## explicit; go 1.12 ## explicit; go 1.12
github.com/evanphx/json-patch/v5 github.com/evanphx/json-patch/v5
# github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d # github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d
@ -834,7 +835,6 @@ k8s.io/apimachinery/pkg/util/strategicpatch
k8s.io/apimachinery/pkg/util/uuid k8s.io/apimachinery/pkg/util/uuid
k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/util/validation
k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/util/validation/field
k8s.io/apimachinery/pkg/util/version
k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/util/wait
k8s.io/apimachinery/pkg/util/waitgroup k8s.io/apimachinery/pkg/util/waitgroup
k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/util/yaml
@ -1467,7 +1467,7 @@ sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics
## explicit; go 1.16 ## explicit; go 1.16
sigs.k8s.io/json sigs.k8s.io/json
sigs.k8s.io/json/internal/golang/encoding/json sigs.k8s.io/json/internal/golang/encoding/json
# sigs.k8s.io/kind v0.11.1 # sigs.k8s.io/kind v0.12.0
## explicit; go 1.14 ## explicit; go 1.14
sigs.k8s.io/kind/pkg/apis/config/defaults sigs.k8s.io/kind/pkg/apis/config/defaults
sigs.k8s.io/kind/pkg/apis/config/v1alpha4 sigs.k8s.io/kind/pkg/apis/config/v1alpha4
@ -1504,6 +1504,8 @@ sigs.k8s.io/kind/pkg/internal/apis/config
sigs.k8s.io/kind/pkg/internal/apis/config/encoding sigs.k8s.io/kind/pkg/internal/apis/config/encoding
sigs.k8s.io/kind/pkg/internal/cli sigs.k8s.io/kind/pkg/internal/cli
sigs.k8s.io/kind/pkg/internal/env sigs.k8s.io/kind/pkg/internal/env
sigs.k8s.io/kind/pkg/internal/sets
sigs.k8s.io/kind/pkg/internal/version
sigs.k8s.io/kind/pkg/log sigs.k8s.io/kind/pkg/log
# sigs.k8s.io/kustomize/api v0.10.1 # sigs.k8s.io/kustomize/api v0.10.1
## explicit; go 1.16 ## explicit; go 1.16

View File

@ -18,4 +18,4 @@ limitations under the License.
package defaults package defaults
// Image is the default for the Config.Image field, aka the default node image. // Image is the default for the Config.Image field, aka the default node image.
const Image = "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6" const Image = "kindest/node:v1.23.4@sha256:0e34f0d0fd448aa2f2819cfd74e99fe5793a6e4938b328f657c8e3f81ee0dfb9"

View File

@ -277,7 +277,7 @@ type PortMapping struct {
HostPort int32 `yaml:"hostPort,omitempty"` HostPort int32 `yaml:"hostPort,omitempty"`
// TODO: add protocol (tcp/udp) and port-ranges // TODO: add protocol (tcp/udp) and port-ranges
ListenAddress string `yaml:"listenAddress,omitempty"` ListenAddress string `yaml:"listenAddress,omitempty"`
// Protocol (TCP/UDP) // Protocol (TCP/UDP/SCTP)
Protocol PortMappingProtocol `yaml:"protocol,omitempty"` Protocol PortMappingProtocol `yaml:"protocol,omitempty"`
} }

View File

@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*

View File

@ -98,63 +98,18 @@ func (a *Action) Execute(ctx *actions.ActionContext) error {
} }
} }
// Populate the list of control-plane node labels and the list of worker node labels respectively. // create the kubeadm join configuration for the kubernetes cluster nodes only
// controlPlaneLabels is an array of maps (labels, read from config) associated with all the control-plane nodes. kubeNodes, err := nodeutils.InternalNodes(allNodes)
// workerLabels is an array of maps (labels, read from config) associated with all the worker nodes.
controlPlaneLabels := []map[string]string{}
workerLabels := []map[string]string{}
for _, node := range ctx.Config.Nodes {
if node.Role == config.ControlPlaneRole {
controlPlaneLabels = append(controlPlaneLabels, node.Labels)
} else if node.Role == config.WorkerRole {
workerLabels = append(workerLabels, node.Labels)
} else {
continue
}
}
// hashMapLabelsToCommaSeparatedLabels converts labels in hashmap form to labels in a comma-separated string form like "key1=value1,key2=value2"
hashMapLabelsToCommaSeparatedLabels := func(labels map[string]string) string {
output := ""
for key, value := range labels {
output += fmt.Sprintf("%s=%s,", key, value)
}
return strings.TrimSuffix(output, ",") // remove the last character (comma) in the output string
}
// create the kubeadm join configuration for control plane nodes
controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes)
if err != nil { if err != nil {
return err return err
} }
for i, node := range controlPlanes { for _, node := range kubeNodes {
node := node // capture loop variable node := node // capture loop variable
configData := configData // copy config data configData := configData // copy config data
if len(controlPlaneLabels[i]) > 0 {
configData.NodeLabels = hashMapLabelsToCommaSeparatedLabels(controlPlaneLabels[i]) // updating the config with the respective labels to be written over the current control-plane node in consideration
}
fns = append(fns, kubeadmConfigPlusPatches(node, configData)) fns = append(fns, kubeadmConfigPlusPatches(node, configData))
} }
// then create the kubeadm join config for the worker nodes if any
workers, err := nodeutils.SelectNodesByRole(allNodes, constants.WorkerNodeRoleValue)
if err != nil {
return err
}
if len(workers) > 0 {
// create the workers concurrently
for i, node := range workers {
node := node // capture loop variable
configData := configData // copy config data
configData.ControlPlane = false
if len(workerLabels[i]) > 0 {
configData.NodeLabels = hashMapLabelsToCommaSeparatedLabels(workerLabels[i]) // updating the config with the respective labels to be written over the current worker node in consideration
}
fns = append(fns, kubeadmConfigPlusPatches(node, configData))
}
}
// Create the kubeadm config in all nodes concurrently // Create the kubeadm config in all nodes concurrently
if err := errors.UntilErrorConcurrent(fns); err != nil { if err := errors.UntilErrorConcurrent(fns); err != nil {
return err return err
@ -162,11 +117,6 @@ func (a *Action) Execute(ctx *actions.ActionContext) error {
// if we have containerd config, patch all the nodes concurrently // if we have containerd config, patch all the nodes concurrently
if len(ctx.Config.ContainerdConfigPatches) > 0 || len(ctx.Config.ContainerdConfigPatchesJSON6902) > 0 { if len(ctx.Config.ContainerdConfigPatches) > 0 || len(ctx.Config.ContainerdConfigPatchesJSON6902) > 0 {
// we only want to patch kubernetes nodes
// this is a cheap workaround to re-use the already listed
// workers + control planes
kubeNodes := append([]nodes.Node{}, controlPlanes...)
kubeNodes = append(kubeNodes, workers...)
fns := make([]func() error, len(kubeNodes)) fns := make([]func() error, len(kubeNodes))
for i, node := range kubeNodes { for i, node := range kubeNodes {
node := node // capture loop variable node := node // capture loop variable
@ -185,8 +135,8 @@ func (a *Action) Execute(ctx *actions.ActionContext) error {
return errors.Wrap(err, "failed to write patched containerd config") return errors.Wrap(err, "failed to write patched containerd config")
} }
// restart containerd now that we've re-configured it // restart containerd now that we've re-configured it
// skip if the systemd (also the containerd) is not running // skip if containerd is not running
if err := node.Command("bash", "-c", `! systemctl is-system-running || systemctl restart containerd`).Run(); err != nil { if err := node.Command("bash", "-c", `! pgrep --exact containerd || systemctl restart containerd`).Run(); err != nil {
return errors.Wrap(err, "failed to restart containerd after patching config") return errors.Wrap(err, "failed to restart containerd after patching config")
} }
return nil return nil
@ -243,10 +193,29 @@ func getKubeadmConfig(cfg *config.Cluster, data kubeadm.ConfigData, node nodes.N
} }
data.NodeAddress = nodeAddressIPv6 data.NodeAddress = nodeAddressIPv6
if cfg.Networking.IPFamily == config.DualStackFamily { if cfg.Networking.IPFamily == config.DualStackFamily {
data.NodeAddress = fmt.Sprintf("%s,%s", nodeAddress, nodeAddressIPv6) // order matters since the nodeAddress will be used later to configure the apiserver advertise address
// Ref: #2484
primaryServiceSubnet := strings.Split(cfg.Networking.ServiceSubnet, ",")[0]
ip, _, err := net.ParseCIDR(primaryServiceSubnet)
if err != nil {
return "", fmt.Errorf("failed to parse primary Service Subnet %s (%s): %w", primaryServiceSubnet, cfg.Networking.ServiceSubnet, err)
}
if ip.To4() != nil {
data.NodeAddress = fmt.Sprintf("%s,%s", nodeAddress, nodeAddressIPv6)
} else {
data.NodeAddress = fmt.Sprintf("%s,%s", nodeAddressIPv6, nodeAddress)
}
} }
} }
// configure the node labels
if len(configNode.Labels) > 0 {
data.NodeLabels = hashMapLabelsToCommaSeparatedLabels(configNode.Labels)
}
// set the node role
data.ControlPlane = string(configNode.Role) == constants.ControlPlaneNodeRoleValue
// generate the config contents // generate the config contents
cf, err := kubeadm.Config(data) cf, err := kubeadm.Config(data)
if err != nil { if err != nil {
@ -299,3 +268,12 @@ func writeKubeadmConfig(kubeadmConfig string, node nodes.Node) error {
return nil return nil
} }
// hashMapLabelsToCommaSeparatedLabels converts labels in hashmap form to labels in a comma-separated string form like "key1=value1,key2=value2"
func hashMapLabelsToCommaSeparatedLabels(labels map[string]string) string {
output := ""
for key, value := range labels {
output += fmt.Sprintf("%s=%s,", key, value)
}
return strings.TrimSuffix(output, ",") // remove the last character (comma) in the output string
}

View File

@ -27,6 +27,7 @@ import (
"sigs.k8s.io/kind/pkg/cluster/internal/create/actions" "sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
"sigs.k8s.io/kind/pkg/internal/apis/config" "sigs.k8s.io/kind/pkg/internal/apis/config"
"sigs.k8s.io/kind/pkg/internal/version"
) )
// kubeadmInitAction implements action for executing the kubeadm init // kubeadmInitAction implements action for executing the kubeadm init
@ -106,14 +107,31 @@ func (a *action) Execute(ctx *actions.ActionContext) error {
} }
} }
// if we are only provisioning one node, remove the master taint // if we are only provisioning one node, remove the control plane taint
// https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#master-isolation // https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/#master-isolation
if len(allNodes) == 1 { if len(allNodes) == 1 {
// TODO: Once kubeadm 1.23 is no longer supported remove the <1.24 handling.
// TODO: Remove only the "control-plane" taint for kubeadm >= 1.25.
// https://github.com/kubernetes-sigs/kind/issues/1699
rawVersion, err := nodeutils.KubeVersion(node)
if err != nil {
return errors.Wrap(err, "failed to get Kubernetes version from node")
}
kubeVersion, err := version.ParseSemantic(rawVersion)
if err != nil {
return errors.Wrap(err, "could not parse Kubernetes version")
}
taints := []string{"node-role.kubernetes.io/control-plane-", "node-role.kubernetes.io/master-"}
if kubeVersion.LessThan(version.MustParseSemantic("v1.24.0")) {
taints = []string{"node-role.kubernetes.io/master-"}
}
taintArgs := []string{"--kubeconfig=/etc/kubernetes/admin.conf", "taint", "nodes", "--all"}
taintArgs = append(taintArgs, taints...)
if err := node.Command( if err := node.Command(
"kubectl", "--kubeconfig=/etc/kubernetes/admin.conf", "kubectl", taintArgs...,
"taint", "nodes", "--all", "node-role.kubernetes.io/master-",
).Run(); err != nil { ).Run(); err != nil {
return errors.Wrap(err, "failed to remove master taint") return errors.Wrap(err, "failed to remove control plane taint")
} }
} }

View File

@ -25,7 +25,9 @@ import (
"sigs.k8s.io/kind/pkg/cluster/internal/create/actions" "sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
"sigs.k8s.io/kind/pkg/cluster/nodes" "sigs.k8s.io/kind/pkg/cluster/nodes"
"sigs.k8s.io/kind/pkg/cluster/nodeutils" "sigs.k8s.io/kind/pkg/cluster/nodeutils"
"sigs.k8s.io/kind/pkg/errors"
"sigs.k8s.io/kind/pkg/exec" "sigs.k8s.io/kind/pkg/exec"
"sigs.k8s.io/kind/pkg/internal/version"
) )
// Action implements an action for waiting for the cluster to be ready // Action implements an action for waiting for the cluster to be ready
@ -66,7 +68,23 @@ func (a *Action) Execute(ctx *actions.ActionContext) error {
// Wait for the nodes to reach Ready status. // Wait for the nodes to reach Ready status.
startTime := time.Now() startTime := time.Now()
isReady := waitForReady(node, startTime.Add(a.waitTime))
// TODO: Remove the below handling once kubeadm 1.23 is no longer supported.
// https://github.com/kubernetes-sigs/kind/issues/1699
rawVersion, err := nodeutils.KubeVersion(node)
if err != nil {
return errors.Wrap(err, "failed to get Kubernetes version from node")
}
kubeVersion, err := version.ParseSemantic(rawVersion)
if err != nil {
return errors.Wrap(err, "could not parse Kubernetes version")
}
selectorLabel := "node-role.kubernetes.io/control-plane"
if kubeVersion.LessThan(version.MustParseSemantic("v1.24.0")) {
selectorLabel = "node-role.kubernetes.io/master"
}
isReady := waitForReady(node, startTime.Add(a.waitTime), selectorLabel)
if !isReady { if !isReady {
ctx.Status.End(false) ctx.Status.End(false)
ctx.Logger.V(0).Info(" • WARNING: Timed out waiting for Ready ⚠️") ctx.Logger.V(0).Info(" • WARNING: Timed out waiting for Ready ⚠️")
@ -81,14 +99,14 @@ func (a *Action) Execute(ctx *actions.ActionContext) error {
// WaitForReady uses kubectl inside the "node" container to check if the // WaitForReady uses kubectl inside the "node" container to check if the
// control plane nodes are "Ready". // control plane nodes are "Ready".
func waitForReady(node nodes.Node, until time.Time) bool { func waitForReady(node nodes.Node, until time.Time, selectorLabel string) bool {
return tryUntil(until, func() bool { return tryUntil(until, func() bool {
cmd := node.Command( cmd := node.Command(
"kubectl", "kubectl",
"--kubeconfig=/etc/kubernetes/admin.conf", "--kubeconfig=/etc/kubernetes/admin.conf",
"get", "get",
"nodes", "nodes",
"--selector=node-role.kubernetes.io/master", "--selector="+selectorLabel,
// When the node reaches status ready, the status field will be set // When the node reaches status ready, the status field will be set
// to true. // to true.
"-o=jsonpath='{.items..status.conditions[-1:].status}'", "-o=jsonpath='{.items..status.conditions[-1:].status}'",

View File

@ -151,7 +151,7 @@ func Cluster(logger log.Logger, p providers.Provider, opts *ClusterOptions) erro
var err error var err error
for _, b := range []time.Duration{0, time.Millisecond, time.Millisecond * 50, time.Millisecond * 100} { for _, b := range []time.Duration{0, time.Millisecond, time.Millisecond * 50, time.Millisecond * 100} {
time.Sleep(b) time.Sleep(b)
if err = kubeconfig.Export(p, opts.Config.Name, opts.KubeconfigPath); err == nil { if err = kubeconfig.Export(p, opts.Config.Name, opts.KubeconfigPath, true); err == nil {
break break
} }
} }

View File

@ -23,9 +23,10 @@ import (
"strings" "strings"
"text/template" "text/template"
"k8s.io/apimachinery/pkg/util/version"
"sigs.k8s.io/kind/pkg/errors" "sigs.k8s.io/kind/pkg/errors"
"sigs.k8s.io/kind/pkg/internal/apis/config" "sigs.k8s.io/kind/pkg/internal/apis/config"
"sigs.k8s.io/kind/pkg/internal/version"
) )
// ConfigData is supplied to the kubeadm config template, with values populated // ConfigData is supplied to the kubeadm config template, with values populated
@ -325,7 +326,6 @@ scheduler:
{{ end }} {{ end }}
# configure ipv6 default addresses for IPv6 clusters # configure ipv6 default addresses for IPv6 clusters
{{ if .IPv6 -}} {{ if .IPv6 -}}
address: "::"
bind-address: "::1" bind-address: "::1"
{{- end }} {{- end }}
networking: networking:
@ -427,6 +427,147 @@ conntrack:
{{end}}{{end}} {{end}}{{end}}
` `
// ConfigTemplateBetaV3 is the kubeadm config template for API version v1beta3
const ConfigTemplateBetaV3 = `# config generated by kind
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
metadata:
name: config
kubernetesVersion: {{.KubernetesVersion}}
clusterName: "{{.ClusterName}}"
{{ if .KubeadmFeatureGates}}featureGates:
{{ range $key, $value := .KubeadmFeatureGates }}
"{{ $key }}": {{ $value }}
{{end}}{{end}}
controlPlaneEndpoint: "{{ .ControlPlaneEndpoint }}"
# on docker for mac we have to expose the api server via port forward,
# so we need to ensure the cert is valid for localhost so we can talk
# to the cluster after rewriting the kubeconfig to point to localhost
apiServer:
certSANs: [localhost, "{{.APIServerAddress}}"]
extraArgs:
"runtime-config": "{{ .RuntimeConfigString }}"
{{ if .FeatureGates }}
"feature-gates": "{{ .FeatureGatesString }}"
{{ end}}
controllerManager:
extraArgs:
{{ if .FeatureGates }}
"feature-gates": "{{ .FeatureGatesString }}"
{{ end }}
enable-hostpath-provisioner: "true"
# configure ipv6 default addresses for IPv6 clusters
{{ if .IPv6 -}}
bind-address: "::"
{{- end }}
scheduler:
extraArgs:
{{ if .FeatureGates }}
"feature-gates": "{{ .FeatureGatesString }}"
{{ end }}
# configure ipv6 default addresses for IPv6 clusters
{{ if .IPv6 -}}
bind-address: "::1"
{{- end }}
networking:
podSubnet: "{{ .PodSubnet }}"
serviceSubnet: "{{ .ServiceSubnet }}"
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
metadata:
name: config
# we use a well know token for TLS bootstrap
bootstrapTokens:
- token: "{{ .Token }}"
# we use a well know port for making the API server discoverable inside docker network.
# from the host machine such port will be accessible via a random local port instead.
localAPIEndpoint:
advertiseAddress: "{{ .AdvertiseAddress }}"
bindPort: {{.APIBindPort}}
nodeRegistration:
criSocket: "unix:///run/containerd/containerd.sock"
kubeletExtraArgs:
fail-swap-on: "false"
node-ip: "{{ .NodeAddress }}"
provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}"
node-labels: "{{ .NodeLabels }}"
---
# no-op entry that exists solely so it can be patched
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
metadata:
name: config
{{ if .ControlPlane -}}
controlPlane:
localAPIEndpoint:
advertiseAddress: "{{ .AdvertiseAddress }}"
bindPort: {{.APIBindPort}}
{{- end }}
nodeRegistration:
criSocket: "unix:///run/containerd/containerd.sock"
kubeletExtraArgs:
fail-swap-on: "false"
node-ip: "{{ .NodeAddress }}"
provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}"
node-labels: "{{ .NodeLabels }}"
discovery:
bootstrapToken:
apiServerEndpoint: "{{ .ControlPlaneEndpoint }}"
token: "{{ .Token }}"
unsafeSkipCAVerification: true
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
metadata:
name: config
# explicitly set default cgroup driver
# unblocks https://github.com/kubernetes/kubernetes/pull/99471
# TODO: consider switching to systemd instead
# tracked in: https://github.com/kubernetes-sigs/kind/issues/1726
cgroupDriver: cgroupfs
# configure ipv6 addresses in IPv6 mode
{{ if .IPv6 -}}
address: "::"
healthzBindAddress: "::"
{{- end }}
# disable disk resource management by default
# kubelet will see the host disk that the inner container runtime
# is ultimately backed by and attempt to recover disk space. we don't want that.
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
{{if .FeatureGates}}featureGates:
{{ range $key := .SortedFeatureGateKeys }}
"{{ $key }}": {{ index $.FeatureGates $key }}
{{end}}{{end}}
{{if ne .KubeProxyMode "None"}}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
metadata:
name: config
mode: "{{ .KubeProxyMode }}"
{{if .FeatureGates}}featureGates:
{{ range $key := .SortedFeatureGateKeys }}
"{{ $key }}": {{ index $.FeatureGates $key }}
{{end}}{{end}}
iptables:
minSyncPeriod: 1s
conntrack:
# Skip setting sysctl value "net.netfilter.nf_conntrack_max"
# It is a global variable that affects other namespaces
maxPerCore: 0
{{if .RootlessProvider}}
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
{{end}}{{end}}
`
// Config returns a kubeadm config generated from config data, in particular // Config returns a kubeadm config generated from config data, in particular
// the kubernetes version // the kubernetes version
func Config(data ConfigData) (config string, err error) { func Config(data ConfigData) (config string, err error) {
@ -440,13 +581,25 @@ func Config(data ConfigData) (config string, err error) {
data.FeatureGates = make(map[string]bool) data.FeatureGates = make(map[string]bool)
} }
// assume the latest API version, then fallback if the k8s version is too low if data.RootlessProvider {
templateSource := ConfigTemplateBetaV2 if ver.LessThan(version.MustParseSemantic("v1.22.0")) {
if ver.LessThan(version.MustParseSemantic("v1.15.0")) { // rootless kind v0.12.x supports Kubernetes v1.22 with KubeletInUserNamespace gate.
if data.RootlessProvider { // rootless kind v0.11.x supports older Kubernetes with fake procfs.
return "", errors.Errorf("version %q is not compatible with rootless provider", ver) return "", errors.Errorf("version %q is not compatible with rootless provider (hint: kind v0.11.x may work with this version)", ver)
} }
data.FeatureGates["KubeletInUserNamespace"] = true
// For avoiding err="failed to get rootfs info: failed to get device for dir \"/var/lib/kubelet\": could not find device with major: 0, minor: 41 in cached partitions map"
// https://github.com/kubernetes-sigs/kind/issues/2524
data.FeatureGates["LocalStorageCapacityIsolation"] = false
}
// assume the latest API version, then fallback if the k8s version is too low
templateSource := ConfigTemplateBetaV3
if ver.LessThan(version.MustParseSemantic("v1.15.0")) {
templateSource = ConfigTemplateBetaV1 templateSource = ConfigTemplateBetaV1
} else if ver.LessThan(version.MustParseSemantic("v1.23.0")) {
templateSource = ConfigTemplateBetaV2
} }
t, err := template.New("kubeadm-config").Parse(templateSource) t, err := template.New("kubeadm-config").Parse(templateSource)

View File

@ -22,7 +22,7 @@ import (
"path/filepath" "path/filepath"
"runtime" "runtime"
"k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/kind/pkg/internal/sets"
) )
const kubeconfigEnv = "KUBECONFIG" const kubeconfigEnv = "KUBECONFIG"

View File

@ -32,8 +32,8 @@ import (
// Export exports the kubeconfig given the cluster context and a path to write it to // Export exports the kubeconfig given the cluster context and a path to write it to
// This will always be an external kubeconfig // This will always be an external kubeconfig
func Export(p providers.Provider, name, explicitPath string) error { func Export(p providers.Provider, name, explicitPath string, external bool) error {
cfg, err := get(p, name, true) cfg, err := get(p, name, external)
if err != nil { if err != nil {
return err return err
} }
@ -63,7 +63,7 @@ func Get(p providers.Provider, name string, external bool) (string, error) {
} }
// ContextForCluster returns the context name for a kind cluster based on // ContextForCluster returns the context name for a kind cluster based on
// it's name. This key is used for all list entries of kind clusters // its name. This key is used for all list entries of kind clusters
func ContextForCluster(kindClusterName string) string { func ContextForCluster(kindClusterName string) string {
return kubeconfig.KINDClusterKey(kindClusterName) return kubeconfig.KINDClusterKey(kindClusterName)
} }
@ -80,7 +80,8 @@ func get(p providers.Provider, name string, external bool) (*kubeconfig.Config,
return nil, err return nil, err
} }
if len(nodes) < 1 { if len(nodes) < 1 {
return nil, errors.New("could not locate any control plane nodes") return nil, errors.Errorf("could not locate any control plane nodes for cluster named '%s'. "+
"Use the --name option to select a different cluster", name)
} }
node := nodes[0] node := nodes[0]

View File

@ -17,7 +17,7 @@ limitations under the License.
package loadbalancer package loadbalancer
// Image defines the loadbalancer image:tag // Image defines the loadbalancer image:tag
const Image = "kindest/haproxy:v20200708-548e36db" const Image = "kindest/haproxy:v20220207-ca68f7d4"
// ConfigPath defines the path to the config file in the image // ConfigPath defines the path to the config file in the image
const ConfigPath = "/usr/local/etc/haproxy/haproxy.cfg" const ConfigPath = "/usr/local/etc/haproxy/haproxy.cfg"

View File

@ -0,0 +1,85 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"bufio"
"context"
"os"
"regexp"
"sync"
"sigs.k8s.io/kind/pkg/errors"
"sigs.k8s.io/kind/pkg/exec"
)
var nodeReachedCgroupsReadyRegexp *regexp.Regexp
var nodeReachedCgroupsReadyRegexpCompileOnce sync.Once
// NodeReachedCgroupsReadyRegexp returns a regexp for use with WaitUntilLogRegexpMatches
//
// This is used to avoid "ERROR: this script needs /sys/fs/cgroup/cgroup.procs to be empty (for writing the top-level cgroup.subtree_control)"
// See https://github.com/kubernetes-sigs/kind/issues/2409
//
// This pattern matches either "detected cgroupv1" from the kind node image's entrypoint logs
// or "Multi-User System" target if is using cgroups v2,
// so that `docker exec` can be executed safely without breaking cgroup v2 hierarchy.
func NodeReachedCgroupsReadyRegexp() *regexp.Regexp {
nodeReachedCgroupsReadyRegexpCompileOnce.Do(func() {
// This is an approximation, see: https://github.com/kubernetes-sigs/kind/pull/2421
nodeReachedCgroupsReadyRegexp = regexp.MustCompile("Reached target .*Multi-User System.*|detected cgroup v1")
})
return nodeReachedCgroupsReadyRegexp
}
// WaitUntilLogRegexpMatches waits until logCmd output produces a line matching re.
// It will use logCtx to determine if the logCmd deadline was exceeded for producing
// the most useful error message in failure cases, logCtx should be the context
// supplied to create logCmd with CommandContext
func WaitUntilLogRegexpMatches(logCtx context.Context, logCmd exec.Cmd, re *regexp.Regexp) error {
pr, pw, err := os.Pipe()
if err != nil {
return err
}
logCmd.SetStdout(pw)
logCmd.SetStderr(pw)
defer pr.Close()
cmdErrC := make(chan error, 1)
go func() {
defer pw.Close()
cmdErrC <- logCmd.Run()
}()
sc := bufio.NewScanner(pr)
for sc.Scan() {
line := sc.Text()
if re.MatchString(line) {
return nil
}
}
// when we timeout the process will have been killed due to the timeout, which is not interesting
// in other cases if the command errored this may be a useful error
if ctxErr := logCtx.Err(); ctxErr != context.DeadlineExceeded {
if cmdErr := <-cmdErrC; cmdErr != nil {
return errors.Wrap(cmdErr, "failed to read logs")
}
}
// otherwise generic error
return errors.Errorf("could not find a log line that matches %q", re.String())
}

View File

@ -17,9 +17,8 @@ limitations under the License.
package common package common
import ( import (
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/kind/pkg/internal/apis/config" "sigs.k8s.io/kind/pkg/internal/apis/config"
"sigs.k8s.io/kind/pkg/internal/sets"
) )
// RequiredNodeImages returns the set of _node_ images specified by the config // RequiredNodeImages returns the set of _node_ images specified by the config

View File

@ -266,7 +266,7 @@ func isIPv6UnavailableError(err error) bool {
func isPoolOverlapError(err error) bool { func isPoolOverlapError(err error) bool {
rerr := exec.RunErrorForError(err) rerr := exec.RunErrorForError(err)
return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Pool overlaps with other one on this address space") return rerr != nil && strings.HasPrefix(string(rerr.Output), "Error response from daemon: Pool overlaps with other one on this address space") || strings.Contains(string(rerr.Output), "networks have overlapping")
} }
func isNetworkAlreadyExistsError(err error) bool { func isNetworkAlreadyExistsError(err error) bool {
@ -275,7 +275,6 @@ func isNetworkAlreadyExistsError(err error) bool {
} }
// returns true if: // returns true if:
// - err is nil
// - err only contains no such network errors // - err only contains no such network errors
func isOnlyErrorNoSuchNetwork(err error) bool { func isOnlyErrorNoSuchNetwork(err error) bool {
rerr := exec.RunErrorForError(err) rerr := exec.RunErrorForError(err)
@ -291,7 +290,7 @@ func isOnlyErrorNoSuchNetwork(err error) bool {
} else if err != nil { } else if err != nil {
return false return false
} }
// if the line begins with Eror: No such network: it's fine // if the line begins with Error: No such network: it's fine
s := string(l) s := string(l)
if strings.HasPrefix(s, "Error: No such network:") { if strings.HasPrefix(s, "Error: No such network:") {
continue continue

View File

@ -25,8 +25,6 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"k8s.io/apimachinery/pkg/util/sets"
"sigs.k8s.io/kind/pkg/cluster/nodes" "sigs.k8s.io/kind/pkg/cluster/nodes"
"sigs.k8s.io/kind/pkg/errors" "sigs.k8s.io/kind/pkg/errors"
"sigs.k8s.io/kind/pkg/exec" "sigs.k8s.io/kind/pkg/exec"
@ -38,6 +36,7 @@ import (
"sigs.k8s.io/kind/pkg/cluster/nodeutils" "sigs.k8s.io/kind/pkg/cluster/nodeutils"
"sigs.k8s.io/kind/pkg/internal/apis/config" "sigs.k8s.io/kind/pkg/internal/apis/config"
"sigs.k8s.io/kind/pkg/internal/cli" "sigs.k8s.io/kind/pkg/internal/cli"
"sigs.k8s.io/kind/pkg/internal/sets"
) )
// NewProvider returns a new provider based on executing `docker ...` // NewProvider returns a new provider based on executing `docker ...`
@ -124,7 +123,7 @@ func (p *provider) ListNodes(cluster string) ([]nodes.Node, error) {
) )
lines, err := exec.OutputLines(cmd) lines, err := exec.OutputLines(cmd)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to list clusters") return nil, errors.Wrap(err, "failed to list nodes")
} }
// convert names to node handles // convert names to node handles
ret := make([]nodes.Node, 0, len(lines)) ret := make([]nodes.Node, 0, len(lines))

View File

@ -17,10 +17,12 @@ limitations under the License.
package docker package docker
import ( import (
"context"
"fmt" "fmt"
"net" "net"
"path/filepath" "path/filepath"
"strings" "strings"
"time"
"sigs.k8s.io/kind/pkg/cluster/constants" "sigs.k8s.io/kind/pkg/cluster/constants"
"sigs.k8s.io/kind/pkg/errors" "sigs.k8s.io/kind/pkg/errors"
@ -42,7 +44,7 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
name := nodeNamer(string(node.Role)) // name the node name := nodeNamer(string(node.Role)) // name the node
names[i] = name names[i] = name
} }
haveLoadbalancer := clusterHasImplicitLoadBalancer(cfg) haveLoadbalancer := config.ClusterHasImplicitLoadBalancer(cfg)
if haveLoadbalancer { if haveLoadbalancer {
names = append(names, nodeNamer(constants.ExternalLoadBalancerNodeRoleValue)) names = append(names, nodeNamer(constants.ExternalLoadBalancerNodeRoleValue))
} }
@ -74,7 +76,7 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
if err != nil { if err != nil {
return err return err
} }
return createContainer(args) return createContainer(name, args)
}) })
} }
@ -110,7 +112,7 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
if err != nil { if err != nil {
return err return err
} }
return createContainer(args) return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args)
}) })
case config.WorkerRole: case config.WorkerRole:
createContainerFuncs = append(createContainerFuncs, func() error { createContainerFuncs = append(createContainerFuncs, func() error {
@ -118,7 +120,7 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
if err != nil { if err != nil {
return err return err
} }
return createContainer(args) return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args)
}) })
default: default:
return nil, errors.Errorf("unknown node role: %q", node.Role) return nil, errors.Errorf("unknown node role: %q", node.Role)
@ -127,28 +129,6 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
return createContainerFuncs, nil return createContainerFuncs, nil
} }
func createContainer(args []string) error {
if err := exec.Command("docker", args...).Run(); err != nil {
return errors.Wrap(err, "docker run error")
}
return nil
}
func clusterIsIPv6(cfg *config.Cluster) bool {
return cfg.Networking.IPFamily == config.IPv6Family || cfg.Networking.IPFamily == config.DualStackFamily
}
func clusterHasImplicitLoadBalancer(cfg *config.Cluster) bool {
controlPlanes := 0
for _, configNode := range cfg.Nodes {
role := string(configNode.Role)
if role == constants.ControlPlaneNodeRoleValue {
controlPlanes++
}
}
return controlPlanes > 1
}
// commonArgs computes static arguments that apply to all containers // commonArgs computes static arguments that apply to all containers
func commonArgs(cluster string, cfg *config.Cluster, networkName string, nodeNames []string) ([]string, error) { func commonArgs(cluster string, cfg *config.Cluster, networkName string, nodeNames []string) ([]string, error) {
// standard arguments all nodes containers need, computed once // standard arguments all nodes containers need, computed once
@ -190,7 +170,7 @@ func commonArgs(cluster string, cfg *config.Cluster, networkName string, nodeNam
} }
// enable IPv6 if necessary // enable IPv6 if necessary
if clusterIsIPv6(cfg) { if config.ClusterHasIPv6(cfg) {
args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1") args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1")
} }
@ -214,14 +194,17 @@ func commonArgs(cluster string, cfg *config.Cluster, networkName string, nodeNam
args = append(args, "--volume", "/dev/mapper:/dev/mapper") args = append(args, "--volume", "/dev/mapper:/dev/mapper")
} }
// enable /dev/fuse explicitly for fuse-overlayfs
// (Rootless Docker does not automatically mount /dev/fuse with --privileged)
if mountFuse() {
args = append(args, "--device", "/dev/fuse")
}
return args, nil return args, nil
} }
func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, name string, args []string) ([]string, error) { func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, name string, args []string) ([]string, error) {
args = append([]string{ args = append([]string{
"run",
"--hostname", name, // make hostname match container name "--hostname", name, // make hostname match container name
"--name", name, // ... and set the container name
// label the node with the role ID // label the node with the role ID
"--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role), "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role),
// running containers in a container requires privileged // running containers in a container requires privileged
@ -243,6 +226,8 @@ func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, n
"--volume", "/var", "--volume", "/var",
// some k8s things want to read /lib/modules // some k8s things want to read /lib/modules
"--volume", "/lib/modules:/lib/modules:ro", "--volume", "/lib/modules:/lib/modules:ro",
// propagate KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER to the entrypoint script
"-e", "KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER",
}, },
args..., args...,
) )
@ -266,9 +251,7 @@ func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, n
func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) { func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) {
args = append([]string{ args = append([]string{
"run",
"--hostname", name, // make hostname match container name "--hostname", name, // make hostname match container name
"--name", name, // ... and set the container name
// label the node with the role ID // label the node with the role ID
"--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue), "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue),
}, },
@ -371,7 +354,7 @@ func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings .
// in a future API revision we will handle this at the API level and remove this // in a future API revision we will handle this at the API level and remove this
if pm.ListenAddress == "" { if pm.ListenAddress == "" {
switch clusterIPFamily { switch clusterIPFamily {
case config.IPv4Family: case config.IPv4Family, config.DualStackFamily:
pm.ListenAddress = "0.0.0.0" // this is the docker default anyhow pm.ListenAddress = "0.0.0.0" // this is the docker default anyhow
case config.IPv6Family: case config.IPv6Family:
pm.ListenAddress = "::" pm.ListenAddress = "::"
@ -405,3 +388,21 @@ func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings .
} }
return args, nil return args, nil
} }
func createContainer(name string, args []string) error {
if err := exec.Command("docker", append([]string{"run", "--name", name}, args...)...).Run(); err != nil {
return err
}
return nil
}
func createContainerWithWaitUntilSystemdReachesMultiUserSystem(name string, args []string) error {
if err := exec.Command("docker", append([]string{"run", "--name", name}, args...)...).Run(); err != nil {
return err
}
logCtx, logCancel := context.WithTimeout(context.Background(), 30*time.Second)
logCmd := exec.CommandContext(logCtx, "docker", "logs", "-f", name)
defer logCancel()
return common.WaitUntilLogRegexpMatches(logCtx, logCmd, common.NodeReachedCgroupsReadyRegexp())
}

View File

@ -85,3 +85,16 @@ func mountDevMapper() bool {
return storage == "btrfs" || storage == "zfs" || storage == "xfs" return storage == "btrfs" || storage == "zfs" || storage == "xfs"
} }
// rootless: use fuse-overlayfs by default
// https://github.com/kubernetes-sigs/kind/issues/2275
func mountFuse() bool {
i, err := info()
if err != nil {
return false
}
if i != nil && i.Rootless {
return true
}
return false
}

View File

@ -83,10 +83,33 @@ func pull(logger log.Logger, image string, retries int) error {
// sanitizeImage is a helper to return human readable image name and // sanitizeImage is a helper to return human readable image name and
// the podman pullable image name from the provided image // the podman pullable image name from the provided image
func sanitizeImage(image string) (string, string) { func sanitizeImage(image string) (friendlyImageName, pullImageName string) {
const (
defaultDomain = "docker.io/"
officialRepoName = "library"
)
var remainder string
if strings.Contains(image, "@sha256:") { if strings.Contains(image, "@sha256:") {
splits := strings.Split(image, "@sha256:") splits := strings.Split(image, "@sha256:")
return splits[0], strings.Split(splits[0], ":")[0] + "@sha256:" + splits[1] friendlyImageName = splits[0]
remainder = strings.Split(splits[0], ":")[0] + "@sha256:" + splits[1]
} else {
friendlyImageName = image
remainder = image
} }
return image, image
if !strings.ContainsRune(remainder, '/') {
remainder = officialRepoName + "/" + remainder
}
i := strings.IndexRune(friendlyImageName, '/')
if i == -1 || (!strings.ContainsAny(friendlyImageName[:i], ".:") && friendlyImageName[:i] != "localhost") {
pullImageName = defaultDomain + remainder
} else {
pullImageName = remainder
}
return
} }

View File

@ -25,9 +25,6 @@ import (
"strconv" "strconv"
"strings" "strings"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/version"
"sigs.k8s.io/kind/pkg/cluster/nodes" "sigs.k8s.io/kind/pkg/cluster/nodes"
"sigs.k8s.io/kind/pkg/cluster/nodeutils" "sigs.k8s.io/kind/pkg/cluster/nodeutils"
"sigs.k8s.io/kind/pkg/errors" "sigs.k8s.io/kind/pkg/errors"
@ -39,6 +36,8 @@ import (
"sigs.k8s.io/kind/pkg/cluster/internal/providers/common" "sigs.k8s.io/kind/pkg/cluster/internal/providers/common"
"sigs.k8s.io/kind/pkg/internal/apis/config" "sigs.k8s.io/kind/pkg/internal/apis/config"
"sigs.k8s.io/kind/pkg/internal/cli" "sigs.k8s.io/kind/pkg/internal/cli"
"sigs.k8s.io/kind/pkg/internal/sets"
"sigs.k8s.io/kind/pkg/internal/version"
) )
// NewProvider returns a new provider based on executing `podman ...` // NewProvider returns a new provider based on executing `podman ...`
@ -130,7 +129,7 @@ func (p *provider) ListNodes(cluster string) ([]nodes.Node, error) {
) )
lines, err := exec.OutputLines(cmd) lines, err := exec.OutputLines(cmd)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to list clusters") return nil, errors.Wrap(err, "failed to list nodes")
} }
// convert names to node handles // convert names to node handles
ret := make([]nodes.Node, 0, len(lines)) ret := make([]nodes.Node, 0, len(lines))
@ -166,6 +165,9 @@ func (p *provider) DeleteNodes(n []nodes.Node) error {
} }
nodeVolumes = append(nodeVolumes, volumes...) nodeVolumes = append(nodeVolumes, volumes...)
} }
if len(nodeVolumes) == 0 {
return nil
}
return deleteVolumes(nodeVolumes) return deleteVolumes(nodeVolumes)
} }
@ -372,8 +374,9 @@ func (p *provider) Info() (*providers.ProviderInfo, error) {
// and lacks information about the availability of the cgroup controllers. // and lacks information about the availability of the cgroup controllers.
type podmanInfo struct { type podmanInfo struct {
Host struct { Host struct {
CgroupVersion string `json:"cgroupVersion,omitempty"` // "v2" CgroupVersion string `json:"cgroupVersion,omitempty"` // "v2"
Security struct { CgroupControllers []string `json:"cgroupControllers,omitempty"`
Security struct {
Rootless bool `json:"rootless,omitempty"` Rootless bool `json:"rootless,omitempty"`
} `json:"security"` } `json:"security"`
} `json:"host"` } `json:"host"`
@ -393,23 +396,47 @@ func info(logger log.Logger) (*providers.ProviderInfo, error) {
if err := json.Unmarshal(out, &pInfo); err != nil { if err := json.Unmarshal(out, &pInfo); err != nil {
return nil, err return nil, err
} }
info := &providers.ProviderInfo{ stringSliceContains := func(s []string, str string) bool {
Rootless: pInfo.Host.Security.Rootless, for _, v := range s {
Cgroup2: pInfo.Host.CgroupVersion == "v2", if v == str {
// We assume all the cgroup controllers to be available. return true
// }
// For rootless, this assumption is not always correct, }
// so we print the warning below. return false
//
// TODO: We wiil be able to implement proper cgroup controller detection
// after the GA of Podman 3.2.x: https://github.com/containers/podman/pull/10387
SupportsMemoryLimit: true, // not guaranteed to be correct
SupportsPidsLimit: true, // not guaranteed to be correct
SupportsCPUShares: true, // not guaranteed to be correct
} }
if info.Rootless {
logger.Warn("Cgroup controller detection is not implemented for Podman. " + // Since Podman version before v4.0.0 does not gives controller info.
"If you see cgroup-related errors, you might need to set systemd property \"Delegate=yes\", see https://kind.sigs.k8s.io/docs/user/rootless/") // We assume all the cgroup controllers to be available.
// For rootless, this assumption is not always correct,
// so we print the warning below.
cgroupSupportsMemoryLimit := true
cgroupSupportsPidsLimit := true
cgroupSupportsCPUShares := true
v, err := getPodmanVersion()
if err != nil {
return nil, errors.Wrap(err, "failed to check podman version")
}
// Info for controllers must be available after v4.0.0
// via https://github.com/containers/podman/pull/10387
if v.AtLeast(version.MustParseSemantic("4.0.0")) {
cgroupSupportsMemoryLimit = stringSliceContains(pInfo.Host.CgroupControllers, "memory")
cgroupSupportsPidsLimit = stringSliceContains(pInfo.Host.CgroupControllers, "pids")
cgroupSupportsCPUShares = stringSliceContains(pInfo.Host.CgroupControllers, "cpu")
}
info := &providers.ProviderInfo{
Rootless: pInfo.Host.Security.Rootless,
Cgroup2: pInfo.Host.CgroupVersion == "v2",
SupportsMemoryLimit: cgroupSupportsMemoryLimit,
SupportsPidsLimit: cgroupSupportsPidsLimit,
SupportsCPUShares: cgroupSupportsCPUShares,
}
if info.Rootless && !v.AtLeast(version.MustParseSemantic("4.0.0")) {
if logger != nil {
logger.Warn("Cgroup controller detection is not implemented for Podman. " +
"If you see cgroup-related errors, you might need to set systemd property \"Delegate=yes\", see https://kind.sigs.k8s.io/docs/user/rootless/")
}
} }
return info, nil return info, nil
} }

View File

@ -17,10 +17,12 @@ limitations under the License.
package podman package podman
import ( import (
"context"
"fmt" "fmt"
"net" "net"
"path/filepath" "path/filepath"
"strings" "strings"
"time"
"sigs.k8s.io/kind/pkg/cluster/constants" "sigs.k8s.io/kind/pkg/cluster/constants"
"sigs.k8s.io/kind/pkg/errors" "sigs.k8s.io/kind/pkg/errors"
@ -43,7 +45,7 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
// only the external LB should reflect the port if we have multiple control planes // only the external LB should reflect the port if we have multiple control planes
apiServerPort := cfg.Networking.APIServerPort apiServerPort := cfg.Networking.APIServerPort
apiServerAddress := cfg.Networking.APIServerAddress apiServerAddress := cfg.Networking.APIServerAddress
if clusterHasImplicitLoadBalancer(cfg) { if config.ClusterHasImplicitLoadBalancer(cfg) {
// TODO: picking ports locally is less than ideal with a remote runtime // TODO: picking ports locally is less than ideal with a remote runtime
// (does podman have this?) // (does podman have this?)
// but this is supposed to be an implementation detail and NOT picking // but this is supposed to be an implementation detail and NOT picking
@ -62,7 +64,7 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
if err != nil { if err != nil {
return err return err
} }
return createContainer(args) return createContainer(name, args)
}) })
} }
@ -96,7 +98,7 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
if err != nil { if err != nil {
return err return err
} }
return createContainer(args) return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args)
}) })
case config.WorkerRole: case config.WorkerRole:
createContainerFuncs = append(createContainerFuncs, func() error { createContainerFuncs = append(createContainerFuncs, func() error {
@ -104,7 +106,7 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
if err != nil { if err != nil {
return err return err
} }
return createContainer(args) return createContainerWithWaitUntilSystemdReachesMultiUserSystem(name, args)
}) })
default: default:
return nil, errors.Errorf("unknown node role: %q", node.Role) return nil, errors.Errorf("unknown node role: %q", node.Role)
@ -113,28 +115,6 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
return createContainerFuncs, nil return createContainerFuncs, nil
} }
func createContainer(args []string) error {
if err := exec.Command("podman", args...).Run(); err != nil {
return errors.Wrap(err, "podman run error")
}
return nil
}
func clusterIsIPv6(cfg *config.Cluster) bool {
return cfg.Networking.IPFamily == config.IPv6Family || cfg.Networking.IPFamily == config.DualStackFamily
}
func clusterHasImplicitLoadBalancer(cfg *config.Cluster) bool {
controlPlanes := 0
for _, configNode := range cfg.Nodes {
role := string(configNode.Role)
if role == constants.ControlPlaneNodeRoleValue {
controlPlanes++
}
}
return controlPlanes > 1
}
// commonArgs computes static arguments that apply to all containers // commonArgs computes static arguments that apply to all containers
func commonArgs(cfg *config.Cluster, networkName string) ([]string, error) { func commonArgs(cfg *config.Cluster, networkName string) ([]string, error) {
// standard arguments all nodes containers need, computed once // standard arguments all nodes containers need, computed once
@ -149,7 +129,7 @@ func commonArgs(cfg *config.Cluster, networkName string) ([]string, error) {
} }
// enable IPv6 if necessary // enable IPv6 if necessary
if clusterIsIPv6(cfg) { if config.ClusterHasIPv6(cfg) {
args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1") args = append(args, "--sysctl=net.ipv6.conf.all.disable_ipv6=0", "--sysctl=net.ipv6.conf.all.forwarding=1")
} }
@ -168,6 +148,12 @@ func commonArgs(cfg *config.Cluster, networkName string) ([]string, error) {
args = append(args, "--volume", "/dev/mapper:/dev/mapper") args = append(args, "--volume", "/dev/mapper:/dev/mapper")
} }
// rootless: use fuse-overlayfs by default
// https://github.com/kubernetes-sigs/kind/issues/2275
if mountFuse() {
args = append(args, "--device", "/dev/fuse")
}
return args, nil return args, nil
} }
@ -180,9 +166,7 @@ func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, n
} }
args = append([]string{ args = append([]string{
"run",
"--hostname", name, // make hostname match container name "--hostname", name, // make hostname match container name
"--name", name, // ... and set the container name
// label the node with the role ID // label the node with the role ID
"--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role), "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, node.Role),
// running containers in a container requires privileged // running containers in a container requires privileged
@ -206,6 +190,8 @@ func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, n
"--volume", fmt.Sprintf("%s:/var:suid,exec,dev", varVolume), "--volume", fmt.Sprintf("%s:/var:suid,exec,dev", varVolume),
// some k8s things want to read /lib/modules // some k8s things want to read /lib/modules
"--volume", "/lib/modules:/lib/modules:ro", "--volume", "/lib/modules:/lib/modules:ro",
// propagate KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER to the entrypoint script
"-e", "KIND_EXPERIMENTAL_CONTAINERD_SNAPSHOTTER",
}, },
args..., args...,
) )
@ -230,9 +216,7 @@ func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, n
func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) { func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]string, error) {
args = append([]string{ args = append([]string{
"run",
"--hostname", name, // make hostname match container name "--hostname", name, // make hostname match container name
"--name", name, // ... and set the container name
// label the node with the role ID // label the node with the role ID
"--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue), "--label", fmt.Sprintf("%s=%s", nodeRoleLabelKey, constants.ExternalLoadBalancerNodeRoleValue),
}, },
@ -336,7 +320,7 @@ func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings .
// in a future API revision we will handle this at the API level and remove this // in a future API revision we will handle this at the API level and remove this
if pm.ListenAddress == "" { if pm.ListenAddress == "" {
switch clusterIPFamily { switch clusterIPFamily {
case config.IPv4Family: case config.IPv4Family, config.DualStackFamily:
pm.ListenAddress = "0.0.0.0" pm.ListenAddress = "0.0.0.0"
case config.IPv6Family: case config.IPv6Family:
pm.ListenAddress = "::" pm.ListenAddress = "::"
@ -375,3 +359,21 @@ func generatePortMappings(clusterIPFamily config.ClusterIPFamily, portMappings .
} }
return args, nil return args, nil
} }
func createContainer(name string, args []string) error {
if err := exec.Command("podman", append([]string{"run", "--name", name}, args...)...).Run(); err != nil {
return err
}
return nil
}
func createContainerWithWaitUntilSystemdReachesMultiUserSystem(name string, args []string) error {
if err := exec.Command("podman", append([]string{"run", "--name", name}, args...)...).Run(); err != nil {
return err
}
logCtx, logCancel := context.WithTimeout(context.Background(), 30*time.Second)
defer logCancel()
logCmd := exec.CommandContext(logCtx, "podman", "logs", "-f", name)
return common.WaitUntilLogRegexpMatches(logCtx, logCmd, common.NodeReachedCgroupsReadyRegexp())
}

View File

@ -17,13 +17,14 @@ limitations under the License.
package podman package podman
import ( import (
"encoding/json"
"fmt" "fmt"
"strings" "strings"
"k8s.io/apimachinery/pkg/util/version"
"sigs.k8s.io/kind/pkg/errors" "sigs.k8s.io/kind/pkg/errors"
"sigs.k8s.io/kind/pkg/exec" "sigs.k8s.io/kind/pkg/exec"
"sigs.k8s.io/kind/pkg/internal/version"
) )
// IsAvailable checks if podman is available in the system // IsAvailable checks if podman is available in the system
@ -99,6 +100,10 @@ func getVolumes(label string) ([]string, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if string(output) == "" {
// no volumes
return nil, nil
}
// Trim away the last `\n`. // Trim away the last `\n`.
trimmedOutput := strings.TrimSuffix(string(output), "\n") trimmedOutput := strings.TrimSuffix(string(output), "\n")
// Get names of all volumes by splitting via `\n`. // Get names of all volumes by splitting via `\n`.
@ -118,16 +123,47 @@ func deleteVolumes(names []string) error {
// mountDevMapper checks if the podman storage driver is Btrfs or ZFS // mountDevMapper checks if the podman storage driver is Btrfs or ZFS
func mountDevMapper() bool { func mountDevMapper() bool {
storage := "" cmd := exec.Command("podman", "info", "--format", "json")
cmd := exec.Command("podman", "info", "-f", out, err := exec.Output(cmd)
`{{ index .Store.GraphStatus "Backing Filesystem"}}`)
lines, err := exec.OutputLines(cmd)
if err != nil { if err != nil {
return false return false
} }
if len(lines) > 0 { var pInfo podmanStorageInfo
storage = strings.ToLower(strings.TrimSpace(lines[0])) if err := json.Unmarshal(out, &pInfo); err != nil {
return false
} }
return storage == "btrfs" || storage == "zfs"
// match docker logic pkg/cluster/internal/providers/docker/util.go
if pInfo.Store.GraphDriverName == "btrfs" ||
pInfo.Store.GraphDriverName == "zfs" ||
pInfo.Store.GraphDriverName == "devicemapper" ||
pInfo.Store.GraphStatus.BackingFilesystem == "btrfs" ||
pInfo.Store.GraphStatus.BackingFilesystem == "xfs" ||
pInfo.Store.GraphStatus.BackingFilesystem == "zfs" {
return true
}
return false
}
type podmanStorageInfo struct {
Store struct {
GraphDriverName string `json:"graphDriverName,omitempty"`
GraphStatus struct {
BackingFilesystem string `json:"Backing Filesystem,omitempty"` // "v2"
} `json:"graphStatus"`
} `json:"store"`
}
// rootless: use fuse-overlayfs by default
// https://github.com/kubernetes-sigs/kind/issues/2275
func mountFuse() bool {
i, err := info(nil)
if err != nil {
return false
}
if i != nil && i.Rootless {
return true
}
return false
} }

View File

@ -23,6 +23,8 @@ import (
"path" "path"
"strings" "strings"
"github.com/pelletier/go-toml"
"sigs.k8s.io/kind/pkg/cluster/nodes" "sigs.k8s.io/kind/pkg/cluster/nodes"
"sigs.k8s.io/kind/pkg/errors" "sigs.k8s.io/kind/pkg/errors"
"sigs.k8s.io/kind/pkg/exec" "sigs.k8s.io/kind/pkg/exec"
@ -76,13 +78,37 @@ func CopyNodeToNode(a, b nodes.Node, file string) error {
// LoadImageArchive loads image onto the node, where image is a Reader over an image archive // LoadImageArchive loads image onto the node, where image is a Reader over an image archive
func LoadImageArchive(n nodes.Node, image io.Reader) error { func LoadImageArchive(n nodes.Node, image io.Reader) error {
cmd := n.Command("ctr", "--namespace=k8s.io", "images", "import", "-").SetStdin(image) snapshotter, err := getSnapshotter(n)
if err != nil {
return err
}
cmd := n.Command("ctr", "--namespace=k8s.io", "images", "import", "--snapshotter", snapshotter, "-").SetStdin(image)
if err := cmd.Run(); err != nil { if err := cmd.Run(); err != nil {
return errors.Wrap(err, "failed to load image") return errors.Wrap(err, "failed to load image")
} }
return nil return nil
} }
func getSnapshotter(n nodes.Node) (string, error) {
out, err := exec.Output(n.Command("containerd", "config", "dump"))
if err != nil {
return "", errors.Wrap(err, "failed to detect containerd snapshotter")
}
return parseSnapshotter(string(out))
}
func parseSnapshotter(config string) (string, error) {
parsed, err := toml.Load(config)
if err != nil {
return "", errors.Wrap(err, "failed to detect containerd snapshotter")
}
snapshotter, ok := parsed.GetPath([]string{"plugins", "io.containerd.grpc.v1.cri", "containerd", "snapshotter"}).(string)
if !ok {
return "", errors.New("failed to detect containerd snapshotter")
}
return snapshotter, nil
}
// ImageID returns ID of image on the node with the given image name if present // ImageID returns ID of image on the node with the given image name if present
func ImageID(n nodes.Node, image string) (string, error) { func ImageID(n nodes.Node, image string) (string, error) {
var out bytes.Buffer var out bytes.Buffer

View File

@ -204,8 +204,8 @@ func (p *Provider) KubeConfig(name string, internal bool) (string, error) {
// it into the selected file, following the rules from // it into the selected file, following the rules from
// https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#config // https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#config
// where explicitPath is the --kubeconfig value. // where explicitPath is the --kubeconfig value.
func (p *Provider) ExportKubeConfig(name string, explicitPath string) error { func (p *Provider) ExportKubeConfig(name string, explicitPath string, internal bool) error {
return kubeconfig.Export(p.provider, defaultName(name), explicitPath) return kubeconfig.Export(p.provider, defaultName(name), explicitPath, !internal)
} }
// ListNodes returns the list of container IDs for the "nodes" in the cluster // ListNodes returns the list of container IDs for the "nodes" in the cluster

View File

@ -50,7 +50,7 @@ func DisplayVersion() string {
} }
// VersionCore is the core portion of the kind CLI version per Semantic Versioning 2.0.0 // VersionCore is the core portion of the kind CLI version per Semantic Versioning 2.0.0
const VersionCore = "0.11.1" const VersionCore = "0.12.0"
// VersionPreRelease is the pre-release portion of the kind CLI version per // VersionPreRelease is the pre-release portion of the kind CLI version per
// Semantic Versioning 2.0.0 // Semantic Versioning 2.0.0

View File

@ -16,18 +16,14 @@ limitations under the License.
package errors package errors
import ( // NewAggregate is a k8s.io/apimachinery/pkg/util/errors.NewAggregate compatible wrapper
k8serrors "k8s.io/apimachinery/pkg/util/errors"
)
// NewAggregate is a k8s.io/apimachinery/pkg/util/errors.NewAggregate wrapper
// note that while it returns a StackTrace wrapped Aggregate // note that while it returns a StackTrace wrapped Aggregate
// That has been Flattened and Reduced // That has been Flattened and Reduced
func NewAggregate(errlist []error) error { func NewAggregate(errlist []error) error {
return WithStack( return WithStack(
k8serrors.Reduce( reduce(
k8serrors.Flatten( flatten(
k8serrors.NewAggregate(errlist), newAggregate(errlist),
), ),
), ),
) )
@ -35,9 +31,9 @@ func NewAggregate(errlist []error) error {
// Errors returns the deepest Aggregate in a Cause chain // Errors returns the deepest Aggregate in a Cause chain
func Errors(err error) []error { func Errors(err error) []error {
var errors k8serrors.Aggregate var errors Aggregate
for { for {
if v, ok := err.(k8serrors.Aggregate); ok { if v, ok := err.(Aggregate); ok {
errors = v errors = v
} }
if causerErr, ok := err.(Causer); ok { if causerErr, ok := err.(Causer); ok {

167
vendor/sigs.k8s.io/kind/pkg/errors/aggregate_forked.go generated vendored Normal file
View File

@ -0,0 +1,167 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errors
import (
"errors"
"sigs.k8s.io/kind/pkg/internal/sets"
)
/*
The contents of this file are lightly forked from k8s.io/apimachinery/pkg/util/errors
Forking makes kind easier to import, and this code is stable.
Currently the only source changes are renaming some methods so as to not
export them.
*/
// Aggregate represents an object that contains multiple errors, but does not
// necessarily have singular semantic meaning.
// The aggregate can be used with `errors.Is()` to check for the occurrence of
// a specific error type.
// Errors.As() is not supported, because the caller presumably cares about a
// specific error of potentially multiple that match the given type.
//
// NOTE: this type is originally from k8s.io/apimachinery/pkg/util/errors.Aggregate
// Since it is an interface, you can use the implementing types interchangeably
type Aggregate interface {
error
Errors() []error
Is(error) bool
}
func newAggregate(errlist []error) Aggregate {
if len(errlist) == 0 {
return nil
}
// In case of input error list contains nil
var errs []error
for _, e := range errlist {
if e != nil {
errs = append(errs, e)
}
}
if len(errs) == 0 {
return nil
}
return aggregate(errs)
}
// flatten takes an Aggregate, which may hold other Aggregates in arbitrary
// nesting, and flattens them all into a single Aggregate, recursively.
func flatten(agg Aggregate) Aggregate {
result := []error{}
if agg == nil {
return nil
}
for _, err := range agg.Errors() {
if a, ok := err.(Aggregate); ok {
r := flatten(a)
if r != nil {
result = append(result, r.Errors()...)
}
} else {
if err != nil {
result = append(result, err)
}
}
}
return newAggregate(result)
}
// reduce will return err or, if err is an Aggregate and only has one item,
// the first item in the aggregate.
func reduce(err error) error {
if agg, ok := err.(Aggregate); ok && err != nil {
switch len(agg.Errors()) {
case 1:
return agg.Errors()[0]
case 0:
return nil
}
}
return err
}
// This helper implements the error and Errors interfaces. Keeping it private
// prevents people from making an aggregate of 0 errors, which is not
// an error, but does satisfy the error interface.
type aggregate []error
// Error is part of the error interface.
func (agg aggregate) Error() string {
if len(agg) == 0 {
// This should never happen, really.
return ""
}
if len(agg) == 1 {
return agg[0].Error()
}
seenerrs := sets.NewString()
result := ""
agg.visit(func(err error) bool {
msg := err.Error()
if seenerrs.Has(msg) {
return false
}
seenerrs.Insert(msg)
if len(seenerrs) > 1 {
result += ", "
}
result += msg
return false
})
if len(seenerrs) == 1 {
return result
}
return "[" + result + "]"
}
func (agg aggregate) Is(target error) bool {
return agg.visit(func(err error) bool {
return errors.Is(err, target)
})
}
// visit applies f to every non-aggregate error reachable from agg,
// recursing into nested aggregates. Traversal stops and returns true as
// soon as f returns true; otherwise it returns false after visiting all
// errors.
func (agg aggregate) visit(f func(err error) bool) bool {
	for _, err := range agg {
		switch err := err.(type) {
		case aggregate:
			// Our own concrete type: recurse fully. This case must precede
			// the Aggregate interface case, which it also satisfies.
			if match := err.visit(f); match {
				return match
			}
		case Aggregate:
			// Foreign Aggregate implementation: visit one level of its
			// errors (no recursion into its nested structure).
			for _, nestedErr := range err.Errors() {
				if match := f(nestedErr); match {
					return match
				}
			}
		default:
			if match := f(err); match {
				return match
			}
		}
	}
	return false
}
// Errors is part of the Aggregate interface. It exposes the underlying
// slice of errors; no copy is made.
func (agg aggregate) Errors() []error {
	errs := []error(agg)
	return errs
}

View File

@ -0,0 +1,34 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
// ClusterHasIPv6 returns true if the cluster should have IPv6 enabled due to either
// being IPv6 cluster family or Dual Stack
func ClusterHasIPv6(c *Cluster) bool {
	switch c.Networking.IPFamily {
	case IPv6Family, DualStackFamily:
		return true
	default:
		return false
	}
}
// ClusterHasImplicitLoadBalancer returns true if this cluster has an implicit api-server LoadBalancer
func ClusterHasImplicitLoadBalancer(c *Cluster) bool {
	// A load balancer is implied as soon as there is more than one
	// control-plane node to balance across.
	seen := 0
	for _, node := range c.Nodes {
		if node.Role != ControlPlaneRole {
			continue
		}
		seen++
		if seen > 1 {
			return true
		}
	}
	return false
}

View File

@ -235,7 +235,7 @@ type PortMapping struct {
HostPort int32 HostPort int32
// TODO: add protocol (tcp/udp) and port-ranges // TODO: add protocol (tcp/udp) and port-ranges
ListenAddress string ListenAddress string
// Protocol (TCP/UDP) // Protocol (TCP/UDP/SCTP)
Protocol PortMappingProtocol Protocol PortMappingProtocol
} }

View File

@ -51,14 +51,13 @@ func (c *Cluster) Validate() error {
} }
} }
isDualStack := c.Networking.IPFamily == DualStackFamily
// podSubnet should be a valid CIDR // podSubnet should be a valid CIDR
if err := validateSubnets(c.Networking.PodSubnet, isDualStack); err != nil { if err := validateSubnets(c.Networking.PodSubnet, c.Networking.IPFamily); err != nil {
errs = append(errs, errors.Errorf("invalid pod subnet %v", err)) errs = append(errs, errors.Errorf("invalid pod subnet %v", err))
} }
// serviceSubnet should be a valid CIDR // serviceSubnet should be a valid CIDR
if err := validateSubnets(c.Networking.ServiceSubnet, isDualStack); err != nil { if err := validateSubnets(c.Networking.ServiceSubnet, c.Networking.IPFamily); err != nil {
errs = append(errs, errors.Errorf("invalid service subnet %v", err)) errs = append(errs, errors.Errorf("invalid service subnet %v", err))
} }
@ -140,7 +139,7 @@ func validatePort(port int32) error {
return nil return nil
} }
func validateSubnets(subnetStr string, dualstack bool) error { func validateSubnets(subnetStr string, ipFamily ClusterIPFamily) error {
allErrs := []error{} allErrs := []error{}
cidrsString := strings.Split(subnetStr, ",") cidrsString := strings.Split(subnetStr, ",")
@ -153,7 +152,11 @@ func validateSubnets(subnetStr string, dualstack bool) error {
subnets = append(subnets, cidr) subnets = append(subnets, cidr)
} }
dualstack := ipFamily == DualStackFamily
switch { switch {
// if no subnets are defined
case len(subnets) == 0:
allErrs = append(allErrs, errors.New("no subnets defined"))
// if DualStack only 2 CIDRs allowed // if DualStack only 2 CIDRs allowed
case dualstack && len(subnets) > 2: case dualstack && len(subnets) > 2:
allErrs = append(allErrs, errors.New("expected one (IPv4 or IPv6) CIDR or two CIDRs from each family for dual-stack networking")) allErrs = append(allErrs, errors.New("expected one (IPv4 or IPv6) CIDR or two CIDRs from each family for dual-stack networking"))
@ -168,6 +171,10 @@ func validateSubnets(subnetStr string, dualstack bool) error {
// if not DualStack only one CIDR allowed // if not DualStack only one CIDR allowed
case !dualstack && len(subnets) > 1: case !dualstack && len(subnets) > 1:
allErrs = append(allErrs, errors.New("only one CIDR allowed for single-stack networking")) allErrs = append(allErrs, errors.New("only one CIDR allowed for single-stack networking"))
case ipFamily == IPv4Family && subnets[0].IP.To4() == nil:
allErrs = append(allErrs, errors.New("expected IPv4 CIDR for IPv4 family"))
case ipFamily == IPv6Family && subnets[0].IP.To4() != nil:
allErrs = append(allErrs, errors.New("expected IPv6 CIDR for IPv6 family"))
} }
if len(allErrs) > 0 { if len(allErrs) > 0 {

View File

@ -1,3 +1,4 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*

25
vendor/sigs.k8s.io/kind/pkg/internal/sets/doc.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package sets implements set types.
//
// This is forked from k8s.io/apimachinery/pkg/util/sets (under the same project
// and license), because k8s.io/apimachinery is a relatively heavy dependency
// and we only need some trivial utilities. Avoiding importing k8s.io/apimachinery
// makes kind easier to embed in other projects for testing etc.
//
// The set implementation is relatively small and very stable.
package sets

23
vendor/sigs.k8s.io/kind/pkg/internal/sets/empty.go generated vendored Normal file
View File

@ -0,0 +1,23 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by set-gen. DO NOT EDIT.
package sets
// Empty is public since it is used by some internal API objects for conversions between external
// string arrays and internal sets, and conversion logic requires public types today.
//
// It is the zero-width value type used by set maps (map[T]Empty), so set
// entries cost no per-value storage.
type Empty struct{}

205
vendor/sigs.k8s.io/kind/pkg/internal/sets/string.go generated vendored Normal file
View File

@ -0,0 +1,205 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by set-gen. DO NOT EDIT.
package sets
import (
"reflect"
"sort"
)
// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption.
// Membership is represented by map keys; the Empty values carry no data.
type String map[string]Empty
// NewString creates a String from a list of values.
func NewString(items ...string) String {
	set := make(String, len(items))
	return set.Insert(items...)
}
// StringKeySet creates a String from the keys of a map[string](? extends interface{}).
// If the value passed in is not actually a map, this will panic.
func StringKeySet(theMap interface{}) String {
	keys := reflect.ValueOf(theMap).MapKeys()
	result := String{}
	for _, key := range keys {
		result.Insert(key.Interface().(string))
	}
	return result
}
// Insert adds items to the set and returns the (mutated) set for chaining.
func (s String) Insert(items ...string) String {
	for i := range items {
		s[items[i]] = Empty{}
	}
	return s
}
// Delete removes all items from the set and returns the set for chaining.
func (s String) Delete(items ...string) String {
	for i := range items {
		delete(s, items[i])
	}
	return s
}
// Has returns true if and only if item is contained in the set.
func (s String) Has(item string) bool {
	_, ok := s[item]
	return ok
}
// HasAll returns true if and only if all items are contained in the set.
func (s String) HasAll(items ...string) bool {
	for _, item := range items {
		if _, ok := s[item]; !ok {
			return false
		}
	}
	return true
}
// HasAny returns true if any items are contained in the set.
func (s String) HasAny(items ...string) bool {
	for _, item := range items {
		if _, ok := s[item]; ok {
			return true
		}
	}
	return false
}
// Difference returns a new set holding the elements of s that are not in s2.
// For example:
// s1 = {a1, a2, a3}
// s2 = {a1, a2, a4, a5}
// s1.Difference(s2) = {a3}
// s2.Difference(s1) = {a4, a5}
func (s String) Difference(s2 String) String {
	out := String{}
	for key := range s {
		if _, present := s2[key]; !present {
			out[key] = Empty{}
		}
	}
	return out
}
// Union returns a new set which includes items in either s1 or s2.
// For example:
// s1 = {a1, a2}
// s2 = {a3, a4}
// s1.Union(s2) = {a1, a2, a3, a4}
// s2.Union(s1) = {a1, a2, a3, a4}
func (s1 String) Union(s2 String) String {
	merged := make(String, len(s1)+len(s2))
	for key := range s1 {
		merged[key] = Empty{}
	}
	for key := range s2 {
		merged[key] = Empty{}
	}
	return merged
}
// Intersection returns a new set which includes the items in BOTH s1 and s2.
// For example:
// s1 = {a1, a2}
// s2 = {a2, a3}
// s1.Intersection(s2) = {a2}
func (s1 String) Intersection(s2 String) String {
	// Iterate the smaller set and probe the larger one.
	small, large := s1, s2
	if len(s2) < len(s1) {
		small, large = s2, s1
	}
	out := String{}
	for key := range small {
		if _, ok := large[key]; ok {
			out[key] = Empty{}
		}
	}
	return out
}
// IsSuperset returns true if and only if s1 is a superset of s2.
func (s1 String) IsSuperset(s2 String) bool {
	for item := range s2 {
		if _, ok := s1[item]; !ok {
			return false
		}
	}
	return true
}
// Equal returns true if and only if s1 is equal (as a set) to s2.
// Two sets are equal if their membership is identical.
// (In practice, this means same elements, order doesn't matter)
func (s1 String) Equal(s2 String) bool {
	if len(s1) != len(s2) {
		return false
	}
	return s1.IsSuperset(s2)
}
// sortableSliceOfString adapts []string to sort.Interface, comparing
// elements with lessString.
type sortableSliceOfString []string

// Len is part of sort.Interface.
func (s sortableSliceOfString) Len() int { return len(s) }

// Less is part of sort.Interface.
func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) }

// Swap is part of sort.Interface.
func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
// List returns the contents as a sorted string slice.
func (s String) List() []string {
	out := make(sortableSliceOfString, 0, len(s))
	for key := range s {
		out = append(out, key)
	}
	sort.Sort(out)
	return []string(out)
}
// UnsortedList returns the slice with contents in random order.
func (s String) UnsortedList() []string {
	out := make([]string, len(s))
	i := 0
	for key := range s {
		out[i] = key
		i++
	}
	return out
}
// PopAny removes and returns an arbitrary element from the set; the bool
// result is false when the set is empty.
func (s String) PopAny() (string, bool) {
	for key := range s {
		delete(s, key)
		return key, true
	}
	return "", false
}
// Len returns the size of the set.
func (s String) Len() int {
	size := len(s)
	return size
}
// lessString reports whether lhs sorts strictly before rhs
// (byte-wise lexicographic order).
func lessString(lhs, rhs string) bool {
	return rhs > lhs
}

View File

@ -1,5 +1,5 @@
/* /*
Copyright 2016 The Kubernetes Authors. Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
@ -15,4 +15,8 @@ limitations under the License.
*/ */
// Package version provides utilities for version number comparisons // Package version provides utilities for version number comparisons
package version // import "k8s.io/apimachinery/pkg/util/version" //
// This is forked from k8s.io/apimachinery/pkg/util/version to make
// kind easier to import (k8s.io/apimachinery/pkg/util/version is a stable,
// mature package with no external dependencies within a large, heavy module)
package version