Merge pull request #690 from RainbowMango/pr_update_kind

Update kind version to v0.11.1 from v0.10.0
This commit is contained in:
karmada-bot 2021-09-02 14:56:58 +08:00 committed by GitHub
commit 7694ba2f7d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
40 changed files with 1353 additions and 279 deletions

4
go.mod
View File

@ -4,7 +4,7 @@ go 1.16
require (
github.com/distribution/distribution/v3 v3.0.0-20210507173845-9329f6a62b67
github.com/evanphx/json-patch/v5 v5.1.0
github.com/evanphx/json-patch/v5 v5.2.0
github.com/gogo/protobuf v1.3.2
github.com/google/uuid v1.1.2
github.com/kr/pretty v0.3.0
@ -27,7 +27,7 @@ require (
k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471
sigs.k8s.io/cluster-api v0.4.0
sigs.k8s.io/controller-runtime v0.9.5
sigs.k8s.io/kind v0.10.0
sigs.k8s.io/kind v0.11.1
sigs.k8s.io/mcs-api v0.1.0
)

13
go.sum
View File

@ -79,8 +79,9 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/alessio/shellescape v1.2.2 h1:8LnL+ncxhWT2TR00dfJRT25JWWrhkMZXneHVWnetDZg=
github.com/alessio/shellescape v1.2.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0=
github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
@ -214,8 +215,8 @@ github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch/v5 v5.0.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/evanphx/json-patch/v5 v5.1.0 h1:B0aXl1o/1cP8NbviYiBMkcHBtUjIJ1/Ccg6b+SwCLQg=
github.com/evanphx/json-patch/v5 v5.1.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/evanphx/json-patch/v5 v5.2.0 h1:8ozOH5xxoMYDt5/u+yMTsVXydVCbTORFnOOoq2lumco=
github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@ -989,7 +990,6 @@ golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200928205150-006507a75852/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1238,7 +1238,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
@ -1322,8 +1321,8 @@ sigs.k8s.io/controller-runtime v0.9.5 h1:WThcFE6cqctTn2jCZprLICO6BaKZfhsT37uAapT
sigs.k8s.io/controller-runtime v0.9.5/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA=
sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI=
sigs.k8s.io/kind v0.8.1/go.mod h1:oNKTxUVPYkV9lWzY6CVMNluVq8cBsyq+UgPJdvA3uu4=
sigs.k8s.io/kind v0.10.0 h1:Tm+QITIqdRd+efLOsxZHMAfLnr5K4e3/RH8MePspEXs=
sigs.k8s.io/kind v0.10.0/go.mod h1:fb32zUw7ewC47bPwLnwhf47wd/vADtv3c38KP7sjIlo=
sigs.k8s.io/kind v0.11.1 h1:pVzOkhUwMBrCB0Q/WllQDO3v14Y+o2V0tFgjTqIUjwA=
sigs.k8s.io/kind v0.11.1/go.mod h1:fRpgVhtqAWrtLB9ED7zQahUimpUXuG/iHT88xYqEGIA=
sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY=
sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIMgYrEDfwOeFdMK0=
sigs.k8s.io/kustomize/kustomize/v4 v4.1.2/go.mod h1:PxBvo4WGYlCLeRPL+ziT64wBXqbgfcalOS/SXa/tcyo=

View File

@ -22,3 +22,7 @@ _testmain.go
*.exe
*.test
*.prof
.idea/
escargs

64
vendor/github.com/alessio/shellescape/.golangci.yml generated vendored Normal file
View File

@ -0,0 +1,64 @@
# run:
# # timeout for analysis, e.g. 30s, 5m, default is 1m
# timeout: 5m
linters:
disable-all: true
enable:
- bodyclose
- deadcode
- depguard
- dogsled
- goconst
- gocritic
- gofmt
- goimports
- golint
- gosec
- gosimple
- govet
- ineffassign
- interfacer
- maligned
- misspell
- prealloc
- scopelint
- staticcheck
- structcheck
- stylecheck
- typecheck
- unconvert
- unparam
- unused
- misspell
- wsl
issues:
exclude-rules:
- text: "Use of weak random number generator"
linters:
- gosec
- text: "comment on exported var"
linters:
- golint
- text: "don't use an underscore in package name"
linters:
- golint
- text: "ST1003:"
linters:
- stylecheck
# FIXME: Disabled until golangci-lint updates stylecheck with this fix:
# https://github.com/dominikh/go-tools/issues/389
- text: "ST1016:"
linters:
- stylecheck
linters-settings:
dogsled:
max-blank-identifiers: 3
maligned:
# print struct with more effective memory layout or not, false by default
suggest-new: true
run:
tests: false

33
vendor/github.com/alessio/shellescape/.goreleaser.yml generated vendored Normal file
View File

@ -0,0 +1,33 @@
# This is an example goreleaser.yaml file with some sane defaults.
# Make sure to check the documentation at http://goreleaser.com
before:
hooks:
# You may remove this if you don't use go modules.
- go mod download
# you may remove this if you don't need go generate
- go generate ./...
builds:
- env:
- CGO_ENABLED=0
main: ./cmd/escargs
goos:
- linux
- windows
- darwin
archives:
- replacements:
darwin: Darwin
linux: Linux
windows: Windows
386: i386
amd64: x86_64
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'

View File

@ -1,4 +0,0 @@
language: go
go:
- 1.14

View File

@ -0,0 +1,76 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at alessio@debian.org. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

View File

@ -1,7 +1,10 @@
[![GoDoc](https://godoc.org/github.com/alessio/shellescape?status.svg)](https://godoc.org/github.com/alessio/shellescape)
[![Travis-CI Status](https://api.travis-ci.org/alessio/shellescape.png?branch=master)](http://travis-ci.org/#!/alessio/shellescape)
![Build](https://github.com/alessio/shellescape/workflows/Build/badge.svg)
[![GoDoc](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/github.com/alessio/shellescape?tab=overview)
[![sourcegraph](https://sourcegraph.com/github.com/alessio/shellescape/-/badge.svg)](https://sourcegraph.com/github.com/alessio/shellescape)
[![codecov](https://codecov.io/gh/alessio/shellescape/branch/master/graph/badge.svg)](https://codecov.io/gh/alessio/shellescape)
[![Coverage](https://gocover.io/_badge/github.com/alessio/shellescape)](https://gocover.io/github.com/alessio/shellescape)
[![Coverage Status](https://coveralls.io/repos/github/alessio/shellescape/badge.svg?branch=master)](https://coveralls.io/github/alessio/shellescape?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/alessio/shellescape)](https://goreportcard.com/report/github.com/alessio/shellescape)
# shellescape
Escape arbitrary strings for safe use as command line arguments.
## Contents of the package
@ -11,8 +14,8 @@ shell-escaped copy of a string. This functionality could be helpful
in those cases where it is known that the output of a Go program will
be appended to/used in the context of shell programs' command line arguments.
This work was inspired by the Python original package [shellescape]
(https://pypi.python.org/pypi/shellescape).
This work was inspired by the Python original package
[shellescape](https://pypi.python.org/pypi/shellescape).
## Usage
@ -33,7 +36,7 @@ func main() {
_[See in Go Playground](https://play.golang.org/p/Wj2WoUfH_d)_
Especially when creating pipeline of commands which might end up being
executed by a shell interpreter, tt is particularly unsafe to not
executed by a shell interpreter, it is particularly unsafe to not
escape arguments.
`shellescape.Quote()` comes in handy and to safely escape strings:

View File

@ -17,6 +17,7 @@ be appended to/used in the context of shell programs' command line arguments.
import (
"regexp"
"strings"
"unicode"
)
var pattern *regexp.Regexp
@ -31,9 +32,35 @@ func Quote(s string) string {
if len(s) == 0 {
return "''"
}
if pattern.MatchString(s) {
return "'" + strings.Replace(s, "'", "'\"'\"'", -1) + "'"
return "'" + strings.ReplaceAll(s, "'", "'\"'\"'") + "'"
}
return s
}
// QuoteCommand returns a shell-escaped version of the slice of strings.
// The returned value is a string that can safely be used as shell command arguments.
func QuoteCommand(args []string) string {
	quoted := make([]string, 0, len(args))
	for _, arg := range args {
		quoted = append(quoted, Quote(arg))
	}
	return strings.Join(quoted, " ")
}
// StripUnsafe removes non-printable runes, e.g. control characters, from
// a string that is meant for consumption by terminals that support
// control characters.
func StripUnsafe(s string) string {
	keepPrintable := func(r rune) rune {
		if !unicode.IsPrint(r) {
			return -1 // strings.Map drops runes mapped to a negative value
		}
		return r
	}
	return strings.Map(keepPrintable, s)
}

View File

@ -6,7 +6,7 @@ modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Evan Phoenix nor the names of its contributors

View File

@ -27,21 +27,31 @@ func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
}
func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
for k, v := range *patch {
for k, v := range patch.obj {
if v == nil {
if mergeMerge {
(*doc)[k] = nil
idx := -1
for i, key := range doc.keys {
if key == k {
idx = i
break
}
}
if idx == -1 {
doc.keys = append(doc.keys, k)
}
doc.obj[k] = nil
} else {
delete(*doc, k)
_ = doc.remove(k, &ApplyOptions{})
}
} else {
cur, ok := (*doc)[k]
cur, ok := doc.obj[k]
if !ok || cur == nil {
pruneNulls(v)
(*doc)[k] = v
_ = doc.set(k, v, &ApplyOptions{})
} else {
(*doc)[k] = merge(cur, v, mergeMerge)
_ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{})
}
}
}
@ -62,9 +72,9 @@ func pruneNulls(n *lazyNode) {
}
func pruneDocNulls(doc *partialDoc) *partialDoc {
for k, v := range *doc {
for k, v := range doc.obj {
if v == nil {
delete(*doc, k)
_ = doc.remove(k, &ApplyOptions{})
} else {
pruneNulls(v)
}
@ -113,19 +123,19 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
patchErr := json.Unmarshal(patchData, patch)
if _, ok := docErr.(*json.SyntaxError); ok {
if isSyntaxError(docErr) {
return nil, errBadJSONDoc
}
if _, ok := patchErr.(*json.SyntaxError); ok {
if isSyntaxError(patchErr) {
return nil, errBadJSONPatch
}
if docErr == nil && *doc == nil {
if docErr == nil && doc.obj == nil {
return nil, errBadJSONDoc
}
if patchErr == nil && *patch == nil {
if patchErr == nil && patch.obj == nil {
return nil, errBadJSONPatch
}
@ -162,6 +172,16 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
return json.Marshal(doc)
}
// isSyntaxError reports whether err is either an encoding/json syntax
// error or this package's own syntaxError.
func isSyntaxError(err error) bool {
	switch err.(type) {
	case *json.SyntaxError, *syntaxError:
		return true
	default:
		return false
	}
}
// resemblesJSONArray indicates whether the byte-slice "appears" to be
// a JSON array or not.
// False-positives are possible, as this function does not check the internal

View File

@ -24,6 +24,10 @@ var (
// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
// "copy" operations in a patch.
AccumulatedCopySizeLimit int64 = 0
startObject = json.Delim('{')
endObject = json.Delim('}')
startArray = json.Delim('[')
endArray = json.Delim(']')
)
var (
@ -32,11 +36,15 @@ var (
ErrUnknownType = errors.New("unknown object type")
ErrInvalid = errors.New("invalid state detected")
ErrInvalidIndex = errors.New("invalid index referenced")
rawJSONArray = []byte("[]")
rawJSONObject = []byte("{}")
rawJSONNull = []byte("null")
)
type lazyNode struct {
raw *json.RawMessage
doc partialDoc
doc *partialDoc
ary partialArray
which int
}
@ -47,20 +55,58 @@ type Operation map[string]*json.RawMessage
// Patch is an ordered collection of Operations.
type Patch []Operation
type partialDoc map[string]*lazyNode
type partialDoc struct {
keys []string
obj map[string]*lazyNode
}
type partialArray []*lazyNode
type container interface {
get(key string) (*lazyNode, error)
set(key string, val *lazyNode) error
add(key string, val *lazyNode) error
remove(key string) error
get(key string, options *ApplyOptions) (*lazyNode, error)
set(key string, val *lazyNode, options *ApplyOptions) error
add(key string, val *lazyNode, options *ApplyOptions) error
remove(key string, options *ApplyOptions) error
}
// ApplyOptions specifies options for calls to ApplyWithOptions.
// Use NewApplyOptions to obtain default values for ApplyOptions.
type ApplyOptions struct {
// SupportNegativeIndices decides whether to support non-standard practice of
// allowing negative indices to mean indices starting at the end of an array.
// Default to true.
SupportNegativeIndices bool
// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
// "copy" operations in a patch.
AccumulatedCopySizeLimit int64
// AllowMissingPathOnRemove indicates whether to fail "remove" operations when the target path is missing.
// Default to false.
AllowMissingPathOnRemove bool
// EnsurePathExistsOnAdd instructs json-patch to recursively create the missing parts of path on "add" operation.
// Default to false.
EnsurePathExistsOnAdd bool
}
// NewApplyOptions creates a default set of options for calls to ApplyWithOptions.
// The negative-index and copy-size settings are seeded from the package-level
// globals, so the defaults match the package's legacy behavior.
func NewApplyOptions() *ApplyOptions {
	opts := new(ApplyOptions)
	opts.SupportNegativeIndices = SupportNegativeIndices
	opts.AccumulatedCopySizeLimit = AccumulatedCopySizeLimit
	opts.AllowMissingPathOnRemove = false
	opts.EnsurePathExistsOnAdd = false
	return opts
}
// newLazyNode wraps raw in a lazyNode whose document and array forms
// are not yet materialized (which == eRaw; doc and ary stay zero-valued).
func newLazyNode(raw *json.RawMessage) *lazyNode {
	node := lazyNode{raw: raw, which: eRaw}
	return &node
}
// newRawMessage returns a pointer to a json.RawMessage holding an
// independent copy of buf, so later mutation of buf cannot affect it.
func newRawMessage(buf []byte) *json.RawMessage {
	dup := append(json.RawMessage(nil), buf...)
	return &dup
}
func (n *lazyNode) MarshalJSON() ([]byte, error) {
switch n.which {
case eRaw:
@ -82,6 +128,109 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
return nil
}
// MarshalJSON encodes the partialDoc as a JSON object, emitting entries in
// the original document order recorded in n.keys (marshaling the map alone
// would lose that ordering).
func (n *partialDoc) MarshalJSON() ([]byte, error) {
	var buf bytes.Buffer
	if _, err := buf.WriteString("{"); err != nil {
		return nil, err
	}
	for i, k := range n.keys {
		// Separate successive entries with ", ".
		if i > 0 {
			if _, err := buf.WriteString(", "); err != nil {
				return nil, err
			}
		}
		// Marshal the key so it is properly quoted/escaped.
		key, err := json.Marshal(k)
		if err != nil {
			return nil, err
		}
		if _, err := buf.Write(key); err != nil {
			return nil, err
		}
		if _, err := buf.WriteString(": "); err != nil {
			return nil, err
		}
		// Values live in n.obj, keyed by the ordered key list.
		value, err := json.Marshal(n.obj[k])
		if err != nil {
			return nil, err
		}
		if _, err := buf.Write(value); err != nil {
			return nil, err
		}
	}
	if _, err := buf.WriteString("}"); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
// syntaxError describes malformed JSON encountered while decoding a
// document node; it complements *json.SyntaxError for this package's
// own token-level checks.
type syntaxError struct {
	msg string // human-readable description of the malformed input
}

// Error implements the error interface.
func (err *syntaxError) Error() string { return err.msg }
// UnmarshalJSON decodes data into the partialDoc while preserving the
// original key order. The values are decoded into n.obj by the standard
// library; the raw bytes are then re-scanned token by token solely to
// record the keys in the order they appear in the input.
func (n *partialDoc) UnmarshalJSON(data []byte) error {
	// First pass: populate the key -> value map.
	if err := json.Unmarshal(data, &n.obj); err != nil {
		return err
	}
	// Second pass: walk the tokens to capture document key order,
	// which map iteration would otherwise lose.
	buffer := bytes.NewBuffer(data)
	d := json.NewDecoder(buffer)
	if t, err := d.Token(); err != nil {
		return err
	} else if t != startObject {
		return &syntaxError{fmt.Sprintf("unexpected JSON token in document node: %s", t)}
	}
	for d.More() {
		k, err := d.Token()
		if err != nil {
			return err
		}
		// Object keys must be strings; anything else is malformed input.
		key, ok := k.(string)
		if !ok {
			return &syntaxError{fmt.Sprintf("unexpected JSON token as document node key: %s", k)}
		}
		// Values are already present in n.obj; skip them wholesale.
		if err := skipValue(d); err != nil {
			return err
		}
		n.keys = append(n.keys, key)
	}
	return nil
}
// skipValue consumes exactly one JSON value (scalar, object, or array)
// from the decoder without retaining it. It is used while re-scanning a
// document for key order, where the values themselves are already decoded.
func skipValue(d *json.Decoder) error {
	t, err := d.Token()
	if err != nil {
		return err
	}
	// Scalars are a single token; nothing more to consume.
	if t != startObject && t != startArray {
		return nil
	}
	for d.More() {
		if t == startObject {
			// consume key token
			if _, err := d.Token(); err != nil {
				return err
			}
		}
		// Recursively skip the (possibly nested) value.
		if err := skipValue(d); err != nil {
			return err
		}
	}
	end, err := d.Token()
	if err != nil {
		return err
	}
	if t == startObject && end != endObject {
		return &syntaxError{msg: "expected close object token"}
	}
	// Fix: this branch previously reused the "close object" message,
	// making an unterminated-array error indistinguishable from an
	// unterminated-object error.
	if t == startArray && end != endArray {
		return &syntaxError{msg: "expected close array token"}
	}
	return nil
}
func deepCopy(src *lazyNode) (*lazyNode, int, error) {
if src == nil {
return nil, 0, nil
@ -91,14 +240,12 @@ func deepCopy(src *lazyNode) (*lazyNode, int, error) {
return nil, 0, err
}
sz := len(a)
ra := make(json.RawMessage, sz)
copy(ra, a)
return newLazyNode(&ra), sz, nil
return newLazyNode(newRawMessage(a)), sz, nil
}
func (n *lazyNode) intoDoc() (*partialDoc, error) {
if n.which == eDoc {
return &n.doc, nil
return n.doc, nil
}
if n.raw == nil {
@ -112,7 +259,7 @@ func (n *lazyNode) intoDoc() (*partialDoc, error) {
}
n.which = eDoc
return &n.doc, nil
return n.doc, nil
}
func (n *lazyNode) intoAry() (*partialArray, error) {
@ -202,12 +349,12 @@ func (n *lazyNode) equal(o *lazyNode) bool {
return false
}
if len(n.doc) != len(o.doc) {
if len(n.doc.obj) != len(o.doc.obj) {
return false
}
for k, v := range n.doc {
ov, ok := o.doc[k]
for k, v := range n.doc.obj {
ov, ok := o.doc.obj[k]
if !ok {
return false
@ -340,7 +487,7 @@ Loop:
return false
}
func findObject(pd *container, path string) (container, string) {
func findObject(pd *container, path string, options *ApplyOptions) (container, string) {
doc := *pd
split := strings.Split(path, "/")
@ -357,7 +504,7 @@ func findObject(pd *container, path string) (container, string) {
for _, part := range parts {
next, ok := doc.get(decodePatchKey(part))
next, ok := doc.get(decodePatchKey(part), options)
if next == nil || ok != nil {
return nil, ""
@ -381,46 +528,76 @@ func findObject(pd *container, path string) (container, string) {
return doc, decodePatchKey(key)
}
func (d *partialDoc) set(key string, val *lazyNode) error {
(*d)[key] = val
func (d *partialDoc) set(key string, val *lazyNode, options *ApplyOptions) error {
found := false
for _, k := range d.keys {
if k == key {
found = true
break
}
}
if !found {
d.keys = append(d.keys, key)
}
d.obj[key] = val
return nil
}
func (d *partialDoc) add(key string, val *lazyNode) error {
(*d)[key] = val
return nil
func (d *partialDoc) add(key string, val *lazyNode, options *ApplyOptions) error {
return d.set(key, val, options)
}
func (d *partialDoc) get(key string) (*lazyNode, error) {
v, ok := (*d)[key]
func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) {
v, ok := d.obj[key]
if !ok {
return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key)
}
return v, nil
}
func (d *partialDoc) remove(key string) error {
_, ok := (*d)[key]
func (d *partialDoc) remove(key string, options *ApplyOptions) error {
_, ok := d.obj[key]
if !ok {
if options.AllowMissingPathOnRemove {
return nil
}
return errors.Wrapf(ErrMissing, "unable to remove nonexistent key: %s", key)
}
delete(*d, key)
idx := -1
for i, k := range d.keys {
if k == key {
idx = i
break
}
}
d.keys = append(d.keys[0:idx], d.keys[idx+1:]...)
delete(d.obj, key)
return nil
}
// set should only be used to implement the "replace" operation, so "key" must
// be an already existing index in "d".
func (d *partialArray) set(key string, val *lazyNode) error {
func (d *partialArray) set(key string, val *lazyNode, options *ApplyOptions) error {
idx, err := strconv.Atoi(key)
if err != nil {
return err
}
if idx < 0 {
if !options.SupportNegativeIndices {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
if idx < -len(*d) {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
idx += len(*d)
}
(*d)[idx] = val
return nil
}
func (d *partialArray) add(key string, val *lazyNode) error {
func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) error {
if key == "-" {
*d = append(*d, val)
return nil
@ -442,7 +619,7 @@ func (d *partialArray) add(key string, val *lazyNode) error {
}
if idx < 0 {
if !SupportNegativeIndices {
if !options.SupportNegativeIndices {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
if idx < -len(ary) {
@ -459,13 +636,23 @@ func (d *partialArray) add(key string, val *lazyNode) error {
return nil
}
func (d *partialArray) get(key string) (*lazyNode, error) {
func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error) {
idx, err := strconv.Atoi(key)
if err != nil {
return nil, err
}
if idx < 0 {
if !options.SupportNegativeIndices {
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
if idx < -len(*d) {
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
idx += len(*d)
}
if idx >= len(*d) {
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
@ -473,7 +660,7 @@ func (d *partialArray) get(key string) (*lazyNode, error) {
return (*d)[idx], nil
}
func (d *partialArray) remove(key string) error {
func (d *partialArray) remove(key string, options *ApplyOptions) error {
idx, err := strconv.Atoi(key)
if err != nil {
return err
@ -482,14 +669,20 @@ func (d *partialArray) remove(key string) error {
cur := *d
if idx >= len(cur) {
if options.AllowMissingPathOnRemove {
return nil
}
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
if idx < 0 {
if !SupportNegativeIndices {
if !options.SupportNegativeIndices {
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
if idx < -len(cur) {
if options.AllowMissingPathOnRemove {
return nil
}
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
}
idx += len(cur)
@ -502,22 +695,29 @@ func (d *partialArray) remove(key string) error {
*d = ary
return nil
}
func (p Patch) add(doc *container, op Operation) error {
func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error {
path, err := op.Path()
if err != nil {
return errors.Wrapf(ErrMissing, "add operation failed to decode path")
}
con, key := findObject(doc, path)
if options.EnsurePathExistsOnAdd {
err = ensurePathExists(doc, path, options)
if err != nil {
return err
}
}
con, key := findObject(doc, path, options)
if con == nil {
return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
}
err = con.add(key, op.value())
err = con.add(key, op.value(), options)
if err != nil {
return errors.Wrapf(err, "error in add for path: '%s'", path)
}
@ -525,19 +725,113 @@ func (p Patch) add(doc *container, op Operation) error {
return nil
}
func (p Patch) remove(doc *container, op Operation) error {
// Given a document and a path to a key, walk the path and create all missing elements
// creating objects and arrays as needed.
// Used by "add" operations when options.EnsurePathExistsOnAdd is set; only the
// containers along the path are created — the final key itself is left for the
// subsequent add to fill in.
func ensurePathExists(pd *container, path string, options *ApplyOptions) error {
	doc := *pd
	var err error
	var arrIndex int
	// JSON-Pointer paths are "/"-separated; the leading empty segment is dropped.
	split := strings.Split(path, "/")
	if len(split) < 2 {
		return nil
	}
	parts := split[1:]
	for pi, part := range parts {
		// Have we reached the key part of the path?
		// If yes, we're done.
		if pi == len(parts)-1 {
			return nil
		}
		target, ok := doc.get(decodePatchKey(part), options)
		if target == nil || ok != nil {
			// Missing intermediate element: it must be created.
			// If the current container is an array which has fewer elements than our target index,
			// pad the current container with nulls.
			if arrIndex, err = strconv.Atoi(part); err == nil {
				pa, ok := doc.(*partialArray)
				if ok && arrIndex >= len(*pa)+1 {
					// Pad the array with null values up to the required index.
					// NOTE(review): errors from add are ignored here; padding
					// adds at in-range indices look infallible, but confirm.
					for i := len(*pa); i <= arrIndex-1; i++ {
						doc.add(strconv.Itoa(i), newLazyNode(newRawMessage(rawJSONNull)), options)
					}
				}
			}
			// Check if the next part is a numeric index.
			// If yes, then create an array, otherwise, create an object.
			if arrIndex, err = strconv.Atoi(parts[pi+1]); err == nil {
				if arrIndex < 0 {
					if !options.SupportNegativeIndices {
						return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for invalid index: %d", arrIndex)
					}
					// Only -1 ("append position") is tolerated as a negative index.
					if arrIndex < -1 {
						return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for negative index other than -1: %d", arrIndex)
					}
					arrIndex = 0
				}
				newNode := newLazyNode(newRawMessage(rawJSONArray))
				doc.add(part, newNode, options)
				doc, _ = newNode.intoAry()
				// Pad the new array with null values up to the required index.
				for i := 0; i < arrIndex; i++ {
					doc.add(strconv.Itoa(i), newLazyNode(newRawMessage(rawJSONNull)), options)
				}
			} else {
				newNode := newLazyNode(newRawMessage(rawJSONObject))
				doc.add(part, newNode, options)
				doc, _ = newNode.intoDoc()
			}
		} else {
			// The element already exists: descend into it, choosing the
			// array or document view based on its raw content.
			if isArray(*target.raw) {
				doc, err = target.intoAry()
				if err != nil {
					return err
				}
			} else {
				doc, err = target.intoDoc()
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}
func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error {
path, err := op.Path()
if err != nil {
return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
}
con, key := findObject(doc, path)
con, key := findObject(doc, path, options)
if con == nil {
if options.AllowMissingPathOnRemove {
return nil
}
return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
}
err = con.remove(key)
err = con.remove(key, options)
if err != nil {
return errors.Wrapf(err, "error in remove for path: '%s'", path)
}
@ -545,24 +839,24 @@ func (p Patch) remove(doc *container, op Operation) error {
return nil
}
func (p Patch) replace(doc *container, op Operation) error {
func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) error {
path, err := op.Path()
if err != nil {
return errors.Wrapf(err, "replace operation failed to decode path")
}
con, key := findObject(doc, path)
con, key := findObject(doc, path, options)
if con == nil {
return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
}
_, ok := con.get(key)
_, ok := con.get(key, options)
if ok != nil {
return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
}
err = con.set(key, op.value())
err = con.set(key, op.value(), options)
if err != nil {
return errors.Wrapf(err, "error in remove for path: '%s'", path)
}
@ -570,24 +864,24 @@ func (p Patch) replace(doc *container, op Operation) error {
return nil
}
func (p Patch) move(doc *container, op Operation) error {
func (p Patch) move(doc *container, op Operation, options *ApplyOptions) error {
from, err := op.From()
if err != nil {
return errors.Wrapf(err, "move operation failed to decode from")
}
con, key := findObject(doc, from)
con, key := findObject(doc, from, options)
if con == nil {
return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
}
val, err := con.get(key)
val, err := con.get(key, options)
if err != nil {
return errors.Wrapf(err, "error in move for path: '%s'", key)
}
err = con.remove(key)
err = con.remove(key, options)
if err != nil {
return errors.Wrapf(err, "error in move for path: '%s'", key)
}
@ -597,13 +891,13 @@ func (p Patch) move(doc *container, op Operation) error {
return errors.Wrapf(err, "move operation failed to decode path")
}
con, key = findObject(doc, path)
con, key = findObject(doc, path, options)
if con == nil {
return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
}
err = con.add(key, val)
err = con.add(key, val, options)
if err != nil {
return errors.Wrapf(err, "error in move for path: '%s'", path)
}
@ -611,19 +905,19 @@ func (p Patch) move(doc *container, op Operation) error {
return nil
}
func (p Patch) test(doc *container, op Operation) error {
func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error {
path, err := op.Path()
if err != nil {
return errors.Wrapf(err, "test operation failed to decode path")
}
con, key := findObject(doc, path)
con, key := findObject(doc, path, options)
if con == nil {
return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
}
val, err := con.get(key)
val, err := con.get(key, options)
if err != nil && errors.Cause(err) != ErrMissing {
return errors.Wrapf(err, "error in test for path: '%s'", path)
}
@ -644,19 +938,19 @@ func (p Patch) test(doc *container, op Operation) error {
return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
}
func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, options *ApplyOptions) error {
from, err := op.From()
if err != nil {
return errors.Wrapf(err, "copy operation failed to decode from")
}
con, key := findObject(doc, from)
con, key := findObject(doc, from, options)
if con == nil {
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
}
val, err := con.get(key)
val, err := con.get(key, options)
if err != nil {
return errors.Wrapf(err, "error in copy for from: '%s'", from)
}
@ -666,7 +960,7 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er
return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
}
con, key = findObject(doc, path)
con, key = findObject(doc, path, options)
if con == nil {
return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
@ -678,11 +972,11 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er
}
(*accumulatedCopySize) += int64(sz)
if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
if options.AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > options.AccumulatedCopySizeLimit {
return NewAccumulatedCopySizeError(options.AccumulatedCopySizeLimit, *accumulatedCopySize)
}
err = con.add(key, valCopy)
err = con.add(key, valCopy, options)
if err != nil {
return errors.Wrapf(err, "error while adding value during copy")
}
@ -692,13 +986,8 @@ func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) er
// Equal indicates if 2 JSON documents have the same structural equality.
func Equal(a, b []byte) bool {
ra := make(json.RawMessage, len(a))
copy(ra, a)
la := newLazyNode(&ra)
rb := make(json.RawMessage, len(b))
copy(rb, b)
lb := newLazyNode(&rb)
la := newLazyNode(newRawMessage(a))
lb := newLazyNode(newRawMessage(b))
return la.equal(lb)
}
@ -719,12 +1008,24 @@ func DecodePatch(buf []byte) (Patch, error) {
// Apply mutates a JSON document according to the patch, and returns the new
// document.
func (p Patch) Apply(doc []byte) ([]byte, error) {
return p.ApplyIndent(doc, "")
return p.ApplyWithOptions(doc, NewApplyOptions())
}
// ApplyWithOptions mutates a JSON document according to the patch and the passed in ApplyOptions.
// It returns the new document.
func (p Patch) ApplyWithOptions(doc []byte, options *ApplyOptions) ([]byte, error) {
return p.ApplyIndentWithOptions(doc, "", options)
}
// ApplyIndent mutates a JSON document according to the patch, and returns the new
// document indented.
func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
return p.ApplyIndentWithOptions(doc, indent, NewApplyOptions())
}
// ApplyIndentWithOptions mutates a JSON document according to the patch and the passed in ApplyOptions.
// It returns the new document indented.
func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyOptions) ([]byte, error) {
var pd container
if doc[0] == '[' {
pd = &partialArray{}
@ -745,17 +1046,17 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
for _, op := range p {
switch op.Kind() {
case "add":
err = p.add(&pd, op)
err = p.add(&pd, op, options)
case "remove":
err = p.remove(&pd, op)
err = p.remove(&pd, op, options)
case "replace":
err = p.replace(&pd, op)
err = p.replace(&pd, op, options)
case "move":
err = p.move(&pd, op)
err = p.move(&pd, op, options)
case "test":
err = p.test(&pd, op)
err = p.test(&pd, op, options)
case "copy":
err = p.copy(&pd, op, &accumulatedCopySize)
err = p.copy(&pd, op, &accumulatedCopySize, options)
default:
err = fmt.Errorf("Unexpected kind: %s", op.Kind())
}

6
vendor/modules.txt vendored
View File

@ -6,7 +6,7 @@ github.com/NYTimes/gziphandler
github.com/PuerkitoBio/purell
# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
github.com/PuerkitoBio/urlesc
# github.com/alessio/shellescape v1.2.2
# github.com/alessio/shellescape v1.4.1
github.com/alessio/shellescape
# github.com/beorn7/perks v1.0.1
github.com/beorn7/perks/quantile
@ -32,7 +32,7 @@ github.com/emicklei/go-restful
github.com/emicklei/go-restful/log
# github.com/evanphx/json-patch v4.11.0+incompatible
github.com/evanphx/json-patch
# github.com/evanphx/json-patch/v5 v5.1.0
# github.com/evanphx/json-patch/v5 v5.2.0
## explicit
github.com/evanphx/json-patch/v5
# github.com/fsnotify/fsnotify v1.4.9
@ -992,7 +992,7 @@ sigs.k8s.io/controller-runtime/pkg/webhook
sigs.k8s.io/controller-runtime/pkg/webhook/admission
sigs.k8s.io/controller-runtime/pkg/webhook/conversion
sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics
# sigs.k8s.io/kind v0.10.0
# sigs.k8s.io/kind v0.11.1
## explicit
sigs.k8s.io/kind/pkg/apis/config/defaults
sigs.k8s.io/kind/pkg/apis/config/v1alpha4

View File

@ -18,4 +18,4 @@ limitations under the License.
package defaults
// Image is the default for the Config.Image field, aka the default node image.
const Image = "kindest/node:v1.20.2@sha256:8f7ea6e7642c0da54f04a7ee10431549c0257315b3a634f6ef2fecaaedb19bab"
const Image = "kindest/node:v1.21.1@sha256:69860bda5563ac81e3c0057d654b5253219618a22ec3a346306239bba8cfa1a6"

View File

@ -37,24 +37,27 @@ func SetDefaultsCluster(obj *Cluster) {
SetDefaultsNode(a)
}
if obj.Networking.IPFamily == "" {
obj.Networking.IPFamily = "ipv4"
obj.Networking.IPFamily = IPv4Family
}
// default to listening on 127.0.0.1:randomPort on ipv4
// and [::1]:randomPort on ipv6
if obj.Networking.APIServerAddress == "" {
obj.Networking.APIServerAddress = "127.0.0.1"
if obj.Networking.IPFamily == "ipv6" {
if obj.Networking.IPFamily == IPv6Family {
obj.Networking.APIServerAddress = "::1"
}
}
// default the pod CIDR
if obj.Networking.PodSubnet == "" {
obj.Networking.PodSubnet = "10.244.0.0/16"
if obj.Networking.IPFamily == "ipv6" {
if obj.Networking.IPFamily == IPv6Family {
// node-mask cidr default is /64 so we need a larger subnet, we use /56 following best practices
// xref: https://www.ripe.net/publications/docs/ripe-690#4--size-of-end-user-prefix-assignment---48---56-or-something-else-
obj.Networking.PodSubnet = "fd00:10:244::/56"
}
if obj.Networking.IPFamily == DualStackFamily {
obj.Networking.PodSubnet = "10.244.0.0/16,fd00:10:244::/56"
}
}
// default the service CIDR using a different subnet than kubeadm default
// https://github.com/kubernetes/kubernetes/blob/746404f82a28e55e0b76ffa7e40306fb88eb3317/cmd/kubeadm/app/apis/kubeadm/v1beta2/defaults.go#L32
@ -62,13 +65,16 @@ func SetDefaultsCluster(obj *Cluster) {
// we allocate a /16 subnet that allows 65535 services (current Kubernetes tested limit is O(10k) services)
if obj.Networking.ServiceSubnet == "" {
obj.Networking.ServiceSubnet = "10.96.0.0/16"
if obj.Networking.IPFamily == "ipv6" {
if obj.Networking.IPFamily == IPv6Family {
obj.Networking.ServiceSubnet = "fd00:10:96::/112"
}
if obj.Networking.IPFamily == DualStackFamily {
obj.Networking.ServiceSubnet = "10.96.0.0/16,fd00:10:96::/112"
}
}
// default the KubeProxyMode using iptables as it's already the default
if obj.Networking.KubeProxyMode == "" {
obj.Networking.KubeProxyMode = IPTablesMode
obj.Networking.KubeProxyMode = IPTablesProxyMode
}
}

View File

@ -104,6 +104,9 @@ type Node struct {
// If unset a default image will be used, see defaults.Image
Image string `yaml:"image,omitempty"`
// Labels are the labels with which the respective node will be labeled
Labels map[string]string `yaml:"labels,omitempty"`
/* Advanced fields */
// TODO: cri-like types should be inline instead
@ -196,16 +199,18 @@ const (
IPv4Family ClusterIPFamily = "ipv4"
// IPv6Family sets ClusterIPFamily to ipv6
IPv6Family ClusterIPFamily = "ipv6"
// DualStackFamily sets ClusterIPFamily to dual
DualStackFamily ClusterIPFamily = "dual"
)
// ProxyMode defines a proxy mode for kube-proxy
type ProxyMode string
const (
// IPTablesMode sets ProxyMode to iptables
IPTablesMode ProxyMode = "iptables"
// IPVSMode sets ProxyMode to iptables
IPVSMode ProxyMode = "ipvs"
// IPTablesProxyMode sets ProxyMode to iptables
IPTablesProxyMode ProxyMode = "iptables"
// IPVSProxyMode sets ProxyMode to ipvs
IPVSProxyMode ProxyMode = "ipvs"
)
// PatchJSON6902 represents an inline kustomize json 6902 patch

View File

@ -114,6 +114,13 @@ func (in *Networking) DeepCopy() *Networking {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Node) DeepCopyInto(out *Node) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ExtraMounts != nil {
in, out := &in.ExtraMounts, &out.ExtraMounts
*out = make([]Mount, len(*in))

View File

@ -48,6 +48,11 @@ func (a *Action) Execute(ctx *actions.ActionContext) error {
ctx.Status.Start("Writing configuration 📜")
defer ctx.Status.End(false)
providerInfo, err := ctx.Provider.Info()
if err != nil {
return err
}
allNodes, err := ctx.Nodes()
if err != nil {
return err
@ -73,9 +78,10 @@ func (a *Action) Execute(ctx *actions.ActionContext) error {
KubeProxyMode: string(ctx.Config.Networking.KubeProxyMode),
ServiceSubnet: ctx.Config.Networking.ServiceSubnet,
ControlPlane: true,
IPv6: ctx.Config.Networking.IPFamily == "ipv6",
IPFamily: ctx.Config.Networking.IPFamily,
FeatureGates: ctx.Config.FeatureGates,
RuntimeConfig: ctx.Config.RuntimeConfig,
RootlessProvider: providerInfo.Rootless,
}
kubeadmConfigPlusPatches := func(node nodes.Node, data kubeadm.ConfigData) func() error {
@ -92,15 +98,42 @@ func (a *Action) Execute(ctx *actions.ActionContext) error {
}
}
// Populate the list of control-plane node labels and the list of worker node labels respectively.
// controlPlaneLabels is an array of maps (labels, read from config) associated with all the control-plane nodes.
// workerLabels is an array of maps (labels, read from config) associated with all the worker nodes.
controlPlaneLabels := []map[string]string{}
workerLabels := []map[string]string{}
for _, node := range ctx.Config.Nodes {
if node.Role == config.ControlPlaneRole {
controlPlaneLabels = append(controlPlaneLabels, node.Labels)
} else if node.Role == config.WorkerRole {
workerLabels = append(workerLabels, node.Labels)
} else {
continue
}
}
// hashMapLabelsToCommaSeparatedLabels converts labels in hashmap form to labels in a comma-separated string form like "key1=value1,key2=value2"
hashMapLabelsToCommaSeparatedLabels := func(labels map[string]string) string {
output := ""
for key, value := range labels {
output += fmt.Sprintf("%s=%s,", key, value)
}
return strings.TrimSuffix(output, ",") // remove the last character (comma) in the output string
}
// create the kubeadm join configuration for control plane nodes
controlPlanes, err := nodeutils.ControlPlaneNodes(allNodes)
if err != nil {
return err
}
for _, node := range controlPlanes {
for i, node := range controlPlanes {
node := node // capture loop variable
configData := configData // copy config data
if len(controlPlaneLabels[i]) > 0 {
configData.NodeLabels = hashMapLabelsToCommaSeparatedLabels(controlPlaneLabels[i]) // updating the config with the respective labels to be written over the current control-plane node in consideration
}
fns = append(fns, kubeadmConfigPlusPatches(node, configData))
}
@ -111,10 +144,13 @@ func (a *Action) Execute(ctx *actions.ActionContext) error {
}
if len(workers) > 0 {
// create the workers concurrently
for _, node := range workers {
for i, node := range workers {
node := node // capture loop variable
configData := configData // copy config data
configData.ControlPlane = false
if len(workerLabels[i]) > 0 {
configData.NodeLabels = hashMapLabelsToCommaSeparatedLabels(workerLabels[i]) // updating the config with the respective labels to be written over the current worker node in consideration
}
fns = append(fns, kubeadmConfigPlusPatches(node, configData))
}
}
@ -201,11 +237,14 @@ func getKubeadmConfig(cfg *config.Cluster, data kubeadm.ConfigData, node nodes.N
data.NodeAddress = nodeAddress
// configure the right protocol addresses
if cfg.Networking.IPFamily == "ipv6" {
if cfg.Networking.IPFamily == config.IPv6Family || cfg.Networking.IPFamily == config.DualStackFamily {
if ip := net.ParseIP(nodeAddressIPv6); ip.To16() == nil {
return "", errors.Errorf("failed to get IPv6 address for node %s; is %s configured to use IPv6 correctly?", node.String(), provider)
}
data.NodeAddress = nodeAddressIPv6
if cfg.Networking.IPFamily == config.DualStackFamily {
data.NodeAddress = fmt.Sprintf("%s,%s", nodeAddress, nodeAddressIPv6)
}
}
// generate the config contents

View File

@ -23,8 +23,10 @@ import (
"text/template"
"sigs.k8s.io/kind/pkg/errors"
"sigs.k8s.io/kind/pkg/internal/apis/config"
"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
"sigs.k8s.io/kind/pkg/cluster/internal/patch"
"sigs.k8s.io/kind/pkg/cluster/nodeutils"
)
@ -83,6 +85,40 @@ func (a *action) Execute(ctx *actions.ActionContext) error {
manifest = out.String()
}
// NOTE: this is intentionally undocumented, as an internal implementation
// detail. Going forward users should disable the default CNI and install
// their own, or use the default. The internal templating mechanism is
// not intended for external usage and is unstable.
if strings.Contains(manifest, "would you kindly patch this file") {
// Add the controlplane endpoint so kindnet doesn´t have to wait for kube-proxy
controlPlaneEndpoint, err := ctx.Provider.GetAPIServerInternalEndpoint(ctx.Config.Name)
if err != nil {
return err
}
patchValue := `
- op: add
path: /spec/template/spec/containers/0/env/-
value:
name: CONTROL_PLANE_ENDPOINT
value: ` + controlPlaneEndpoint
controlPlanePatch6902 := config.PatchJSON6902{
Group: "apps",
Version: "v1",
Kind: "DaemonSet",
Patch: patchValue,
}
patchedConfig, err := patch.KubeYAML(manifest, nil, []config.PatchJSON6902{controlPlanePatch6902})
if err != nil {
return err
}
manifest = patchedConfig
}
ctx.Logger.V(5).Infof("Using the following Kindnetd config:\n%s", manifest)
// install the manifest
if err := node.Command(
"kubectl", "create", "--kubeconfig=/etc/kubernetes/admin.conf",

View File

@ -26,16 +26,19 @@ import (
"sigs.k8s.io/kind/pkg/cluster/nodeutils"
"sigs.k8s.io/kind/pkg/cluster/internal/create/actions"
"sigs.k8s.io/kind/pkg/internal/apis/config"
)
// kubeadmInitAction implements action for executing the kubeadm init
// and a set of default post init operations like e.g. install the
// CNI network plugin.
type action struct{}
type action struct {
skipKubeProxy bool
}
// NewAction returns a new action for kubeadm init
func NewAction() actions.Action {
return &action{}
func NewAction(cfg *config.Cluster) actions.Action {
return &action{skipKubeProxy: cfg.Networking.KubeProxyMode == config.NoneProxyMode}
}
// Execute runs the action
@ -56,13 +59,18 @@ func (a *action) Execute(ctx *actions.ActionContext) error {
return err
}
// skip preflight checks, as these have undesirable side effects
// and don't tell us much. requires kubeadm 1.13+
skipPhases := "preflight"
if a.skipKubeProxy {
skipPhases += ",addon/kube-proxy"
}
// run kubeadm
cmd := node.Command(
// init because this is the control plane node
"kubeadm", "init",
// skip preflight checks, as these have undesirable side effects
// and don't tell us much. requires kubeadm 1.13+
"--skip-phases=preflight",
"--skip-phases="+skipPhases,
// specify our generated config file
"--config=/kind/kubeadm.conf",
"--skip-token-print",

View File

@ -19,7 +19,6 @@ package create
import (
"fmt"
"math/rand"
"regexp"
"time"
"github.com/alessio/shellescape"
@ -49,12 +48,6 @@ const (
clusterNameMax = 50
)
// similar to valid docker container names, but since we will prefix
// and suffix this name, we can relax it a little
// see NewContext() for usage
// https://godoc.org/github.com/docker/docker/daemon/names#pkg-constants
var validNameRE = regexp.MustCompile(`^[a-z0-9_.-]+$`)
// ClusterOptions holds cluster creation options
type ClusterOptions struct {
Config *config.Cluster
@ -73,6 +66,11 @@ type ClusterOptions struct {
// Cluster creates a cluster
func Cluster(logger log.Logger, p providers.Provider, opts *ClusterOptions) error {
// validate provider first
if err := validateProvider(p); err != nil {
return err
}
// default / process options (namely config)
if err := fixupOptions(opts); err != nil {
return err
@ -83,14 +81,6 @@ func Cluster(logger log.Logger, p providers.Provider, opts *ClusterOptions) erro
return err
}
// TODO: move to config validation
// validate the name
if !validNameRE.MatchString(opts.Config.Name) {
return errors.Errorf(
"'%s' is not a valid cluster name, cluster names must match `%s`",
opts.Config.Name, validNameRE.String(),
)
}
// warn if cluster name might typically be too long
if len(opts.Config.Name) > clusterNameMax {
logger.Warnf("cluster name %q is probably too long, this might not work properly on some systems", opts.Config.Name)
@ -123,7 +113,7 @@ func Cluster(logger log.Logger, p providers.Provider, opts *ClusterOptions) erro
}
if !opts.StopBeforeSettingUpKubernetes {
actionsToRun = append(actionsToRun,
kubeadminit.NewAction(), // run kubeadm init
kubeadminit.NewAction(opts.Config), // run kubeadm init
)
// this step might be skipped, but is next after init
if !opts.Config.Networking.DisableDefaultCNI {
@ -249,3 +239,19 @@ func fixupOptions(opts *ClusterOptions) error {
return nil
}
func validateProvider(p providers.Provider) error {
info, err := p.Info()
if err != nil {
return err
}
if info.Rootless {
if !info.Cgroup2 {
return errors.New("running kind with rootless provider requires cgroup v2, see https://kind.sigs.k8s.io/docs/user/rootless/")
}
if !info.SupportsMemoryLimit || !info.SupportsPidsLimit || !info.SupportsCPUShares {
return errors.New("running kind with rootless provider requires setting systemd property \"Delegate=yes\", see https://kind.sigs.k8s.io/docs/user/rootless/")
}
}
return nil
}

View File

@ -25,6 +25,7 @@ import (
"k8s.io/apimachinery/pkg/util/version"
"sigs.k8s.io/kind/pkg/errors"
"sigs.k8s.io/kind/pkg/internal/apis/config"
)
// ConfigData is supplied to the kubeadm config template, with values populated
@ -46,7 +47,7 @@ type ConfigData struct {
// ControlPlane flag specifies the node belongs to the control plane
ControlPlane bool
// The main IP address of the node
// The IP address or comma separated list IP addresses of of the node
NodeAddress string
// The name for the node (not the address)
NodeName string
@ -67,18 +68,27 @@ type ConfigData struct {
// Kubernetes API Server RuntimeConfig
RuntimeConfig map[string]string
// IPv4 values take precedence over IPv6 by default, if true set IPv6 default values
IPv6 bool
// IPFamily of the cluster, it can be IPv4, IPv6 or DualStack
IPFamily config.ClusterIPFamily
// Labels are the labels, in the format "key1=val1,key2=val2", with which the respective node will be labeled
NodeLabels string
// DerivedConfigData is populated by Derive()
// These auto-generated fields are available to Config templates,
// but not meant to be set by hand
DerivedConfigData
// Provider is running with rootless mode, so kube-proxy needs to be configured
// not to fail on sysctl error.
RootlessProvider bool
}
// DerivedConfigData fields are automatically derived by
// ConfigData.Derive if they are not specified / zero valued
type DerivedConfigData struct {
// AdvertiseAddress is the first address in NodeAddress
AdvertiseAddress string
// DockerStableTag is automatically derived from KubernetesVersion
DockerStableTag string
// SortedFeatureGateKeys allows us to iterate FeatureGates deterministically
@ -87,14 +97,24 @@ type DerivedConfigData struct {
FeatureGatesString string
// RuntimeConfigString is of the form `Foo=true,Baz=false`
RuntimeConfigString string
// KubeadmFeatureGates contains Kubeadm only feature gates
KubeadmFeatureGates map[string]bool
// IPv4 values take precedence over IPv6 by default, if true set IPv6 default values
IPv6 bool
}
// Derive automatically derives DockerStableTag if not specified
func (c *ConfigData) Derive() {
// get the first address to use it as the API advertised address
c.AdvertiseAddress = strings.Split(c.NodeAddress, ",")[0]
if c.DockerStableTag == "" {
c.DockerStableTag = strings.Replace(c.KubernetesVersion, "+", "_", -1)
}
// get the IP addresses family for defaulting components
c.IPv6 = c.IPFamily == config.IPv6Family
// get sorted list of FeatureGate keys
featureGateKeys := make([]string, 0, len(c.FeatureGates))
for k := range c.FeatureGates {
@ -187,7 +207,7 @@ bootstrapTokens:
# we use a well know port for making the API server discoverable inside docker network.
# from the host machine such port will be accessible via a random local port instead.
localAPIEndpoint:
advertiseAddress: "{{ .NodeAddress }}"
advertiseAddress: "{{ .AdvertiseAddress }}"
bindPort: {{.APIBindPort}}
nodeRegistration:
criSocket: "/run/containerd/containerd.sock"
@ -204,7 +224,7 @@ metadata:
{{ if .ControlPlane -}}
controlPlane:
localAPIEndpoint:
advertiseAddress: "{{ .NodeAddress }}"
advertiseAddress: "{{ .AdvertiseAddress }}"
bindPort: {{.APIBindPort}}
{{- end }}
nodeRegistration:
@ -223,6 +243,11 @@ apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
metadata:
name: config
# explicitly set default cgroup driver
# unblocks https://github.com/kubernetes/kubernetes/pull/99471
# TODO: consider switching to systemd instead
# tracked in: https://github.com/kubernetes-sigs/kind/issues/1726
cgroupDriver: cgroupfs
# configure ipv6 addresses in IPv6 mode
{{ if .IPv6 -}}
address: "::"
@ -240,6 +265,7 @@ evictionHard:
{{ range $key := .SortedFeatureGateKeys }}
"{{ $key }}": {{$.FeatureGates $key }}
{{end}}{{end}}
{{if ne .KubeProxyMode "None"}}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
@ -252,6 +278,11 @@ mode: "{{ .KubeProxyMode }}"
{{end}}{{end}}
iptables:
minSyncPeriod: 1s
conntrack:
# Skip setting sysctl value "net.netfilter.nf_conntrack_max"
# It is a global variable that affects other namespaces
maxPerCore: 0
{{end}}
`
// ConfigTemplateBetaV2 is the kubeadm config template for API version v1beta2
@ -262,6 +293,10 @@ metadata:
name: config
kubernetesVersion: {{.KubernetesVersion}}
clusterName: "{{.ClusterName}}"
{{ if .KubeadmFeatureGates}}featureGates:
{{ range $key, $value := .KubeadmFeatureGates }}
"{{ $key }}": {{ $value }}
{{end}}{{end}}
controlPlaneEndpoint: "{{ .ControlPlaneEndpoint }}"
# on docker for mac we have to expose the api server via port forward,
# so we need to ensure the cert is valid for localhost so we can talk
@ -307,7 +342,7 @@ bootstrapTokens:
# we use a well know port for making the API server discoverable inside docker network.
# from the host machine such port will be accessible via a random local port instead.
localAPIEndpoint:
advertiseAddress: "{{ .NodeAddress }}"
advertiseAddress: "{{ .AdvertiseAddress }}"
bindPort: {{.APIBindPort}}
nodeRegistration:
criSocket: "unix:///run/containerd/containerd.sock"
@ -315,6 +350,7 @@ nodeRegistration:
fail-swap-on: "false"
node-ip: "{{ .NodeAddress }}"
provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}"
node-labels: "{{ .NodeLabels }}"
---
# no-op entry that exists solely so it can be patched
apiVersion: kubeadm.k8s.io/v1beta2
@ -324,7 +360,7 @@ metadata:
{{ if .ControlPlane -}}
controlPlane:
localAPIEndpoint:
advertiseAddress: "{{ .NodeAddress }}"
advertiseAddress: "{{ .AdvertiseAddress }}"
bindPort: {{.APIBindPort}}
{{- end }}
nodeRegistration:
@ -333,6 +369,7 @@ nodeRegistration:
fail-swap-on: "false"
node-ip: "{{ .NodeAddress }}"
provider-id: "kind://{{.NodeProvider}}/{{.ClusterName}}/{{.NodeName}}"
node-labels: "{{ .NodeLabels }}"
discovery:
bootstrapToken:
apiServerEndpoint: "{{ .ControlPlaneEndpoint }}"
@ -343,6 +380,11 @@ apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
metadata:
name: config
# explicitly set default cgroup driver
# unblocks https://github.com/kubernetes/kubernetes/pull/99471
# TODO: consider switching to systemd instead
# tracked in: https://github.com/kubernetes-sigs/kind/issues/1726
cgroupDriver: cgroupfs
# configure ipv6 addresses in IPv6 mode
{{ if .IPv6 -}}
address: "::"
@ -360,6 +402,7 @@ evictionHard:
{{ range $key := .SortedFeatureGateKeys }}
"{{ $key }}": {{ index $.FeatureGates $key }}
{{end}}{{end}}
{{if ne .KubeProxyMode "None"}}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
@ -372,6 +415,16 @@ mode: "{{ .KubeProxyMode }}"
{{end}}{{end}}
iptables:
minSyncPeriod: 1s
conntrack:
# Skip setting sysctl value "net.netfilter.nf_conntrack_max"
# It is a global variable that affects other namespaces
maxPerCore: 0
{{if .RootlessProvider}}
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
tcpCloseWaitTimeout: 0s
{{end}}{{end}}
`
// Config returns a kubeadm config generated from config data, in particular
@ -390,6 +443,9 @@ func Config(data ConfigData) (config string, err error) {
// assume the latest API version, then fallback if the k8s version is too low
templateSource := ConfigTemplateBetaV2
if ver.LessThan(version.MustParseSemantic("v1.15.0")) {
if data.RootlessProvider {
return "", errors.Errorf("version %q is not compatible with rootless provider", ver)
}
templateSource = ConfigTemplateBetaV1
}
@ -401,6 +457,17 @@ func Config(data ConfigData) (config string, err error) {
// derive any automatic fields if not supplied
data.Derive()
// Kubeadm has its own feature-gate for dual stack
// we need to enable it for Kubernetes version 1.20 only
// dual-stack is only supported in 1.20+
// TODO: remove this when 1.20 is EOL or we no longer support
// dual-stack for 1.20 in KIND
if ver.LessThan(version.MustParseSemantic("v1.21.0")) &&
ver.AtLeast(version.MustParseSemantic("v1.20.0")) {
data.KubeadmFeatureGates = make(map[string]bool)
data.KubeadmFeatureGates["IPv6DualStack"] = true
}
// execute the template
var buff bytes.Buffer
err = t.Execute(&buff, data)

View File

@ -30,7 +30,16 @@ type mergePatch struct {
func parseMergePatches(rawPatches []string) ([]mergePatch, error) {
patches := []mergePatch{}
// split document streams before trying to parse them
splitRawPatches := make([]string, 0, len(rawPatches))
for _, raw := range rawPatches {
splitRaw, err := splitYAMLDocuments(raw)
if err != nil {
return nil, err
}
splitRawPatches = append(splitRawPatches, splitRaw...)
}
for _, raw := range splitRawPatches {
matchInfo, err := parseYAMLMatchInfo(raw)
if err != nil {
return nil, errors.WithStack(err)

View File

@ -5,7 +5,6 @@ import (
"path/filepath"
"sigs.k8s.io/kind/pkg/cluster/nodes"
"sigs.k8s.io/kind/pkg/cmd/kind/version"
"sigs.k8s.io/kind/pkg/errors"
"sigs.k8s.io/kind/pkg/exec"
)
@ -23,24 +22,9 @@ func CollectLogs(n nodes.Node, dir string) error {
return cmd.SetStdout(f).SetStderr(f).Run()
}
}
writeToPathFn := func(s string, path string) func() error {
return func() error {
f, err := FileOnHost(path)
if err != nil {
return err
}
defer f.Close()
_, err = f.WriteString(s)
return err
}
}
return errors.AggregateConcurrent([]func() error{
// record info about the node container
writeToPathFn(
version.DisplayVersion(),
filepath.Join(dir, "kind-version.txt"),
),
execToPathFn(
n.Command("cat", "/kind/version"),
"kubernetes-version.txt",

View File

@ -21,10 +21,12 @@ import (
"crypto/sha1"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"net"
"regexp"
"sort"
"strconv"
"strings"
"sigs.k8s.io/kind/pkg/errors"
@ -61,9 +63,11 @@ func ensureNetwork(name string) error {
// Generate unique subnet per network based on the name
// obtained from the ULA fc00::/8 range
// Use the MTU configured for the docker default network
// Make N attempts with "probing" in case we happen to collide
subnet := generateULASubnetFromName(name, 0)
err = createNetworkNoDuplicates(name, subnet)
mtu := getDefaultNetworkMTU()
err = createNetworkNoDuplicates(name, subnet, mtu)
if err == nil {
// Success!
return nil
@ -75,7 +79,7 @@ func ensureNetwork(name string) error {
// If it is, make more attempts below
if isIPv6UnavailableError(err) {
// only one attempt, IPAM is automatic in ipv4 only
return createNetworkNoDuplicates(name, "")
return createNetworkNoDuplicates(name, "", mtu)
}
if isPoolOverlapError(err) {
// pool overlap suggests perhaps another process created the network
@ -97,7 +101,7 @@ func ensureNetwork(name string) error {
const maxAttempts = 5
for attempt := int32(1); attempt < maxAttempts; attempt++ {
subnet := generateULASubnetFromName(name, attempt)
err = createNetworkNoDuplicates(name, subnet)
err = createNetworkNoDuplicates(name, subnet, mtu)
if err == nil {
// success!
return nil
@ -121,8 +125,8 @@ func ensureNetwork(name string) error {
return errors.New("exhausted attempts trying to find a non-overlapping subnet")
}
func createNetworkNoDuplicates(name, ipv6Subnet string) error {
if err := createNetwork(name, ipv6Subnet); err != nil && !isNetworkAlreadyExistsError(err) {
func createNetworkNoDuplicates(name, ipv6Subnet string, mtu int) error {
if err := createNetwork(name, ipv6Subnet, mtu); err != nil && !isNetworkAlreadyExistsError(err) {
return err
}
_, err := removeDuplicateNetworks(name)
@ -142,15 +146,33 @@ func removeDuplicateNetworks(name string) (bool, error) {
return len(networks) > 0, nil
}
func createNetwork(name, ipv6Subnet string) error {
if ipv6Subnet == "" {
return exec.Command("docker", "network", "create", "-d=bridge",
func createNetwork(name, ipv6Subnet string, mtu int) error {
args := []string{"network", "create", "-d=bridge",
"-o", "com.docker.network.bridge.enable_ip_masquerade=true",
name).Run()
}
return exec.Command("docker", "network", "create", "-d=bridge",
"-o", "com.docker.network.bridge.enable_ip_masquerade=true",
"--ipv6", "--subnet", ipv6Subnet, name).Run()
if mtu > 0 {
args = append(args, "-o", fmt.Sprintf("com.docker.network.driver.mtu=%d", mtu))
}
if ipv6Subnet != "" {
args = append(args, "--ipv6", "--subnet", ipv6Subnet)
}
args = append(args, name)
return exec.Command("docker", args...).Run()
}
// getDefaultNetworkMTU obtains the MTU from the docker default network
func getDefaultNetworkMTU() int {
cmd := exec.Command("docker", "network", "inspect", "bridge",
"-f", `{{ index .Options "com.docker.network.driver.mtu" }}`)
lines, err := exec.OutputLines(cmd)
if err != nil || len(lines) != 1 {
return 0
}
mtu, err := strconv.Atoi(lines[0])
if err != nil {
return 0
}
return mtu
}
func sortedNetworksWithName(name string) ([]string, error) {

View File

@ -17,6 +17,8 @@ limitations under the License.
package docker
import (
"encoding/csv"
"encoding/json"
"fmt"
"net"
"os"
@ -49,6 +51,7 @@ func NewProvider(logger log.Logger) providers.Provider {
// see NewProvider
type provider struct {
logger log.Logger
info *providers.ProviderInfo
}
// String implements fmt.Stringer
@ -281,3 +284,62 @@ func (p *provider) CollectLogs(dir string, nodes []nodes.Node) error {
errs = append(errs, errors.AggregateConcurrent(fns))
return errors.NewAggregate(errs)
}
// Info returns the provider info.
// The result of the first probe is cached on the provider; because info()
// returns a nil ProviderInfo on failure, a failed probe is retried on the
// next call.
func (p *provider) Info() (*providers.ProviderInfo, error) {
	if p.info != nil {
		return p.info, nil
	}
	var err error
	p.info, err = info()
	return p.info, err
}
// dockerInfo corresponds to `docker info --format '{{json .}}'`.
// Only the fields needed to populate providers.ProviderInfo are decoded.
type dockerInfo struct {
	CgroupDriver string `json:"CgroupDriver"` // "systemd", "cgroupfs", "none"
	CgroupVersion string `json:"CgroupVersion"` // e.g. "2"
	MemoryLimit bool `json:"MemoryLimit"` // memory limit controller availability
	PidsLimit bool `json:"PidsLimit"` // pids limit controller availability
	CPUShares bool `json:"CPUShares"` // CPU shares controller availability
	SecurityOptions []string `json:"SecurityOptions"` // e.g. "name=seccomp,profile=default", "name=rootless"
}
// info probes the local docker daemon via `docker info` and translates the
// JSON result into a providers.ProviderInfo.
func info() (*providers.ProviderInfo, error) {
	out, err := exec.Output(exec.Command("docker", "info", "--format", "{{json .}}"))
	if err != nil {
		return nil, errors.Wrap(err, "failed to get docker info")
	}
	var dInfo dockerInfo
	if err := json.Unmarshal(out, &dInfo); err != nil {
		return nil, err
	}
	providerInfo := providers.ProviderInfo{
		Cgroup2: dInfo.CgroupVersion == "2",
	}
	// When CgroupDriver == "none", the MemoryLimit/PidsLimit/CPUShares
	// values are meaningless and need to be considered false.
	// https://github.com/moby/moby/issues/42151
	if dInfo.CgroupDriver != "none" {
		providerInfo.SupportsMemoryLimit = dInfo.MemoryLimit
		providerInfo.SupportsPidsLimit = dInfo.PidsLimit
		providerInfo.SupportsCPUShares = dInfo.CPUShares
	}
	// Each security option is a CSV record such as
	// "name=seccomp,profile=default" or "name=rootless"; scan every field
	// for the rootless marker.
	for _, opt := range dInfo.SecurityOptions {
		records, err := csv.NewReader(strings.NewReader(opt)).ReadAll()
		if err != nil {
			return nil, err
		}
		for _, record := range records {
			for _, field := range record {
				if field == "name=rootless" {
					providerInfo.Rootless = true
				}
			}
		}
	}
	return &providerInfo, nil
}

View File

@ -63,7 +63,8 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
// For now remote docker + multi control plane is not supported
apiServerPort = 0 // replaced with random ports
apiServerAddress = "127.0.0.1" // only the LB needs to be non-local
if clusterIsIPv6(cfg) {
// only for IPv6 only clusters
if cfg.Networking.IPFamily == config.IPv6Family {
apiServerAddress = "::1" // only the LB needs to be non-local
}
// plan loadbalancer node
@ -134,7 +135,7 @@ func createContainer(args []string) error {
}
func clusterIsIPv6(cfg *config.Cluster) bool {
return cfg.Networking.IPFamily == "ipv6"
return cfg.Networking.IPFamily == config.IPv6Family || cfg.Networking.IPFamily == config.DualStackFamily
}
func clusterHasImplicitLoadBalancer(cfg *config.Cluster) bool {
@ -183,6 +184,9 @@ func commonArgs(cluster string, cfg *config.Cluster, networkName string, nodeNam
// however this _actually_ means the same thing as always
// so the closest thing is on-failure:1, which will retry *once*
"--restart=on-failure:1",
// this can be enabled by default in docker daemon.json, so we explicitly
// disable it, we want our entrypoint to be PID1, not docker-init / tini
"--init=false",
}
// enable IPv6 if necessary
@ -251,6 +255,11 @@ func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, n
}
args = append(args, mappingArgs...)
switch node.Role {
case config.ControlPlaneRole:
args = append(args, "-e", "KUBECONFIG=/etc/kubernetes/admin.conf")
}
// finally, specify the image to run
return append(args, node.Image), nil
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package docker
import (
"encoding/json"
"strings"
"sigs.k8s.io/kind/pkg/exec"
@ -48,15 +49,39 @@ func usernsRemap() bool {
}
// mountDevMapper checks if the Docker storage driver is Btrfs or ZFS
// or if the backing filesystem is Btrfs
func mountDevMapper() bool {
storage := ""
// check the docker storage driver
cmd := exec.Command("docker", "info", "-f", "{{.Driver}}")
lines, err := exec.OutputLines(cmd)
if err != nil {
if err != nil || len(lines) != 1 {
return false
}
if len(lines) > 0 {
storage = strings.ToLower(strings.TrimSpace(lines[0]))
if storage == "btrfs" || storage == "zfs" || storage == "devicemapper" {
return true
}
return storage == "btrfs" || storage == "zfs"
// check the backing file system
// docker info -f '{{json .DriverStatus }}'
// [["Backing Filesystem","extfs"],["Supports d_type","true"],["Native Overlay Diff","true"]]
cmd = exec.Command("docker", "info", "-f", "{{json .DriverStatus }}")
lines, err = exec.OutputLines(cmd)
if err != nil || len(lines) != 1 {
return false
}
var dat [][]string
if err := json.Unmarshal([]byte(lines[0]), &dat); err != nil {
return false
}
for _, item := range dat {
if item[0] == "Backing Filesystem" {
storage = strings.ToLower(item[1])
break
}
}
return storage == "btrfs" || storage == "zfs" || storage == "xfs"
}

View File

@ -53,6 +53,7 @@ func NewProvider(logger log.Logger) providers.Provider {
// see NewProvider
type provider struct {
logger log.Logger
info *providers.ProviderInfo
}
// String implements fmt.Stringer
@ -68,12 +69,6 @@ func (p *provider) Provision(status *cli.Status, cfg *config.Cluster) (err error
return err
}
// kind doesn't work with podman rootless, surface an error
if os.Geteuid() != 0 {
p.logger.Errorf("podman provider does not work properly in rootless mode")
os.Exit(1)
}
// TODO: validate cfg
// ensure node images are pulled before actually provisioning
if err := ensureNodeImages(p.logger, status, cfg); err != nil {
@ -192,7 +187,40 @@ func (p *provider) GetAPIServerEndpoint(cluster string) (string, error) {
if err != nil {
return "", errors.Wrap(err, "failed to check podman version")
}
if v.LessThan(version.MustParseSemantic("2.2.0")) {
// podman inspect was broken between 2.2.0 and 3.0.0
// https://github.com/containers/podman/issues/8444
if v.AtLeast(version.MustParseSemantic("2.2.0")) &&
v.LessThan(version.MustParseSemantic("3.0.0")) {
p.logger.Warnf("WARNING: podman version %s not fully supported, please use versions 3.0.0+")
cmd := exec.Command(
"podman", "inspect",
"--format",
"{{range .NetworkSettings.Ports }}{{range .}}{{.HostIP}}/{{.HostPort}}{{end}}{{end}}",
n.String(),
)
lines, err := exec.OutputLines(cmd)
if err != nil {
return "", errors.Wrap(err, "failed to get api server port")
}
if len(lines) != 1 {
return "", errors.Errorf("network details should only be one line, got %d lines", len(lines))
}
// output is in the format IP/Port
parts := strings.Split(strings.TrimSpace(lines[0]), "/")
if len(parts) != 2 {
return "", errors.Errorf("network details should be in the format IP/Port, received: %s", parts)
}
host := parts[0]
port, err := strconv.Atoi(parts[1])
if err != nil {
return "", errors.Errorf("network port not an integer: %v", err)
}
return net.JoinHostPort(host, strconv.Itoa(port)), nil
}
cmd := exec.Command(
"podman", "inspect",
"--format",
@ -241,6 +269,7 @@ func (p *provider) GetAPIServerEndpoint(cluster string) (string, error) {
}
}
}
var portMappings19 []portMapping19
if err := json.Unmarshal([]byte(lines[0]), &portMappings19); err != nil {
return "", errors.Errorf("invalid network details: %v", err)
@ -250,34 +279,8 @@ func (p *provider) GetAPIServerEndpoint(cluster string) (string, error) {
return net.JoinHostPort(pm.HostIP, strconv.Itoa(int(pm.HostPort))), nil
}
}
}
// TODO: hack until https://github.com/containers/podman/issues/8444 is resolved
cmd := exec.Command(
"podman", "inspect",
"--format",
"{{range .NetworkSettings.Ports }}{{range .}}{{.HostIP}}/{{.HostPort}}{{end}}{{end}}",
n.String(),
)
lines, err := exec.OutputLines(cmd)
if err != nil {
return "", errors.Wrap(err, "failed to get api server port")
}
if len(lines) != 1 {
return "", errors.Errorf("network details should only be one line, got %d lines", len(lines))
}
// output is in the format IP/Port
parts := strings.Split(strings.TrimSpace(lines[0]), "/")
if len(parts) != 2 {
return "", errors.Errorf("network details should be in the format IP/Port, received: %s", parts)
}
host := parts[0]
port, err := strconv.Atoi(parts[1])
if err != nil {
return "", errors.Errorf("network port not an integer: %v", err)
}
return net.JoinHostPort(host, strconv.Itoa(port)), nil
return "", errors.Errorf("failed to get api server port")
}
// GetAPIServerInternalEndpoint is part of the providers.Provider interface
@ -350,3 +353,63 @@ func (p *provider) CollectLogs(dir string, nodes []nodes.Node) error {
errs = append(errs, errors.AggregateConcurrent(fns))
return errors.NewAggregate(errs)
}
// Info returns the provider info.
// The result of the first probe is cached on the provider and reused by
// later calls; a probe that errors does not populate the cache (info
// returns nil on failure), so a later call retries.
func (p *provider) Info() (*providers.ProviderInfo, error) {
	if p.info != nil {
		return p.info, nil
	}
	var err error
	if p.info, err = info(p.logger); err != nil {
		return p.info, err
	}
	return p.info, nil
}
// podmanInfo corresponds to `podman info --format json`.
// The structure is different from `docker info --format '{{json .}}'`,
// and lacks information about the availability of the cgroup controllers.
type podmanInfo struct {
	// Host mirrors the "host" object of the podman info output.
	Host struct {
		CgroupVersion string `json:"cgroupVersion,omitempty"` // e.g. "v2"
		Security struct {
			Rootless bool `json:"rootless,omitempty"` // true when podman runs rootless
		} `json:"security"`
	} `json:"host"`
}
// info detects ProviderInfo by executing `podman info --format json`.
func info(logger log.Logger) (*providers.ProviderInfo, error) {
	const podman = "podman"
	args := []string{"info", "--format", "json"}
	cmd := exec.Command(podman, args...)
	out, err := exec.Output(cmd)
	if err != nil {
		// include the exact command and its output to make failures debuggable
		return nil, errors.Wrapf(err, "failed to get podman info (%s %s): %q",
			podman, strings.Join(args, " "), string(out))
	}
	var pInfo podmanInfo
	if err := json.Unmarshal(out, &pInfo); err != nil {
		return nil, err
	}
	info := &providers.ProviderInfo{
		Rootless: pInfo.Host.Security.Rootless,
		Cgroup2: pInfo.Host.CgroupVersion == "v2",
		// We assume all the cgroup controllers to be available.
		//
		// For rootless, this assumption is not always correct,
		// so we print the warning below.
		//
		// TODO: We will be able to implement proper cgroup controller detection
		// after the GA of Podman 3.2.x: https://github.com/containers/podman/pull/10387
		SupportsMemoryLimit: true, // not guaranteed to be correct
		SupportsPidsLimit: true, // not guaranteed to be correct
		SupportsCPUShares: true, // not guaranteed to be correct
	}
	if info.Rootless {
		// warn because the Supports* flags above may be wrong for rootless
		logger.Warn("Cgroup controller detection is not implemented for Podman. " +
			"If you see cgroup-related errors, you might need to set systemd property \"Delegate=yes\", see https://kind.sigs.k8s.io/docs/user/rootless/")
	}
	return info, nil
}

View File

@ -51,7 +51,8 @@ func planCreation(cfg *config.Cluster, networkName string) (createContainerFuncs
// For now remote podman + multi control plane is not supported
apiServerPort = 0 // replaced with random ports
apiServerAddress = "127.0.0.1" // only the LB needs to be non-local
if clusterIsIPv6(cfg) {
// only for IPv6 only clusters
if cfg.Networking.IPFamily == config.IPv6Family {
apiServerAddress = "::1" // only the LB needs to be non-local
}
// plan loadbalancer node
@ -120,7 +121,7 @@ func createContainer(args []string) error {
}
func clusterIsIPv6(cfg *config.Cluster) bool {
return cfg.Networking.IPFamily == "ipv6"
return cfg.Networking.IPFamily == config.IPv6Family || cfg.Networking.IPFamily == config.DualStackFamily
}
func clusterHasImplicitLoadBalancer(cfg *config.Cluster) bool {
@ -143,6 +144,8 @@ func commonArgs(cfg *config.Cluster, networkName string) ([]string, error) {
"--net", networkName, // attach to its own network
// label the node with the cluster ID
"--label", fmt.Sprintf("%s=%s", clusterLabelKey, cfg.Name),
// specify container implementation to systemd
"-e", "container=podman",
}
// enable IPv6 if necessary
@ -151,7 +154,7 @@ func commonArgs(cfg *config.Cluster, networkName string) ([]string, error) {
}
// pass proxy environment variables
proxyEnv, err := getProxyEnv(cfg)
proxyEnv, err := getProxyEnv(cfg, networkName)
if err != nil {
return nil, errors.Wrap(err, "proxy setup error")
}
@ -159,6 +162,12 @@ func commonArgs(cfg *config.Cluster, networkName string) ([]string, error) {
args = append(args, "-e", fmt.Sprintf("%s=%s", key, val))
}
// handle Podman on Btrfs or ZFS same as we do with Docker
// https://github.com/kubernetes-sigs/kind/issues/1416#issuecomment-606514724
if mountDevMapper() {
args = append(args, "--volume", "/dev/mapper:/dev/mapper")
}
return args, nil
}
@ -209,6 +218,11 @@ func runArgsForNode(node *config.Node, clusterIPFamily config.ClusterIPFamily, n
}
args = append(args, mappingArgs...)
switch node.Role {
case config.ControlPlaneRole:
args = append(args, "-e", "KUBECONFIG=/etc/kubernetes/admin.conf")
}
// finally, specify the image to run
_, image := sanitizeImage(node.Image)
return append(args, image), nil
@ -243,12 +257,12 @@ func runArgsForLoadBalancer(cfg *config.Cluster, name string, args []string) ([]
return append(args, image), nil
}
func getProxyEnv(cfg *config.Cluster) (map[string]string, error) {
func getProxyEnv(cfg *config.Cluster, networkName string) (map[string]string, error) {
envs := common.GetProxyEnvs(cfg)
// Specifically add the podman network subnets to NO_PROXY if we are using a proxy
if len(envs) > 0 {
// podman default bridge network is named "bridge" (https://docs.podman.com/network/bridge/#use-the-default-bridge-network)
subnets, err := getSubnets("bridge")
// kind default bridge is "kind"
subnets, err := getSubnets(networkName)
if err != nil {
return nil, err
}
@ -267,7 +281,8 @@ func getProxyEnv(cfg *config.Cluster) (map[string]string, error) {
}
func getSubnets(networkName string) ([]string, error) {
format := `{{range (index (index . "IPAM") "Config")}}{{index . "Subnet"}} {{end}}`
// TODO: unmarshall json and get rid of this complex query
format := `{{ range (index (index (index (index . "plugins") 0 ) "ipam" ) "ranges")}}{{ index ( index . 0 ) "subnet" }} {{end}}`
cmd := exec.Command("podman", "network", "inspect", "-f", format, networkName)
lines, err := exec.OutputLines(cmd)
if err != nil {

View File

@ -115,3 +115,19 @@ func deleteVolumes(names []string) error {
cmd := exec.Command("podman", args...)
return cmd.Run()
}
// mountDevMapper reports whether the podman backing filesystem is Btrfs or
// ZFS, in which case /dev/mapper must be mounted into the node containers.
func mountDevMapper() bool {
	query := exec.Command("podman", "info", "-f",
		`{{ index .Store.GraphStatus "Backing Filesystem"}}`)
	lines, err := exec.OutputLines(query)
	if err != nil {
		return false
	}
	var fs string
	if len(lines) > 0 {
		fs = strings.ToLower(strings.TrimSpace(lines[0]))
	}
	switch fs {
	case "btrfs", "zfs":
		return true
	}
	return false
}

View File

@ -45,4 +45,15 @@ type Provider interface {
GetAPIServerInternalEndpoint(cluster string) (string, error)
// CollectLogs will populate dir with cluster logs and other debug files
CollectLogs(dir string, nodes []nodes.Node) error
// Info returns the provider info
Info() (*ProviderInfo, error)
}
// ProviderInfo is the info of the provider
type ProviderInfo struct {
	// Rootless is true when the provider runs without root privileges.
	Rootless bool
	// Cgroup2 is true when the host uses the cgroup v2 (unified) hierarchy.
	Cgroup2 bool
	// SupportsMemoryLimit is true when the memory limit cgroup controller is usable.
	SupportsMemoryLimit bool
	// SupportsPidsLimit is true when the pids limit cgroup controller is usable.
	SupportsPidsLimit bool
	// SupportsCPUShares is true when the CPU shares cgroup controller is usable.
	SupportsCPUShares bool
}

View File

@ -17,11 +17,17 @@ limitations under the License.
package cluster
import (
"io/ioutil"
"os"
"path/filepath"
"sort"
"sigs.k8s.io/kind/pkg/cmd/kind/version"
"sigs.k8s.io/kind/pkg/cluster/constants"
"sigs.k8s.io/kind/pkg/cluster/nodes"
"sigs.k8s.io/kind/pkg/cluster/nodeutils"
"sigs.k8s.io/kind/pkg/errors"
"sigs.k8s.io/kind/pkg/log"
internalcreate "sigs.k8s.io/kind/pkg/cluster/internal/create"
@ -68,20 +74,54 @@ func NewProvider(options ...ProviderOption) *Provider {
}
}
// ensure a provider if none was set
// NOTE: depends on logger being set (see sorting above)
if p.provider == nil {
// auto-detect based on each package IsAvailable() function
// default to docker for backwards compatibility
if docker.IsAvailable() {
p.provider = docker.NewProvider(p.logger)
} else if podman.IsAvailable() {
p.provider = podman.NewProvider(p.logger)
} else {
p.provider = docker.NewProvider(p.logger)
// DetectNodeProvider does not fallback to allow callers to determine
// this behavior
// However for compatibility if the caller of NewProvider supplied no
// option and we autodetect internally, we default to the docker provider
// for fallback, to avoid a breaking change for now.
// This may change in the future.
// TODO: consider breaking this API for earlier errors.
providerOpt, _ := DetectNodeProvider()
if providerOpt == nil {
providerOpt = ProviderWithDocker()
}
providerOpt.apply(p)
}
return p
}
// NoNodeProviderDetectedError indicates that we could not autolocate an available
// NodeProvider backend on the host
//
// It is created without a stack trace (NewWithoutStack) so it can serve as an
// exported sentinel value.
// NOTE(review): idiomatic Go naming would be ErrNoNodeProviderDetected, but
// renaming would break the exported API.
var NoNodeProviderDetectedError = errors.NewWithoutStack("failed to detect any supported node provider")
// DetectNodeProvider autodetects an available node provider backend
// *without* falling back to any default.
//
// Pass the returned ProviderOption to NewProvider to select the detected
// Docker or Podman backend explicitly (more options may exist in the future).
//
// NOTE: The kind *cli* also checks `KIND_EXPERIMENTAL_PROVIDER` for "podman" or
// "docker" currently and does not auto-detect / respects this if set.
//
// This will be replaced with some other mechanism in the future (likely when
// podman support is GA), in the meantime though your tool may wish to match this.
//
// In the future when this is not considered experimental,
// that logic will be in a public API as well.
func DetectNodeProvider() (ProviderOption, error) {
	// probe each backend's IsAvailable(), docker taking priority
	switch {
	case docker.IsAvailable():
		return ProviderWithDocker(), nil
	case podman.IsAvailable():
		return ProviderWithPodman(), nil
	}
	return nil, errors.WithStack(NoNodeProviderDetectedError)
}
// ProviderOption is an option for configuring a provider
type ProviderOption interface {
apply(p *Provider)
@ -191,5 +231,18 @@ func (p *Provider) CollectLogs(name, dir string) error {
if err != nil {
return err
}
// ensure directory
if err := os.MkdirAll(dir, os.ModePerm); err != nil {
return errors.Wrap(err, "failed to create logs directory")
}
// write kind version
if err := ioutil.WriteFile(
filepath.Join(dir, "kind-version.txt"),
[]byte(version.DisplayVersion()),
0666, // match os.Create
); err != nil {
return errors.Wrap(err, "failed to write kind-version.txt")
}
// collect and write cluster logs
return p.provider.CollectLogs(dir, n)
}

View File

@ -50,7 +50,7 @@ func DisplayVersion() string {
}
// VersionCore is the core portion of the kind CLI version per Semantic Versioning 2.0.0
const VersionCore = "0.10.0"
const VersionCore = "0.11.1"
// VersionPreRelease is the pre-release portion of the kind CLI version per
// Semantic Versioning 2.0.0

View File

@ -17,6 +17,8 @@ limitations under the License.
package errors
import (
stderrors "errors"
pkgerrors "github.com/pkg/errors"
)
@ -26,6 +28,12 @@ func New(message string) error {
return pkgerrors.New(message)
}
// NewWithoutStack is like New but does NOT attach a stack trace.
// This is useful for exported sentinel errors.
func NewWithoutStack(message string) error {
	// delegate to the standard library, which records no stack
	return stderrors.New(message)
}
// Errorf formats according to a format specifier and returns the string as a
// value that satisfies error. Errorf also records the stack trace at the
// point it was called.

View File

@ -51,6 +51,7 @@ func convertv1alpha4Node(in *v1alpha4.Node, out *Node) {
out.Role = NodeRole(in.Role)
out.Image = in.Image
out.Labels = in.Labels
out.KubeadmConfigPatches = in.KubeadmConfigPatches
out.ExtraMounts = make([]Mount, len(in.ExtraMounts))
out.ExtraPortMappings = make([]PortMapping, len(in.ExtraPortMappings))

View File

@ -48,14 +48,14 @@ func SetDefaultsCluster(obj *Cluster) {
SetDefaultsNode(a)
}
if obj.Networking.IPFamily == "" {
obj.Networking.IPFamily = "ipv4"
obj.Networking.IPFamily = IPv4Family
}
// default to listening on 127.0.0.1:randomPort on ipv4
// and [::1]:randomPort on ipv6
if obj.Networking.APIServerAddress == "" {
obj.Networking.APIServerAddress = "127.0.0.1"
if obj.Networking.IPFamily == "ipv6" {
if obj.Networking.IPFamily == IPv6Family {
obj.Networking.APIServerAddress = "::1"
}
}
@ -63,11 +63,14 @@ func SetDefaultsCluster(obj *Cluster) {
// default the pod CIDR
if obj.Networking.PodSubnet == "" {
obj.Networking.PodSubnet = "10.244.0.0/16"
if obj.Networking.IPFamily == "ipv6" {
if obj.Networking.IPFamily == IPv6Family {
// node-mask cidr default is /64 so we need a larger subnet, we use /56 following best practices
// xref: https://www.ripe.net/publications/docs/ripe-690#4--size-of-end-user-prefix-assignment---48---56-or-something-else-
obj.Networking.PodSubnet = "fd00:10:244::/56"
}
if obj.Networking.IPFamily == DualStackFamily {
obj.Networking.PodSubnet = "10.244.0.0/16,fd00:10:244::/56"
}
}
// default the service CIDR using the kubeadm default
@ -76,13 +79,16 @@ func SetDefaultsCluster(obj *Cluster) {
// we allocate a /16 subnet that allows 65535 services (current Kubernetes tested limit is O(10k) services)
if obj.Networking.ServiceSubnet == "" {
obj.Networking.ServiceSubnet = "10.96.0.0/16"
if obj.Networking.IPFamily == "ipv6" {
if obj.Networking.IPFamily == IPv6Family {
obj.Networking.ServiceSubnet = "fd00:10:96::/112"
}
if obj.Networking.IPFamily == DualStackFamily {
obj.Networking.ServiceSubnet = "10.96.0.0/16,fd00:10:96::/112"
}
}
// default the KubeProxyMode using iptables as it's already the default
if obj.Networking.KubeProxyMode == "" {
obj.Networking.KubeProxyMode = IPTablesMode
obj.Networking.KubeProxyMode = IPTablesProxyMode
}
}

View File

@ -85,6 +85,9 @@ type Node struct {
// If unset a default image will be used, see defaults.Image
Image string
// Labels are the labels with which the respective node will be labeled
Labels map[string]string
/* Advanced fields */
// ExtraMounts describes additional mount points for the node container
@ -157,16 +160,20 @@ const (
IPv4Family ClusterIPFamily = "ipv4"
// IPv6Family sets ClusterIPFamily to ipv6
IPv6Family ClusterIPFamily = "ipv6"
// DualStackFamily sets ClusterIPFamily to dual
DualStackFamily ClusterIPFamily = "dual"
)
// ProxyMode defines a proxy mode for kube-proxy
type ProxyMode string
const (
// IPTablesMode sets ProxyMode to iptables
IPTablesMode ProxyMode = "iptables"
// IPVSMode sets ProxyMode to iptables
IPVSMode ProxyMode = "ipvs"
// IPTablesProxyMode sets ProxyMode to iptables
IPTablesProxyMode ProxyMode = "iptables"
// IPVSProxyMode sets ProxyMode to ipvs
IPVSProxyMode ProxyMode = "ipvs"
// NoneProxyMode disables kube-proxy
NoneProxyMode ProxyMode = "none"
)
// PatchJSON6902 represents an inline kustomize json 6902 patch

View File

@ -17,16 +17,31 @@ limitations under the License.
package config
import (
"fmt"
"net"
"regexp"
"strings"
"sigs.k8s.io/kind/pkg/errors"
)
// similar to valid docker container names, but since we will prefix
// and suffix this name, we can relax it a little
// see NewContext() for usage
// https://godoc.org/github.com/docker/docker/daemon/names#pkg-constants
var validNameRE = regexp.MustCompile(`^[a-z0-9.-]+$`)
// Validate returns a ConfigErrors with an entry for each problem
// with the config, or nil if there are none
func (c *Cluster) Validate() error {
errs := []error{}
// validate the name
if !validNameRE.MatchString(c.Name) {
errs = append(errs, errors.Errorf("'%s' is not a valid cluster name, cluster names must match `%s`",
c.Name, validNameRE.String()))
}
// the api server port only needs checking if we aren't picking a random one
// at runtime
if c.Networking.APIServerPort != 0 {
@ -36,17 +51,20 @@ func (c *Cluster) Validate() error {
}
}
isDualStack := c.Networking.IPFamily == DualStackFamily
// podSubnet should be a valid CIDR
if _, _, err := net.ParseCIDR(c.Networking.PodSubnet); err != nil {
errs = append(errs, errors.Wrapf(err, "invalid podSubnet"))
if err := validateSubnets(c.Networking.PodSubnet, isDualStack); err != nil {
errs = append(errs, errors.Errorf("invalid pod subnet %v", err))
}
// serviceSubnet should be a valid CIDR
if _, _, err := net.ParseCIDR(c.Networking.ServiceSubnet); err != nil {
errs = append(errs, errors.Wrapf(err, "invalid serviceSubnet"))
if err := validateSubnets(c.Networking.ServiceSubnet, isDualStack); err != nil {
errs = append(errs, errors.Errorf("invalid service subnet %v", err))
}
// KubeProxyMode should be iptables or ipvs
if c.Networking.KubeProxyMode != IPTablesMode && c.Networking.KubeProxyMode != IPVSMode {
if c.Networking.KubeProxyMode != IPTablesProxyMode && c.Networking.KubeProxyMode != IPVSProxyMode &&
c.Networking.KubeProxyMode != NoneProxyMode {
errs = append(errs, errors.Errorf("invalid kubeProxyMode: %s", c.Networking.KubeProxyMode))
}
@ -121,3 +139,64 @@ func validatePort(port int32) error {
}
return nil
}
// validateSubnets checks that subnetStr holds valid comma-separated CIDR(s)
// for the cluster's IP family: at most one CIDR for single-stack, and for
// dual-stack either a single CIDR or exactly one CIDR from each family.
func validateSubnets(subnetStr string, dualstack bool) error {
	var problems []error
	// parse every comma-separated CIDR, failing fast on a malformed entry
	rawCIDRs := strings.Split(subnetStr, ",")
	parsed := make([]*net.IPNet, 0, len(rawCIDRs))
	for _, raw := range rawCIDRs {
		_, ipNet, err := net.ParseCIDR(raw)
		if err != nil {
			return fmt.Errorf("failed to parse cidr value:%q with error: %v", raw, err)
		}
		parsed = append(parsed, ipNet)
	}
	switch {
	// if DualStack only 2 CIDRs allowed
	case dualstack && len(parsed) > 2:
		problems = append(problems, errors.New("expected one (IPv4 or IPv6) CIDR or two CIDRs from each family for dual-stack networking"))
	// if DualStack and there are 2 CIDRs validate if there is at least one of each IP family
	case dualstack && len(parsed) == 2:
		bothFamilies, err := isDualStackCIDRs(parsed)
		if err != nil {
			problems = append(problems, err)
		} else if !bothFamilies {
			problems = append(problems, errors.New("expected one (IPv4 or IPv6) CIDR or two CIDRs from each family for dual-stack networking"))
		}
	// if not DualStack only one CIDR allowed
	case !dualstack && len(parsed) > 1:
		problems = append(problems, errors.New("only one CIDR allowed for single-stack networking"))
	}
	if len(problems) > 0 {
		return errors.NewAggregate(problems)
	}
	return nil
}
// isDualStackCIDRs returns if
// - all are valid cidrs
// - at least one cidr from each family (v4 or v6)
func isDualStackCIDRs(cidrs []*net.IPNet) (bool, error) {
v4Found := false
v6Found := false
for _, cidr := range cidrs {
if cidr == nil {
return false, fmt.Errorf("cidr %v is invalid", cidr)
}
if v4Found && v6Found {
continue
}
if cidr.IP != nil && cidr.IP.To4() == nil {
v6Found = true
continue
}
v4Found = true
}
return v4Found && v6Found, nil
}

View File

@ -113,6 +113,13 @@ func (in *Networking) DeepCopy() *Networking {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Node) DeepCopyInto(out *Node) {
*out = *in
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ExtraMounts != nil {
in, out := &in.ExtraMounts, &out.ExtraMounts
*out = make([]Mount, len(*in))