Merge branch 'master' into remove-support

This commit is contained in:
John Gardiner Myers 2020-01-11 20:11:41 -08:00
commit 1651a7ec3c
366 changed files with 3782 additions and 12888 deletions

View File

@ -10,4 +10,15 @@ go:
go_import_path: k8s.io/kops
script:
- GOPROXY=https://proxy.golang.org make travis-ci
- GOPROXY=https://proxy.golang.org make nodeup examples test
jobs:
exclude:
- os: osx
go: "1.12"
include:
- name: Verify
os: linux
go: "1.13"
script:
- GOPROXY=https://proxy.golang.org make travis-ci

View File

@ -439,25 +439,14 @@ nodeup-dist:
tools/sha1 .build/dist/nodeup .build/dist/nodeup.sha1
tools/sha256 .build/dist/nodeup .build/dist/nodeup.sha256
.PHONY: dns-controller-gocode
dns-controller-gocode:
go install ${GCFLAGS} -tags 'peer_name_alternative peer_name_hash' ${LDFLAGS}"${EXTRA_LDFLAGS} -X main.BuildVersion=${DNS_CONTROLLER_TAG}" k8s.io/kops/dns-controller/cmd/dns-controller
.PHONY: bazel-crossbuild-dns-controller
bazel-crossbuild-dns-controller:
bazel build ${BAZEL_CONFIG} --features=pure --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //dns-controller/...
.PHONY: dns-controller-builder-image
dns-controller-builder-image:
docker build -t dns-controller-builder images/dns-controller-builder
.PHONY: dns-controller-build-in-docker
dns-controller-build-in-docker: dns-controller-builder-image
docker run -t -e HOST_UID=${UID} -e HOST_GID=${GID} -v `pwd`:/src dns-controller-builder /onbuild.sh
.PHONY: dns-controller-image
dns-controller-image: dns-controller-build-in-docker
docker build -t ${DOCKER_REGISTRY}/dns-controller:${DNS_CONTROLLER_TAG} -f images/dns-controller/Dockerfile .
.PHONY: dns-controller-push
dns-controller-push: dns-controller-image
docker push ${DOCKER_REGISTRY}/dns-controller:${DNS_CONTROLLER_TAG}
dns-controller-push:
DOCKER_REGISTRY=${DOCKER_REGISTRY} DOCKER_IMAGE_PREFIX=${DOCKER_IMAGE_PREFIX} DNS_CONTROLLER_TAG=${DNS_CONTROLLER_TAG} bazel run --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //dns-controller/cmd/dns-controller:push-image
# --------------------------------------------------
# static utils
@ -569,7 +558,7 @@ ci: govet verify-gofmt verify-generate verify-gomod verify-goimports verify-boil
# verify-gofmt: uses bazel, covered by pull-kops-verify-gofmt
# govet needs to be after verify-goimports because it generates bindata.go
.PHONY: travis-ci
travis-ci: verify-generate verify-gomod verify-goimports govet verify-boilerplate verify-bazel verify-misspelling nodeup examples test | verify-gendocs verify-packages verify-apimachinery
travis-ci: verify-generate verify-gomod verify-goimports govet verify-boilerplate verify-bazel verify-misspelling | verify-gendocs verify-packages verify-apimachinery
echo "Done!"
.PHONY: pr
@ -691,14 +680,6 @@ bazel-crossbuild-nodeup:
bazel-crossbuild-protokube:
bazel build ${BAZEL_CONFIG} --features=pure --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //protokube/...
.PHONY: bazel-crossbuild-dns-controller
bazel-crossbuild-dns-controller:
bazel build ${BAZEL_CONFIG} --features=pure --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //dns-controller/...
.PHONY: bazel-crossbuild-dns-controller-image
bazel-crossbuild-dns-controller-image:
bazel build ${BAZEL_CONFIG} --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //images:dns-controller.tar
.PHONY: bazel-crossbuild-protokube-image
bazel-crossbuild-protokube-image:
bazel build ${BAZEL_CONFIG} --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //images:protokube.tar
@ -765,7 +746,7 @@ push-node-authorizer:
bazel-protokube-export:
mkdir -p ${BAZELIMAGES}
bazel build ${BAZEL_CONFIG} --action_env=PROTOKUBE_TAG=${PROTOKUBE_TAG} --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64 //images:protokube.tar.gz //images:protokube.tar.gz.sha1 //images:protokube.tar.gz.sha256
cp -fp bazel-bin/images/bazel-out/k8-fastbuild/bin/images/protokube.tar.gz ${BAZELIMAGES}/protokube.tar.gz
cp -fp bazel-bin/images/protokube.tar.gz ${BAZELIMAGES}/protokube.tar.gz
cp -fp bazel-bin/images/protokube.tar.gz.sha1 ${BAZELIMAGES}/protokube.tar.gz.sha1
cp -fp bazel-bin/images/protokube.tar.gz.sha256 ${BAZELIMAGES}/protokube.tar.gz.sha256

View File

@ -36,12 +36,10 @@ type MockELBV2 struct {
type loadBalancer struct {
description elbv2.LoadBalancer
tags map[string]string
}
type targetGroup struct {
description elbv2.TargetGroup
tags map[string]string
}
func (m *MockELBV2) DescribeLoadBalancers(request *elbv2.DescribeLoadBalancersInput) (*elbv2.DescribeLoadBalancersOutput, error) {

View File

@ -387,7 +387,7 @@ func runTestAWS(t *testing.T, clusterName string, srcDir string, version string,
h := testutils.NewIntegrationTestHarness(t)
defer h.Close()
h.MockKopsVersion("1.8.1")
h.MockKopsVersion("1.15.0")
h.SetupMockAWS()
expectedFilenames := []string{
@ -434,7 +434,7 @@ func runTestPhase(t *testing.T, clusterName string, srcDir string, version strin
h := testutils.NewIntegrationTestHarness(t)
defer h.Close()
h.MockKopsVersion("1.8.1")
h.MockKopsVersion("1.15.0")
h.SetupMockAWS()
phaseName := string(phase)
if phaseName == "" {
@ -482,7 +482,7 @@ func runTestGCE(t *testing.T, clusterName string, srcDir string, version string,
h := testutils.NewIntegrationTestHarness(t)
defer h.Close()
h.MockKopsVersion("1.8.1")
h.MockKopsVersion("1.15.0")
h.SetupMockGCE()
expectedFilenames := []string{
@ -518,7 +518,7 @@ func runTestCloudformation(t *testing.T, clusterName string, srcDir string, vers
h := testutils.NewIntegrationTestHarness(t)
defer h.Close()
h.MockKopsVersion("1.8.1")
h.MockKopsVersion("1.15.0")
h.SetupMockAWS()
factory := util.NewFactory(factoryOptions)

View File

@ -269,7 +269,7 @@ func runLifecycleTestAWS(o *LifecycleTestOptions) {
h := testutils.NewIntegrationTestHarness(o.t)
defer h.Close()
h.MockKopsVersion("1.8.1")
h.MockKopsVersion("1.15.0")
cloud := h.SetupMockAWS()
var beforeIds []string

View File

@ -217,7 +217,7 @@ func RunReplace(f *util.Factory, cmd *cobra.Command, out io.Writer, c *replaceOp
}
default:
klog.V(2).Infof("Type of object was %T", v)
return fmt.Errorf("Unhandled kind %q in %q", gvk, f)
return fmt.Errorf("unhandled kind %q in %q", gvk, f)
}
}
}

View File

@ -30,11 +30,9 @@ import (
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/klog"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/cloudinstances"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/instancegroups"
"k8s.io/kops/pkg/pretty"
"k8s.io/kops/pkg/validation"
@ -186,10 +184,8 @@ func NewCmdRollingUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
cmd.Flags().StringSliceVar(&options.InstanceGroups, "instance-group", options.InstanceGroups, "List of instance groups to update (defaults to all if not specified)")
cmd.Flags().StringSliceVar(&options.InstanceGroupRoles, "instance-group-roles", options.InstanceGroupRoles, "If specified, only instance groups of the specified role will be updated (e.g. Master,Node,Bastion)")
if featureflag.DrainAndValidateRollingUpdate.Enabled() {
cmd.Flags().BoolVar(&options.FailOnDrainError, "fail-on-drain-error", true, "The rolling-update will fail if draining a node fails.")
cmd.Flags().BoolVar(&options.FailOnValidate, "fail-on-validate-error", true, "The rolling-update will fail if the cluster fails to validate.")
}
cmd.Flags().BoolVar(&options.FailOnDrainError, "fail-on-drain-error", true, "The rolling-update will fail if draining a node fails.")
cmd.Flags().BoolVar(&options.FailOnValidate, "fail-on-validate-error", true, "The rolling-update will fail if the cluster fails to validate.")
cmd.Run = func(cmd *cobra.Command, args []string) {
err := rootCommand.ProcessArgs(args)
@ -389,13 +385,10 @@ func RunRollingUpdateCluster(f *util.Factory, out io.Writer, options *RollingUpd
}
var clusterValidator validation.ClusterValidator
if featureflag.DrainAndValidateRollingUpdate.Enabled() {
klog.V(2).Infof("Rolling update with drain and validate enabled.")
if !options.CloudOnly {
clusterValidator, err = validation.NewClusterValidator(cluster, cloud, list, k8sClient)
if err != nil {
return fmt.Errorf("cannot create cluster validator: %v", err)
}
if !options.CloudOnly {
clusterValidator, err = validation.NewClusterValidator(cluster, cloud, list, k8sClient)
if err != nil {
return fmt.Errorf("cannot create cluster validator: %v", err)
}
}
d := &instancegroups.RollingUpdateCluster{

View File

@ -221,7 +221,7 @@ func (c *RootCmd) ProcessArgs(args []string) error {
fmt.Printf("For example: use `--bastion=true` or `--bastion`, not `--bastion true`\n\n")
if len(args) == 1 {
return fmt.Errorf("Cannot specify cluster via --name and positional argument")
return fmt.Errorf("cannot specify cluster via --name and positional argument")
}
return fmt.Errorf("expected a single <clustername> to be passed as an argument")
}

View File

@ -80,10 +80,10 @@ func NewCmdToolboxBundle(f *util.Factory, out io.Writer) *cobra.Command {
func RunToolboxBundle(context Factory, out io.Writer, options *ToolboxBundleOptions, args []string) error {
if len(args) == 0 {
return fmt.Errorf("Specify name of instance group for node")
return fmt.Errorf("specify name of instance group for node")
}
if len(args) != 1 {
return fmt.Errorf("Can only specify one instance group")
return fmt.Errorf("can only specify one instance group")
}
if options.Target == "" {

View File

@ -132,18 +132,18 @@ func RunToolboxConvertImported(f *util.Factory, out io.Writer, options *ToolboxC
// TODO: Switch to cloudup.BuildCloud
if len(cluster.Spec.Subnets) == 0 {
return fmt.Errorf("Configuration must include Subnets")
return fmt.Errorf("configuration must include Subnets")
}
region := ""
for _, subnet := range cluster.Spec.Subnets {
if len(subnet.Name) <= 2 {
return fmt.Errorf("Invalid AWS zone: %q", subnet.Zone)
return fmt.Errorf("invalid AWS zone: %q", subnet.Zone)
}
zoneRegion := subnet.Zone[:len(subnet.Zone)-1]
if region != "" && zoneRegion != region {
return fmt.Errorf("Clusters cannot span multiple regions")
return fmt.Errorf("clusters cannot span multiple regions")
}
region = zoneRegion

View File

@ -142,6 +142,6 @@ func RunToolboxDump(f *util.Factory, out io.Writer, options *ToolboxDumpOptions)
return nil
default:
return fmt.Errorf("Unsupported output format: %q", options.Output)
return fmt.Errorf("unsupported output format: %q", options.Output)
}
}

View File

@ -135,7 +135,7 @@ func runToolBoxTemplate(f *util.Factory, out io.Writer, options *toolboxTemplate
templates = append(templates, list...)
}
snippets := make(map[string]string, 0)
snippets := make(map[string]string)
for _, x := range options.snippetsPath {
list, err := expandFiles(utils.ExpandPath(x))
if err != nil {
@ -212,7 +212,7 @@ func runToolBoxTemplate(f *util.Factory, out io.Writer, options *toolboxTemplate
// newTemplateContext is responsible for loading the --values and build a context for the template
func newTemplateContext(files []string, values []string, stringValues []string) (map[string]interface{}, error) {
context := make(map[string]interface{}, 0)
context := make(map[string]interface{})
for _, x := range files {
list, err := expandFiles(utils.ExpandPath(x))
@ -225,7 +225,7 @@ func newTemplateContext(files []string, values []string, stringValues []string)
return nil, fmt.Errorf("unable to configuration file: %s, error: %s", j, err)
}
ctx := make(map[string]interface{}, 0)
ctx := make(map[string]interface{})
if err := utils.YamlUnmarshal(content, &ctx); err != nil {
return nil, fmt.Errorf("unable decode the configuration file: %s, error: %v", j, err)
}

View File

@ -219,7 +219,7 @@ func RunUpdateCluster(f *util.Factory, clusterName string, out io.Writer, c *Upd
for _, override := range c.LifecycleOverrides {
values := strings.Split(override, "=")
if len(values) != 2 {
return results, fmt.Errorf("Incorrect syntax for lifecyle-overrides, correct syntax is TaskName=lifecycleName, override provided: %q", override)
return results, fmt.Errorf("incorrect syntax for lifecyle-overrides, correct syntax is TaskName=lifecycleName, override provided: %q", override)
}
taskName := values[0]

View File

@ -81,7 +81,7 @@ func (f *Factory) Clientset() (simple.Clientset, error) {
} else {
u, err := url.Parse(registryPath)
if err != nil {
return nil, fmt.Errorf("Invalid kops server url: %q", registryPath)
return nil, fmt.Errorf("invalid kops server url: %q", registryPath)
}
configOverrides.CurrentContext = u.Host
}

View File

@ -21,7 +21,6 @@ import (
"fmt"
"io"
"os"
"runtime"
"strings"
"time"
@ -40,13 +39,6 @@ import (
"k8s.io/kops/util/pkg/tables"
)
func init() {
if runtime.GOOS == "darwin" {
// In order for net.LookupHost(apiAddr.Host) to lookup our placeholder address on darwin, we have to
os.Setenv("GODEBUG", "netdns=go")
}
}
type ValidateClusterOptions struct {
output string
wait time.Duration
@ -136,12 +128,12 @@ func RunValidateCluster(f *util.Factory, cmd *cobra.Command, args []string, out
configLoadingRules,
&clientcmd.ConfigOverrides{CurrentContext: contextName}).ClientConfig()
if err != nil {
return nil, fmt.Errorf("Cannot load kubecfg settings for %q: %v", contextName, err)
return nil, fmt.Errorf("cannot load kubecfg settings for %q: %v", contextName, err)
}
k8sClient, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("Cannot build kubernetes api client for %q: %v", contextName, err)
return nil, fmt.Errorf("cannot build kubernetes api client for %q: %v", contextName, err)
}
timeout := time.Now().Add(options.wait)
@ -188,7 +180,7 @@ func RunValidateCluster(f *util.Factory, cmd *cobra.Command, args []string, out
}
default:
return nil, fmt.Errorf("Unknown output format: %q", options.output)
return nil, fmt.Errorf("unknown output format: %q", options.output)
}
if options.wait == 0 || len(result.Failures) == 0 {
@ -264,12 +256,10 @@ func validateClusterOutputTable(result *validation.ValidationCluster, cluster *a
if err := failuresTable.Render(result.Failures, out, "KIND", "NAME", "MESSAGE"); err != nil {
return fmt.Errorf("error rendering failures table: %v", err)
}
}
if len(result.Failures) == 0 {
fmt.Fprintf(out, "\nYour cluster %s is ready\n", cluster.Name)
fmt.Fprintf(out, "\nValidation Failed\n")
} else {
fmt.Fprint(out, "\nValidation Failed\n")
fmt.Fprintf(out, "\nYour cluster %s is ready\n", cluster.Name)
}
return nil

View File

@ -167,8 +167,7 @@ func (c *IngressController) updateIngressRecords(ingress *v1beta1.Ingress) strin
fqdn := dns.EnsureDotSuffix(rule.Host)
for _, ingress := range ingresses {
var r dns.Record
r = ingress
r := ingress
r.FQDN = fqdn
records = append(records, r)
}

View File

@ -203,8 +203,7 @@ func (c *ServiceController) updateServiceRecords(service *v1.Service) string {
fqdn := dns.EnsureDotSuffix(token)
for _, ingress := range ingresses {
var r dns.Record
r = ingress
r := ingress
r.FQDN = fqdn
records = append(records, r)
}

View File

@ -87,7 +87,7 @@ func InitDnsProvider(name string, configFilePath string) (Interface, error) {
var config *os.File
config, err = os.Open(configFilePath)
if err != nil {
return nil, fmt.Errorf("Couldn't open DNS provider configuration %s: %#v", configFilePath, err)
return nil, fmt.Errorf("couldn't open DNS provider configuration %s: %#v", configFilePath, err)
}
defer config.Close()

View File

@ -80,12 +80,12 @@ func (r *Route53APIStub) ChangeResourceRecordSets(input *route53.ChangeResourceR
switch *change.Action {
case route53.ChangeActionCreate:
if _, found := recordSets[key]; found {
return nil, fmt.Errorf("Attempt to create duplicate rrset %s", key) // TODO: Return AWS errors with codes etc
return nil, fmt.Errorf("attempt to create duplicate rrset %s", key) // TODO: Return AWS errors with codes etc
}
recordSets[key] = append(recordSets[key], change.ResourceRecordSet)
case route53.ChangeActionDelete:
if _, found := recordSets[key]; !found {
return nil, fmt.Errorf("Attempt to delete non-existent rrset %s", key) // TODO: Check other fields too
return nil, fmt.Errorf("attempt to delete non-existent rrset %s", key) // TODO: Check other fields too
}
delete(recordSets, key)
case route53.ChangeActionUpsert:
@ -110,7 +110,7 @@ func (r *Route53APIStub) CreateHostedZone(input *route53.CreateHostedZoneInput)
name := aws.StringValue(input.Name)
id := "/hostedzone/" + name
if _, ok := r.zones[id]; ok {
return nil, fmt.Errorf("Error creating hosted DNS zone: %s already exists", id)
return nil, fmt.Errorf("error creating hosted DNS zone: %s already exists", id)
}
r.zones[id] = &route53.HostedZone{
Id: aws.String(id),
@ -121,10 +121,10 @@ func (r *Route53APIStub) CreateHostedZone(input *route53.CreateHostedZoneInput)
func (r *Route53APIStub) DeleteHostedZone(input *route53.DeleteHostedZoneInput) (*route53.DeleteHostedZoneOutput, error) {
if _, ok := r.zones[*input.Id]; !ok {
return nil, fmt.Errorf("Error deleting hosted DNS zone: %s does not exist", *input.Id)
return nil, fmt.Errorf("error deleting hosted DNS zone: %s does not exist", *input.Id)
}
if len(r.recordSets[*input.Id]) > 0 {
return nil, fmt.Errorf("Error deleting hosted DNS zone: %s has resource records", *input.Id)
return nil, fmt.Errorf("error deleting hosted DNS zone: %s has resource records", *input.Id)
}
delete(r.zones, *input.Id)
return &route53.DeleteHostedZoneOutput{}, nil

View File

@ -68,7 +68,7 @@ func newCoreDNSProviderInterface(config io.Reader) (*Interface, error) {
klog.Infof("Using CoreDNS DNS provider")
if dnsZones == "" {
return nil, fmt.Errorf("Need to provide at least one DNS Zone")
return nil, fmt.Errorf("need to provide at least one DNS Zone")
}
etcdCfg := etcdc.Config{
@ -78,7 +78,7 @@ func newCoreDNSProviderInterface(config io.Reader) (*Interface, error) {
c, err := etcdc.New(etcdCfg)
if err != nil {
return nil, fmt.Errorf("Create etcd client from the config failed")
return nil, fmt.Errorf("create etcd client from the config failed")
}
etcdKeysAPI := etcdc.NewKeysAPI(c)

View File

@ -97,7 +97,7 @@ func (c *ResourceRecordChangeset) Apply() error {
if checkNotExists {
response, err := c.zone.zones.intf.etcdKeysAPI.Get(ctx, dnsmsg.Path(recordKey, etcdPathPrefix), getOpts)
if err == nil && response != nil {
return fmt.Errorf("Key already exist, key: %v", recordKey)
return fmt.Errorf("key already exist, key: %v", recordKey)
}
}

View File

@ -52,7 +52,7 @@ func (rrsets ResourceRecordSets) Get(name string) ([]dnsprovider.ResourceRecordS
klog.V(2).Infof("Subdomain %q does not exist", name)
return nil, nil
}
return nil, fmt.Errorf("Failed to get service from etcd, err: %v", err)
return nil, fmt.Errorf("failed to get service from etcd, err: %v", err)
}
if emptyResponse(response) {
klog.V(2).Infof("Subdomain %q does not exist in etcd", name)
@ -65,7 +65,7 @@ func (rrsets ResourceRecordSets) Get(name string) ([]dnsprovider.ResourceRecordS
service := dnsmsg.Service{}
err = json.Unmarshal([]byte(node.Value), &service)
if err != nil {
return nil, fmt.Errorf("Failed to unmarshall json data, err: %v", err)
return nil, fmt.Errorf("failed to unmarshall json data, err: %v", err)
}
rrset := ResourceRecordSet{name: name, rrdatas: []string{}, rrsets: &rrsets}

View File

@ -49,13 +49,13 @@ func (c ChangesCreateCall) Do(opts ...googleapi.CallOption) (interfaces.Change,
}
for _, del := range c.Change.Deletions() {
if _, found := rrsets[hashKey(del)]; !found {
return nil, fmt.Errorf("Attempt to delete non-existent rrset %v", del)
return nil, fmt.Errorf("attempt to delete non-existent rrset %v", del)
}
delete(rrsets, hashKey(del))
}
for _, add := range c.Change.Additions() {
if _, found := rrsets[hashKey(add)]; found {
return nil, fmt.Errorf("Attempt to insert duplicate rrset %v", add)
return nil, fmt.Errorf("attempt to insert duplicate rrset %v", add)
}
rrsets[hashKey(add)] = add.(ResourceRecordSet)
}

View File

@ -38,7 +38,7 @@ func (call ManagedZonesCreateCall) Do(opts ...googleapi.CallOption) (interfaces.
return nil, *call.Error
}
if call.Service.Impl[call.Project][call.ManagedZone.DnsName()] != nil {
return nil, fmt.Errorf("Error - attempt to create duplicate zone %s in project %s.",
return nil, fmt.Errorf("error - attempt to create duplicate zone %s in project %s",
call.ManagedZone.DnsName(), call.Project)
}
if call.Service.Impl == nil {

View File

@ -45,7 +45,7 @@ func (call ManagedZonesDeleteCall) Do(opts ...googleapi.CallOption) error {
delete(project, zone.Name())
return nil
}
return fmt.Errorf("Failed to find zone %s in project %s to delete it", call.ZoneName, call.Project)
return fmt.Errorf("failed to find zone %s in project %s to delete it", call.ZoneName, call.Project)
}
return fmt.Errorf("Failed to find project %s to delete zone %s from it", call.Project, call.ZoneName)
return fmt.Errorf("failed to find project %s to delete zone %s from it", call.Project, call.ZoneName)
}

View File

@ -40,7 +40,7 @@ func (call *ManagedZonesListCall) Do(opts ...googleapi.CallOption) (interfaces.M
}
proj, projectFound := call.Service.Impl[call.Project]
if !projectFound {
return nil, fmt.Errorf("Project %s not found.", call.Project)
return nil, fmt.Errorf("project %s not found", call.Project)
}
if call.DnsName_ != "" {
return &ManagedZonesListResponse{[]interfaces.ManagedZone{proj[call.DnsName_]}}, nil

View File

@ -38,7 +38,7 @@ func (s ResourceRecordSetsService) managedZone(project, managedZone string) (*Ma
}
z := s.Service.ManagedZones_.Impl[project][managedZone]
if z == nil {
return nil, fmt.Errorf("Zone %s not found in project %s", managedZone, project)
return nil, fmt.Errorf("zone %s not found in project %s", managedZone, project)
}
return z.(*ManagedZone), nil
}

View File

@ -104,11 +104,11 @@ func (c *ResourceRecordChangeset) Apply() error {
}
newAdditions := newChange.Additions()
if len(newAdditions) != len(additions) {
return fmt.Errorf("Internal error when adding resource record set. Call succeeded but number of records returned is incorrect. Records sent=%d, records returned=%d, additions:%v", len(additions), len(newAdditions), c.additions)
return fmt.Errorf("internal error when adding resource record set. Call succeeded but number of records returned is incorrect. Records sent=%d, records returned=%d, additions:%v", len(additions), len(newAdditions), c.additions)
}
newDeletions := newChange.Deletions()
if len(newDeletions) != len(deletions) {
return fmt.Errorf("Internal error when deleting resource record set. Call succeeded but number of records returned is incorrect. Records sent=%d, records returned=%d, deletions:%v", len(deletions), len(newDeletions), c.removals)
return fmt.Errorf("internal error when deleting resource record set. Call succeeded but number of records returned is incorrect. Records sent=%d, records returned=%d, deletions:%v", len(deletions), len(newDeletions), c.removals)
}
return nil

View File

@ -249,7 +249,7 @@ for asset in "${KOPS_ASSETS[@]}"; do
mkdir -p "$dir"
url="https://kubeupv2.s3.amazonaws.com/kops/$KOPS_VERSION/$asset"
wget -P "$dir" "$url"
wget -P "$dir" "$url.sha1"
wget -P "$dir" "$url.sha256"
done
## Upload assets

View File

@ -1,8 +1,26 @@
# Calico
[Calico](https://docs.projectcalico.org/latest/introduction/) is an open source networking and
network security solution for containers, virtual machines, and native host-based workloads.
Calico combines flexible networking capabilities with run-anywhere security enforcement to provide
a solution with native Linux kernel performance and true cloud-native scalability. Calico provides
developers and cluster operators with a consistent experience and set of capabilities whether
running in public cloud or on-prem, on a single node or across a multi-thousand node cluster.
See [Calico for networking and network policy](networking.md#calico-example-for-cni-and-network-policy) for help configuring kops with Calico.
For more general information on options available with Calico see the official [Calico docs](https://docs.projectcalico.org/latest/introduction/):
* See [Calico Network Policy](https://docs.projectcalico.org/latest/security/calico-network-policy)
for details on the additional features not available with Kubernetes Network Policy.
* See [Determining best Calico networking option](https://docs.projectcalico.org/latest/networking/determine-best-networking)
for help with the network options available with Calico.
# Calico Version 3
In early 2018, Version 3 of Calico was released; it included a reworked data
model and with that a switch from the etcd v2 to v3 API. This document covers
model and with that a switch from the etcd v2 to v3 API. This section covers
the requirements, upgrade process, and configuration to install
Calico Version 3.
Calico Version 3. By default new Kops installations configured to use Calico
will install v3.
## Requirements
@ -45,7 +63,7 @@ a v3 version. Feel free to change to a different v3 version of etcd.
## Upgrading an existing cluster
Assuming your cluster meets the requirements it is possible to upgrade
your Calico Kops cluster.
your Calico v2 Kops cluster to Calico v3.
A few notes about the upgrade:

View File

@ -870,3 +870,28 @@ spec:
assets:
containerProxy: proxy.example.com
```
### Setting Custom Kernel Runtime Parameters
To add custom kernel runtime parameters to all instance groups in the
cluster, specify the `sysctlParameters` field as an array of strings. Each
string must take the form of `variable=value` the way it would appear in
sysctl.conf (see also `sysctl(8)` manpage).
You could also use the `sysctlParameters` field on [the instance group](https://github.com/kubernetes/kops/blob/master/docs/instance_groups.md#setting-custom-kernel-runtime-parameters) to specify different parameters for each instance group.
Unlike a simple file asset, specifying kernel runtime parameters in this manner
would correctly invoke `sysctl --system` automatically for you to apply said
parameters.
For example:
```yaml
spec:
sysctlParameters:
- fs.pipe-user-pages-soft=524288
- net.ipv4.tcp_keepalive_time=200
```
which would end up in a drop-in file on all masters and nodes of the cluster.

View File

@ -4,6 +4,6 @@ Upgrading and modifying a k8s cluster often requires the replacement of nodes.
When starting the rolling update, kops will check each instance in the instance group if it needs to be updated, so when you just update nodes, the master will not be updated. When your rolling update is interrupted and you run another rolling update, instances that have been updated before will not be updated again.
![Rolling Update Diagram](/development/images/rolling-update.png?raw=true "Rolling Updates Diagram")
![Rolling Update Diagram](images/rolling-update.png?raw=true "Rolling Updates Diagram")
`kops` executes steps 2-4 for all the masters until the masters are replaced. Then the same process is followed to replace all nodes.

View File

@ -4,7 +4,7 @@ HTTP Forward Proxy Support
It is possible to launch a Kubernetes cluster from behind an http forward proxy ("corporate proxy"). To do so, you will need to configure the `egressProxy` for the cluster.
It is assumed the proxy is already existing. If you want a private topology on AWS, for example, with an proxy instead of a NAT instance, you'll need to create the proxy yourself. See [Running in a shared VPC](run_in_existing_vpc.md).
It is assumed the proxy is already existing. If you want a private topology on AWS, for example, with a proxy instead of a NAT instance, you'll need to create the proxy yourself. See [Running in a shared VPC](run_in_existing_vpc.md).
This configuration only manages proxy configurations for Kops and the Kubernetes cluster. We cannot handle proxy configuration for application containers and pods.

View File

@ -574,3 +574,31 @@ spec:
```
If `openstack.kops.io/osVolumeSize` is not set it will default to the minimum disk specified by the image.
## Setting Custom Kernel Runtime Parameters
To add custom kernel runtime parameters to your instance group, specify the
`sysctlParameters` field as an array of strings. Each string must take the form
of `variable=value` the way it would appear in sysctl.conf (see also
`sysctl(8)` manpage).
Unlike a simple file asset, specifying kernel runtime parameters in this manner
would correctly invoke `sysctl --system` automatically for you to apply said
parameters.
For example:
```yaml
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
name: nodes
spec:
sysctlParameters:
- fs.pipe-user-pages-soft=524288
- net.ipv4.tcp_keepalive_time=200
```
which would end up in a drop-in file on nodes of the instance group in question.

View File

@ -34,7 +34,7 @@ has built in support for CNI networking components.
Several different CNI providers are currently built into kops:
* [Calico](https://docs.projectcalico.org/v3.1/getting-started/kubernetes/installation/calico#installing-with-the-etcd-datastore)
* [Calico](https://docs.projectcalico.org/latest/introduction) - use `--networking calico` (See [below](#calico-example-for-cni-and-network-policy) for additional configuration options.)
* [Canal (Flannel + Calico)](https://github.com/projectcalico/canal)
* [flannel](https://github.com/coreos/flannel) - use `--networking flannel-vxlan` (recommended) or `--networking flannel-udp` (legacy). `--networking flannel` now selects `flannel-vxlan`.
* [kopeio-vxlan](https://github.com/kopeio/networking)
@ -161,15 +161,13 @@ The above will deploy a daemonset installation which requires K8s 1.4.x or above
##### Enable Cross-Subnet mode in Calico (AWS only)
Calico [since 2.1] supports a new option for IP-in-IP mode where traffic is only encapsulated
Calico [since 2.1](https://www.projectcalico.org/project-calico-2-1-released/) supports a new option for IP-in-IP mode where traffic is only encapsulated
when its destined to subnets with intermediate infrastructure lacking Calico route awareness
for example, across heterogeneous public clouds or on AWS where traffic is crossing availability zones/ regions.
With this mode, IP-in-IP encapsulation is only performed selectively. This provides better performance in AWS
multi-AZ deployments, and in general when deploying on networks where pools of nodes with L2 connectivity
are connected via a router.
Reference: [Calico 2.1 Release Notes](https://www.projectcalico.org/project-calico-2-1-released/)
With this mode, IP-in-IP encapsulation is only [performed selectively](https://docs.projectcalico.org/v3.10/networking/vxlan-ipip#configure-ip-in-ip-encapsulation-for-only-cross-subnet-traffic).
This provides better performance in AWS multi-AZ deployments, and in general when deploying on networks where
pools of nodes with L2 connectivity are connected via a router.
Note that with Calico, by default, routes between nodes within a subnet are distributed using a full node-to-node BGP mesh.
Each node automatically sets up a BGP peering with every other node within the same L2 network.
@ -210,7 +208,7 @@ Only the masters have the IAM policy (`ec2:*`) to allow k8s-ec2-srcdst to execut
For Calico specific documentation please visit the [Calico Docs](http://docs.projectcalico.org/latest/getting-started/kubernetes/).
For details on upgrading a Calico v2 deployment see [Calico Version 3](calico-v3.md).
For details on upgrading a Calico v2 deployment see [Calico Version 3](calico-v3.md#upgrading-an-existing-cluster).
#### Getting help with Calico
@ -225,7 +223,7 @@ In kops 1.12.0 and later Calico uses the k8s APIServer as its datastore.
In versions <1.12.0 of kops Calico uses etcd as a backend for storing information about workloads and policies. Calico does not interfere with normal etcd operations and does not require special handling when upgrading etcd. For more information please visit the [etcd Docs](https://coreos.com/etcd/docs/latest/)
#### Configuraing Calico to use Typha
#### Configuring Calico to use Typha
As of Kops 1.12 Calico uses the kube-apiserver as its datastore. The default setup does not make use of [Typha](https://github.com/projectcalico/typha) - a component intended to lower the impact of Calico on the k8s APIServer which is recommended in [clusters over 50 nodes](https://docs.projectcalico.org/latest/getting-started/kubernetes/installation/calico#installing-with-the-kubernetes-api-datastoremore-than-50-nodes) and is strongly recommended in clusters of 100+ nodes.
It is possible to configure Calico to use Typha by editing a cluster and adding a
@ -474,7 +472,7 @@ For support with Cilium Network Policies you can reach out on Slack or Github:
### Flannel Example for CNI
#### Configuraing Flannel iptables resync period
#### Configuring Flannel iptables resync period
As of Kops 1.12.0, Flannel iptables resync option is configurable via editing a cluster and adding
`iptablesResyncSeconds` option to spec:

View File

@ -42,7 +42,7 @@ spec:
manifest: bar.addons.org.io/v0.0.1.yaml
```
In this this example the folder structure should look like this;
In this example the folder structure should look like this;
```
addon.yaml

2
go.mod
View File

@ -134,7 +134,7 @@ require (
k8s.io/klog v0.3.1
k8s.io/kubernetes v1.15.3
k8s.io/legacy-cloud-providers v0.0.0
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5
k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6
sigs.k8s.io/controller-runtime v0.2.2
sigs.k8s.io/controller-tools v0.2.2-0.20190919191502-76a25b63325a
sigs.k8s.io/yaml v1.1.0

2
go.sum
View File

@ -662,6 +662,8 @@ k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y=
k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 h1:p0Ai3qVtkbCG/Af26dBmU0E1W58NID3hSSh7cMyylpM=
k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=

View File

@ -155,6 +155,7 @@ k8s.io/kops/protokube/pkg/gossip/gce
k8s.io/kops/protokube/pkg/gossip/memberlist
k8s.io/kops/protokube/pkg/gossip/mesh
k8s.io/kops/protokube/pkg/gossip/openstack
k8s.io/kops/protokube/pkg/hostmount
k8s.io/kops/protokube/pkg/protokube
k8s.io/kops/protokube/tests/integration/build_etcd_manifest
k8s.io/kops/tests

View File

@ -1,8 +1,2 @@
cloudmock/aws/mockelbv2
node-authorizer/pkg/authorizers/aws
node-authorizer/pkg/server
pkg/resources/openstack
upup/pkg/fi
upup/pkg/fi/cloudup
upup/pkg/fi/cloudup/awstasks
upup/pkg/kutil

View File

@ -48,8 +48,12 @@ FOCUS="${FOCUS%/}"
# See https://staticcheck.io/docs/checks
CHECKS=(
"all"
"-S1*" # Omit code simplifications for now.
"-ST1*" # Mostly stylistic, redundant w/ golint
"-ST1000" # Incorrect or missing package comment
"-ST1003" # Poorly chosen identifier
"-ST1005" # Incorrectly formatted error string
"-ST1006" # Poorly chosen receiver name
"-ST1012" # Poorly chosen name for error variable
"-ST1016" # Use consistent method receiver names
)
export IFS=','; checks="${CHECKS[*]}"; unset IFS

View File

@ -5,17 +5,6 @@ load(
"container_bundle",
"container_image",
)
container_image(
name = "dns-controller",
base = "@debian_hyperkube_base_amd64//image",
cmd = ["/usr/bin/dns-controller"],
directory = "/usr/bin/",
files = [
"//dns-controller/cmd/dns-controller",
],
)
load("@package_bundle//file:packages.bzl", "packages")
container_image(

View File

@ -987,6 +987,10 @@ spec:
in the 'kube-system' namespace to be used for TLS bootstrapping
authentication
type: boolean
encryptionProviderConfig:
description: EncryptionProviderConfig enables encryption at rest
for secrets.
type: string
etcdCaFile:
description: EtcdCAFile is the path to a ca certificate
type: string
@ -2769,6 +2773,21 @@ spec:
connLimit:
format: int32
type: integer
cpuLimit:
description: CPULimit CPU limit of weave container.
type: string
cpuRequest:
description: CPURequest CPU request of weave container. Default
50m
type: string
memoryLimit:
description: MemoryLimit memory limit of weave container. Default
200Mi
type: string
memoryRequest:
description: MemoryRequest memory request of weave container.
Default 200Mi
type: string
mtu:
format: int32
type: integer
@ -2777,6 +2796,21 @@ spec:
noMasqLocal:
format: int32
type: integer
npcCPULimit:
description: NPCCPULimit CPU limit of weave npc container
type: string
npcCPURequest:
description: NPCCPURequest CPU request of weave npc container.
Default 50m
type: string
npcMemoryLimit:
description: NPCMemoryLimit memory limit of weave npc container.
Default 200Mi
type: string
npcMemoryRequest:
description: NPCMemoryRequest memory request of weave npc container.
Default 200Mi
type: string
type: object
type: object
nodeAuthorization:
@ -2886,6 +2920,13 @@ spec:
type: string
type: object
type: array
sysctlParameters:
description: SysctlParameters will configure kernel parameters using
sysctl(8). When specified, each parameter must follow the form variable=value,
the way it would appear in sysctl.conf.
items:
type: string
type: array
target:
description: Target allows for us to nest extra config for targets such
as terraform

View File

@ -664,6 +664,13 @@ spec:
items:
type: string
type: array
sysctlParameters:
description: SysctlParameters will configure kernel parameters using
sysctl(8). When specified, each parameter must follow the form variable=value,
the way it would appear in sysctl.conf.
items:
type: string
type: array
taints:
description: Taints indicates the kubernetes taints for nodes in this
group

View File

@ -19,9 +19,6 @@ package aws
import (
"context"
"encoding/json"
"net"
"net/http"
"time"
"k8s.io/kops/node-authorizer/pkg/server"
@ -29,17 +26,6 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
)
// hc is the http client
var hc = &http.Client{
Timeout: 10 * time.Second,
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 10 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
},
}
type awsNodeVerifier struct{}
// NewVerifier creates and returns a verifier

View File

@ -76,7 +76,8 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/client-go/util/cert:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/kubernetes/pkg/util/mount:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/mount:go_default_library",
],
)

View File

@ -114,7 +114,7 @@ var containerdVersions = []packageVersion{
Hash: "7de4211fa0dfd240d8827b93763e1eb5f0d56411",
},
},
Dependencies: []string{"libseccomp", "pigz", "policycoreutils-python"},
Dependencies: []string{"libseccomp", "policycoreutils-python"},
},
// 1.2.10 - CentOS / Rhel 8

View File

@ -85,11 +85,13 @@ func TestContainerdPackageHashes(t *testing.T) {
}
for _, containerdVersion := range containerdVersions {
verifyContainerdPackageHash(t, containerdVersion.Source, containerdVersion.Hash)
t.Run(containerdVersion.Source, func(t *testing.T) {
verifyContainerdPackageHash(t, containerdVersion.Source, containerdVersion.Hash)
for _, p := range containerdVersion.ExtraPackages {
verifyContainerdPackageHash(t, p.Source, p.Hash)
}
for _, p := range containerdVersion.ExtraPackages {
verifyContainerdPackageHash(t, p.Source, p.Hash)
}
})
}
}

View File

@ -32,7 +32,7 @@ import (
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"k8s.io/kops/util/pkg/vfs"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/utils/mount"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
@ -567,11 +567,11 @@ func EvaluateHostnameOverride(hostnameOverride string) (string, error) {
}
if len(result.Reservations) != 1 {
return "", fmt.Errorf("Too many reservations returned for the single instance-id")
return "", fmt.Errorf("too many reservations returned for the single instance-id")
}
if len(result.Reservations[0].Instances) != 1 {
return "", fmt.Errorf("Too many instances returned for the single instance-id")
return "", fmt.Errorf("too many instances returned for the single instance-id")
}
return *(result.Reservations[0].Instances[0].PrivateDnsName), nil
}

View File

@ -17,6 +17,8 @@ limitations under the License.
package model
import (
"path/filepath"
"k8s.io/kops/nodeup/pkg/distros"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
@ -46,5 +48,70 @@ func (b *DirectoryBuilder) Build(c *fi.ModelBuilderContext) error {
})
}
// We try to put things into /opt/kops
// On some OSes though, /opt/ is not writeable, and we can't even create the mountpoint
if b.Distribution == distros.DistributionContainerOS {
src := "/mnt/stateful_partition/opt/"
c.AddTask(&nodetasks.File{
Path: src,
Type: nodetasks.FileType_Directory,
Mode: s("0755"),
})
// Rebuild things we are masking
c.AddTask(&nodetasks.File{
Path: filepath.Join(src, "google"),
Type: nodetasks.FileType_Directory,
Mode: s("0755"),
})
c.AddTask(&nodetasks.File{
Path: filepath.Join(src, "google", "crash-reporter"),
Type: nodetasks.FileType_Directory,
Mode: s("0755"),
})
c.AddTask(&nodetasks.File{
Path: filepath.Join(src, "google", "crash-reporter", "filter"),
Type: nodetasks.FileType_File,
Mode: s("0755"),
Contents: fi.NewStringResource(cosCrashFilter),
})
// Precreate the directory that will be /opt/kops, so we can bind remount it
c.AddTask(&nodetasks.File{
Path: filepath.Join(src, "kops"),
Type: nodetasks.FileType_Directory,
Mode: s("0755"),
})
c.AddTask(&nodetasks.File{
Path: filepath.Join(src, "kops", "bin"),
Type: nodetasks.FileType_Directory,
Mode: s("0755"),
})
c.AddTask(&nodetasks.BindMount{
Source: src,
Mountpoint: "/opt",
Options: []string{"ro"},
})
c.AddTask(&nodetasks.BindMount{
Source: filepath.Join(src, "kops", "bin"),
Mountpoint: "/opt/kops/bin",
Options: []string{"exec", "nosuid", "nodev"},
})
}
return nil
}
// cosCrashFilter is the crash-reporter filter script we install on
// Container-Optimized OS (COS) to prevent userspace crash collection:
// the script unconditionally exits 1, which (per its own comment)
// stops any userland crash from being collected.
// This is the one thing we need from /opt.
const cosCrashFilter = `#!/bin/bash
# Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Do no collect any userland crash.
exit 1
`

View File

@ -85,11 +85,13 @@ func TestDockerPackageHashes(t *testing.T) {
}
for _, dockerVersion := range dockerVersions {
verifyPackageHash(t, dockerVersion.Source, dockerVersion.Hash)
t.Run(dockerVersion.Source, func(t *testing.T) {
verifyPackageHash(t, dockerVersion.Source, dockerVersion.Hash)
for _, p := range dockerVersion.ExtraPackages {
verifyPackageHash(t, p.Source, p.Hash)
}
for _, p := range dockerVersion.ExtraPackages {
verifyPackageHash(t, p.Source, p.Hash)
}
})
}
}

View File

@ -83,7 +83,7 @@ func (f *FileAssetsBuilder) buildFileAssets(c *fi.ModelBuilderContext, assets []
if asset.IsBase64 {
decoded, err := base64.RawStdEncoding.DecodeString(content)
if err != nil {
return fmt.Errorf("Failed on file asset: %s is invalid, unable to decode base64, error: %q", asset.Name, err)
return fmt.Errorf("failed on file asset: %s is invalid, unable to decode base64, error: %q", asset.Name, err)
}
content = string(decoded)
}

View File

@ -46,7 +46,7 @@ func (b *FirewallBuilder) buildSystemdService() *nodetasks.Service {
manifest.Set("Unit", "Before", "network.target")
manifest.Set("Service", "Type", "oneshot")
manifest.Set("Service", "RemainAfterExit", "yes")
manifest.Set("Service", "ExecStart", "/opt/kops/helpers/iptables-setup")
manifest.Set("Service", "ExecStart", "/opt/kops/bin/iptables-setup")
manifest.Set("Install", "WantedBy", "basic.target")
manifestString := manifest.Render()
@ -87,7 +87,7 @@ iptables -A FORWARD -w -p ICMP -j ACCEPT
fi
`
return &nodetasks.File{
Path: "/opt/kops/helpers/iptables-setup",
Path: "/opt/kops/bin/iptables-setup",
Contents: fi.NewStringResource(script),
Type: nodetasks.FileType_File,
Mode: s("0755"),

View File

@ -59,13 +59,20 @@ func (b *KubeAPIServerBuilder) Build(c *fi.ModelBuilderContext) error {
if b.Cluster.Spec.EncryptionConfig != nil {
if *b.Cluster.Spec.EncryptionConfig && b.IsKubernetesGTE("1.7") {
b.Cluster.Spec.KubeAPIServer.ExperimentalEncryptionProviderConfig = fi.String(filepath.Join(b.PathSrvKubernetes(), "encryptionconfig.yaml"))
encryptionConfigPath := fi.String(filepath.Join(b.PathSrvKubernetes(), "encryptionconfig.yaml"))
if b.IsKubernetesGTE("1.13") {
b.Cluster.Spec.KubeAPIServer.EncryptionProviderConfig = encryptionConfigPath
} else {
b.Cluster.Spec.KubeAPIServer.ExperimentalEncryptionProviderConfig = encryptionConfigPath
}
key := "encryptionconfig"
encryptioncfg, _ := b.SecretStore.Secret(key)
if encryptioncfg != nil {
contents := string(encryptioncfg.Data)
t := &nodetasks.File{
Path: *b.Cluster.Spec.KubeAPIServer.ExperimentalEncryptionProviderConfig,
Path: *encryptionConfigPath,
Contents: fi.NewStringResource(contents),
Mode: fi.String("600"),
Type: nodetasks.FileType_File,
@ -275,7 +282,7 @@ func (b *KubeAPIServerBuilder) writeAuthenticationConfig(c *fi.ModelBuilderConte
return nil
}
return fmt.Errorf("Unrecognized authentication config %v", b.Cluster.Spec.Authentication)
return fmt.Errorf("unrecognized authentication config %v", b.Cluster.Spec.Authentication)
}
// buildPod is responsible for generating the kube-apiserver pod and thus manifest file

View File

@ -128,6 +128,12 @@ func Test_KubeAPIServer_BuildFlags(t *testing.T) {
},
"--experimental-encryption-provider-config=/srv/kubernetes/encryptionconfig.yaml --insecure-port=0 --secure-port=0",
},
{
kops.KubeAPIServerConfig{
EncryptionProviderConfig: fi.String("/srv/kubernetes/encryptionconfig.yaml"),
},
"--encryption-provider-config=/srv/kubernetes/encryptionconfig.yaml --insecure-port=0 --secure-port=0",
},
{
kops.KubeAPIServerConfig{
TargetRamMb: 320,

View File

@ -129,7 +129,7 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
cpuRequest, err := resource.ParseQuantity(c.CPURequest)
if err != nil {
return nil, fmt.Errorf("Error parsing CPURequest=%q", c.CPURequest)
return nil, fmt.Errorf("error parsing CPURequest=%q", c.CPURequest)
}
resourceRequests["cpu"] = cpuRequest
@ -137,7 +137,7 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
if c.CPULimit != "" {
cpuLimit, err := resource.ParseQuantity(c.CPULimit)
if err != nil {
return nil, fmt.Errorf("Error parsing CPULimit=%q", c.CPULimit)
return nil, fmt.Errorf("error parsing CPULimit=%q", c.CPULimit)
}
resourceLimits["cpu"] = cpuLimit
}
@ -145,7 +145,7 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
if c.MemoryRequest != "" {
memoryRequest, err := resource.ParseQuantity(c.MemoryRequest)
if err != nil {
return nil, fmt.Errorf("Error parsing MemoryRequest=%q", c.MemoryRequest)
return nil, fmt.Errorf("error parsing MemoryRequest=%q", c.MemoryRequest)
}
resourceRequests["memory"] = memoryRequest
}
@ -153,7 +153,7 @@ func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
if c.MemoryLimit != "" {
memoryLimit, err := resource.ParseQuantity(c.MemoryLimit)
if err != nil {
return nil, fmt.Errorf("Error parsing MemoryLimit=%q", c.MemoryLimit)
return nil, fmt.Errorf("error parsing MemoryLimit=%q", c.MemoryLimit)
}
resourceLimits["memory"] = memoryLimit
}

View File

@ -270,7 +270,14 @@ func (b *KubeletBuilder) buildSystemdService() *nodetasks.Service {
manifest := &systemd.Manifest{}
manifest.Set("Unit", "Description", "Kubernetes Kubelet Server")
manifest.Set("Unit", "Documentation", "https://github.com/kubernetes/kubernetes")
manifest.Set("Unit", "After", "docker.service")
switch b.Cluster.Spec.ContainerRuntime {
case "docker":
manifest.Set("Unit", "After", "docker.service")
case "containerd":
manifest.Set("Unit", "After", "containerd.service")
default:
klog.Warningf("unknown container runtime %q", b.Cluster.Spec.ContainerRuntime)
}
if b.Distribution == distros.DistributionCoreOS {
// We add /opt/kubernetes/bin for our utilities (socat, conntrack)

View File

@ -70,6 +70,7 @@ func (b *LogrotateBuilder) Build(c *fi.ModelBuilderContext) error {
b.addLogRotate(c, "kubelet", "/var/log/kubelet.log", logRotateOptions{})
b.addLogRotate(c, "etcd", "/var/log/etcd.log", logRotateOptions{})
b.addLogRotate(c, "etcd-events", "/var/log/etcd-events.log", logRotateOptions{})
b.addLogRotate(c, "kops-controller", "/var/log/kops-controller.log", logRotateOptions{})
if err := b.addLogrotateService(c); err != nil {
return err

View File

@ -52,11 +52,14 @@ func (b *MiscUtilsBuilder) Build(c *fi.ModelBuilderContext) error {
if b.Distribution.IsDebianFamily() {
packages = append(packages, "socat")
packages = append(packages, "curl")
packages = append(packages, "wget")
packages = append(packages, "nfs-common")
packages = append(packages, "python-apt")
packages = append(packages, "apt-transport-https")
} else if b.Distribution.IsRHELFamily() {
packages = append(packages, "curl")
packages = append(packages, "wget")
packages = append(packages, "nfs-utils")
packages = append(packages, "python2")
packages = append(packages, "git")
} else {

View File

@ -18,6 +18,7 @@ package model
import (
"fmt"
"os"
"path/filepath"
"k8s.io/kops/upup/pkg/fi"
@ -71,7 +72,21 @@ func (b *NetworkBuilder) Build(c *fi.ModelBuilderContext) error {
}
if networking.Cilium != nil {
unit := s(`
// systemd v238 includes the bpffs mount by default; and gives an error "has a bad unit file setting" if we try to mount it again (see mount_point_is_api)
var alreadyMounted bool
_, err := os.Stat("/sys/fs/bpf")
if err != nil {
if os.IsNotExist(err) {
alreadyMounted = false
} else {
return fmt.Errorf("error checking for /sys/fs/bpf: %v", err)
}
} else {
alreadyMounted = true
}
if !alreadyMounted {
unit := s(`
[Unit]
Description=Cilium BPF mounts
Documentation=http://docs.cilium.io/
@ -84,15 +99,16 @@ Where=/sys/fs/bpf
Type=bpf
[Install]
WantedBy=multi-user.target
WantedBy=multi-user.target
`)
service := &nodetasks.Service{
Name: "sys-fs-bpf.mount",
Definition: unit,
service := &nodetasks.Service{
Name: "sys-fs-bpf.mount",
Definition: unit,
}
service.InitDefaults()
c.AddTask(service)
}
service.InitDefaults()
c.AddTask(service)
}
return nil

View File

@ -77,8 +77,15 @@ func (b *NodeAuthorizationBuilder) Build(c *fi.ModelBuilderContext) error {
man := &systemd.Manifest{}
man.Set("Unit", "Description", "Node Authorization Client")
man.Set("Unit", "Documentation", "https://github.com/kubernetes/kops")
man.Set("Unit", "After", "docker.service")
man.Set("Unit", "Before", "kubelet.service")
switch b.Cluster.Spec.ContainerRuntime {
case "docker":
man.Set("Unit", "After", "docker.service")
case "containerd":
man.Set("Unit", "After", "containerd.service")
default:
klog.Warningf("unknown container runtime %q", b.Cluster.Spec.ContainerRuntime)
}
clientCert := filepath.Join(b.PathSrvKubernetes(), authorizerDir, "tls.pem")
man.Set("Service", "Type", "oneshot")

View File

@ -55,6 +55,15 @@ func (t *ProtokubeBuilder) Build(c *fi.ModelBuilderContext) error {
return nil
}
if protokubeImage := t.NodeupConfig.ProtokubeImage; protokubeImage != nil {
c.AddTask(&nodetasks.LoadImageTask{
Name: "protokube",
Sources: protokubeImage.Sources,
Hash: protokubeImage.Hash,
Runtime: t.Cluster.Spec.ContainerRuntime,
})
}
if t.IsMaster {
kubeconfig, err := t.BuildPKIKubeconfig("kops")
if err != nil {
@ -184,7 +193,7 @@ func (t *ProtokubeBuilder) ProtokubeImagePullCommand() (string, error) {
if t.Cluster.Spec.ContainerRuntime == "docker" {
protokubeImagePullCommand = "-/usr/bin/docker pull " + sources[0]
} else if t.Cluster.Spec.ContainerRuntime == "containerd" {
protokubeImagePullCommand = "-/usr/bin/ctr --namespace k8s.io images pull docker.io/" + sources[0]
protokubeImagePullCommand = "-/usr/bin/ctr images pull docker.io/" + sources[0]
} else {
return "", fmt.Errorf("unable to create protokube image pull command for unsupported runtime %q", t.Cluster.Spec.ContainerRuntime)
}
@ -197,7 +206,7 @@ func (t *ProtokubeBuilder) ProtokubeContainerStopCommand() (string, error) {
if t.Cluster.Spec.ContainerRuntime == "docker" {
containerStopCommand = "-/usr/bin/docker stop protokube"
} else if t.Cluster.Spec.ContainerRuntime == "containerd" {
containerStopCommand = "-/usr/bin/ctr --namespace k8s.io task pause protokube"
containerStopCommand = "/bin/true"
} else {
return "", fmt.Errorf("unable to create protokube stop command for unsupported runtime %q", t.Cluster.Spec.ContainerRuntime)
}
@ -210,7 +219,7 @@ func (t *ProtokubeBuilder) ProtokubeContainerRemoveCommand() (string, error) {
if t.Cluster.Spec.ContainerRuntime == "docker" {
containerRemoveCommand = "-/usr/bin/docker rm protokube"
} else if t.Cluster.Spec.ContainerRuntime == "containerd" {
containerRemoveCommand = "-/usr/bin/ctr --namespace k8s.io container rm protokube"
containerRemoveCommand = "-/usr/bin/ctr container rm protokube"
} else {
return "", fmt.Errorf("unable to create protokube remove command for unsupported runtime %q", t.Cluster.Spec.ContainerRuntime)
}
@ -263,7 +272,7 @@ func (t *ProtokubeBuilder) ProtokubeContainerRunCommand() (string, error) {
} else if t.Cluster.Spec.ContainerRuntime == "containerd" {
containerRunArgs = append(containerRunArgs, []string{
"/usr/bin/ctr --namespace k8s.io run",
"/usr/bin/ctr run",
"--net-host",
"--with-ns pid:/proc/1/ns/pid",
"--privileged",

View File

@ -17,6 +17,7 @@ limitations under the License.
package model
import (
"fmt"
"strings"
"k8s.io/kops/pkg/apis/kops"
@ -130,6 +131,30 @@ func (b *SysctlBuilder) Build(c *fi.ModelBuilderContext) error {
"net.ipv4.ip_forward=1",
"")
if params := b.InstanceGroup.Spec.SysctlParameters; len(params) > 0 {
sysctls = append(sysctls,
"# Custom sysctl parameters from instance group spec",
"")
for _, param := range params {
if !strings.ContainsRune(param, '=') {
return fmt.Errorf("Invalid SysctlParameter: expected %q to contain '='", param)
}
sysctls = append(sysctls, param)
}
}
if params := b.Cluster.Spec.SysctlParameters; len(params) > 0 {
sysctls = append(sysctls,
"# Custom sysctl parameters from cluster spec",
"")
for _, param := range params {
if !strings.ContainsRune(param, '=') {
return fmt.Errorf("Invalid SysctlParameter: expected %q to contain '='", param)
}
sysctls = append(sysctls, param)
}
}
c.AddTask(&nodetasks.File{
Path: "/etc/sysctl.d/99-k8s-general.conf",
Contents: fi.NewStringResource(strings.Join(sysctls, "\n")),

View File

@ -9,6 +9,7 @@ spec:
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/minimal.example.com
containerRuntime: docker
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a

View File

@ -5,10 +5,10 @@ definition: |
Documentation=https://github.com/kubernetes/kops
[Service]
ExecStartPre=-/usr/bin/ctr --namespace k8s.io task pause protokube
ExecStartPre=-/usr/bin/ctr --namespace k8s.io container rm protokube
ExecStartPre=/bin/true
ExecStart=/usr/bin/ctr --namespace k8s.io run --net-host --with-ns pid:/proc/1/ns/pid --privileged --mount type=bind,src=/,dst=/rootfs,options=rbind:rslave --mount type=bind,src=/var/run/dbus,dst=/var/run/dbus,options=rbind:rprivate --mount type=bind,src=/run/systemd,dst=/run/systemd,options=rbind:rprivate --env KUBECONFIG=/rootfs/var/lib/kops/kubeconfig --mount type=bind,src=/usr/local/bin,dst=/opt/kops/bin,options=rbind:ro:rprivate --env PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/kops/bin docker.io/library/protokube:test protokube /usr/bin/protokube --bootstrap-master-node-labels=true --cloud=aws --containerized=true --dns-internal-suffix=.internal.minimal.example.com --dns=aws-route53 --initialize-rbac=true --manage-etcd=false --master=true --node-name=example-hostname --remove-dns-names=etcd-master-us-test-1a.internal.minimal.example.com,etcd-events-master-us-test-1a.internal.minimal.example.com --v=4
ExecStartPre=-/usr/bin/ctr container rm protokube
ExecStartPre=/bin/true
ExecStart=/usr/bin/ctr run --net-host --with-ns pid:/proc/1/ns/pid --privileged --mount type=bind,src=/,dst=/rootfs,options=rbind:rslave --mount type=bind,src=/var/run/dbus,dst=/var/run/dbus,options=rbind:rprivate --mount type=bind,src=/run/systemd,dst=/run/systemd,options=rbind:rprivate --env KUBECONFIG=/rootfs/var/lib/kops/kubeconfig --mount type=bind,src=/usr/local/bin,dst=/opt/kops/bin,options=rbind:ro:rprivate --env PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/kops/bin docker.io/library/protokube:test protokube /usr/bin/protokube --bootstrap-master-node-labels=true --cloud=aws --containerized=true --dns-internal-suffix=.internal.minimal.example.com --dns=aws-route53 --initialize-rbac=true --manage-etcd=false --master=true --node-name=example-hostname --remove-dns-names=etcd-master-us-test-1a.internal.minimal.example.com,etcd-events-master-us-test-1a.internal.minimal.example.com --v=4
Restart=always
RestartSec=2s
StartLimitInterval=0

View File

@ -22,7 +22,8 @@ import (
"k8s.io/kops/upup/pkg/fi"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/util/mount"
utilexec "k8s.io/utils/exec"
"k8s.io/utils/mount"
)
// VolumesBuilder maintains the volume mounting
@ -45,11 +46,11 @@ func (b *VolumesBuilder) Build(c *fi.ModelBuilderContext) error {
for _, x := range b.InstanceGroup.Spec.VolumeMounts {
// @check the directory exists, else create it
if err := b.EnsureDirectory(x.Path); err != nil {
return fmt.Errorf("Failed to ensure the directory: %s, error: %s", x.Path, err)
return fmt.Errorf("failed to ensure the directory: %s, error: %s", x.Path, err)
}
m := &mount.SafeFormatAndMount{
Exec: mount.NewOsExec(),
Exec: utilexec.New(),
Interface: mount.New(""),
}
@ -64,7 +65,7 @@ func (b *VolumesBuilder) Build(c *fi.ModelBuilderContext) error {
klog.Infof("Attempting to format and mount device: %s, path: %s", x.Device, x.Path)
if err := m.FormatAndMount(x.Device, x.Path, x.Filesystem, x.MountOptions); err != nil {
klog.Errorf("Failed to mount the device: %s on: %s, error: %s", x.Device, x.Path, err)
klog.Errorf("failed to mount the device: %s on: %s, error: %s", x.Device, x.Path, err)
return err
}

View File

@ -22,7 +22,6 @@ import (
"github.com/blang/semver"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/util/pkg/vfs"
@ -36,8 +35,8 @@ const (
)
type Channel struct {
v1.TypeMeta `json:",inline"`
ObjectMeta metav1.ObjectMeta `json:"metadata,omitempty"`
metav1.TypeMeta `json:",inline"`
ObjectMeta metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ChannelSpec `json:"spec,omitempty"`
}

View File

@ -182,6 +182,10 @@ type ClusterSpec struct {
// UseHostCertificates will mount /etc/ssl/certs to inside needed containers.
// This is needed if some APIs do have self-signed certs
UseHostCertificates *bool `json:"useHostCertificates,omitempty"`
// SysctlParameters will configure kernel parameters using sysctl(8). When
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
}
// NodeAuthorizationSpec is used to node authorization

View File

@ -396,6 +396,8 @@ type KubeAPIServerConfig struct {
AuthorizationWebhookCacheUnauthorizedTTL *metav1.Duration `json:"authorizationWebhookCacheUnauthorizedTtl,omitempty" flag:"authorization-webhook-cache-unauthorized-ttl"`
// AuthorizationRBACSuperUser is the name of the superuser for default rbac
AuthorizationRBACSuperUser *string `json:"authorizationRbacSuperUser,omitempty" flag:"authorization-rbac-super-user"`
// EncryptionProviderConfig enables encryption at rest for secrets.
EncryptionProviderConfig *string `json:"encryptionProviderConfig,omitempty" flag:"encryption-provider-config"`
// ExperimentalEncryptionProviderConfig enables encryption at rest for secrets.
ExperimentalEncryptionProviderConfig *string `json:"experimentalEncryptionProviderConfig,omitempty" flag:"experimental-encryption-provider-config"`

View File

@ -155,6 +155,10 @@ type InstanceGroupSpec struct {
SecurityGroupOverride *string `json:"securityGroupOverride,omitempty"`
// InstanceProtection makes new instances in an autoscaling group protected from scale in
InstanceProtection *bool `json:"instanceProtection,omitempty"`
// SysctlParameters will configure kernel parameters using sysctl(8). When
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
}
const (

View File

@ -16,6 +16,8 @@ limitations under the License.
package kops
import "k8s.io/apimachinery/pkg/api/resource"
// NetworkingSpec allows selection and configuration of a networking plugin
type NetworkingSpec struct {
Classic *ClassicNetworkingSpec `json:"classic,omitempty"`
@ -65,6 +67,23 @@ type WeaveNetworkingSpec struct {
ConnLimit *int32 `json:"connLimit,omitempty"`
NoMasqLocal *int32 `json:"noMasqLocal,omitempty"`
NetExtraArgs string `json:"netExtraArgs,omitempty"`
// MemoryRequest memory request of weave container. Default 200Mi
MemoryRequest *resource.Quantity `json:"memoryRequest,omitempty"`
// CPURequest CPU request of weave container. Default 50m
CPURequest *resource.Quantity `json:"cpuRequest,omitempty"`
// MemoryLimit memory limit of weave container. Default 200Mi
MemoryLimit *resource.Quantity `json:"memoryLimit,omitempty"`
// CPULimit CPU limit of weave container.
CPULimit *resource.Quantity `json:"cpuLimit,omitempty"`
// NPCMemoryRequest memory request of weave npc container. Default 200Mi
NPCMemoryRequest *resource.Quantity `json:"npcMemoryRequest,omitempty"`
// NPCCPURequest CPU request of weave npc container. Default 50m
NPCCPURequest *resource.Quantity `json:"npcCPURequest,omitempty"`
// NPCMemoryLimit memory limit of weave npc container. Default 200Mi
NPCMemoryLimit *resource.Quantity `json:"npcMemoryLimit,omitempty"`
// NPCCPULimit CPU limit of weave npc container
NPCCPULimit *resource.Quantity `json:"npcCPULimit,omitempty"`
}
// FlannelNetworkingSpec declares that we want Flannel networking

View File

@ -121,3 +121,48 @@ func TestParseConfigYAML(t *testing.T) {
})
}
}
// TestWeaveParseConfigYAML verifies that Weave resource settings supplied in
// the networking section of a cluster spec YAML are unmarshalled into the
// Weave networking spec, using NPCCPURequest as the probe field. An empty
// networking block must leave the Weave spec nil (observed as an empty value).
func TestWeaveParseConfigYAML(t *testing.T) {
	cases := []struct {
		Config        string
		ExpectedValue string
	}{
		{
			Config:        "networking: { weave: { memoryRequest: 500Mi, cpuRequest: 100m, npcMemoryRequest: 100Mi, npcCPURequest: 50m} }",
			ExpectedValue: "50m",
		},
		{
			Config:        "networking: {}",
			ExpectedValue: "",
		},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(fmt.Sprintf("%q", tc.Config), func(t *testing.T) {
			var spec ClusterSpec
			if err := utils.YamlUnmarshal([]byte(tc.Config), &spec); err != nil {
				t.Errorf("error parsing configuration %q: %v", tc.Config, err)
				return
			}

			// The Weave spec is only populated when the YAML mentions it.
			actual := ""
			if spec.Networking.Weave != nil {
				actual = spec.Networking.Weave.NPCCPURequest.String()
			}

			switch {
			case tc.ExpectedValue == "" && actual != "":
				t.Errorf("expected empty value for Networking.Weave.NPCCPURequest.String(), got %v", actual)
			case tc.ExpectedValue != "" && actual == "":
				t.Errorf("expected %v value for Networking.Weave.NPCCPURequest.String(), got empty string", tc.ExpectedValue)
			case tc.ExpectedValue != "" && actual != tc.ExpectedValue:
				t.Errorf("expected %v value for Networking.Weave.NPCCPURequest.String(), got %v", tc.ExpectedValue, actual)
			}
		})
	}
}

View File

@ -32,7 +32,7 @@ func CreateClusterConfig(clientset simple.Clientset, cluster *api.Cluster, group
return fmt.Errorf("InstanceGroup #%d did not have a Name", i+1)
}
if names[ns.ObjectMeta.Name] {
return fmt.Errorf("Duplicate InstanceGroup Name found: %q", ns.ObjectMeta.Name)
return fmt.Errorf("duplicate InstanceGroup Name found: %q", ns.ObjectMeta.Name)
}
names[ns.ObjectMeta.Name] = true
}

View File

@ -180,6 +180,10 @@ type ClusterSpec struct {
// UseHostCertificates will mount /etc/ssl/certs to inside needed containers.
// This is needed if some APIs do have self-signed certs
UseHostCertificates *bool `json:"useHostCertificates,omitempty"`
// SysctlParameters will configure kernel parameters using sysctl(8). When
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
}
// NodeAuthorizationSpec is used to node authorization

View File

@ -396,6 +396,8 @@ type KubeAPIServerConfig struct {
AuthorizationWebhookCacheUnauthorizedTTL *metav1.Duration `json:"authorizationWebhookCacheUnauthorizedTtl,omitempty" flag:"authorization-webhook-cache-unauthorized-ttl"`
// AuthorizationRBACSuperUser is the name of the superuser for default rbac
AuthorizationRBACSuperUser *string `json:"authorizationRbacSuperUser,omitempty" flag:"authorization-rbac-super-user"`
// EncryptionProviderConfig enables encryption at rest for secrets.
EncryptionProviderConfig *string `json:"encryptionProviderConfig,omitempty" flag:"encryption-provider-config"`
// ExperimentalEncryptionProviderConfig enables encryption at rest for secrets.
ExperimentalEncryptionProviderConfig *string `json:"experimentalEncryptionProviderConfig,omitempty" flag:"experimental-encryption-provider-config"`

View File

@ -142,6 +142,10 @@ type InstanceGroupSpec struct {
SecurityGroupOverride *string `json:"securityGroupOverride,omitempty"`
// InstanceProtection makes new instances in an autoscaling group protected from scale in
InstanceProtection *bool `json:"instanceProtection,omitempty"`
// SysctlParameters will configure kernel parameters using sysctl(8). When
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
}
const (

View File

@ -16,6 +16,8 @@ limitations under the License.
package v1alpha1
import "k8s.io/apimachinery/pkg/api/resource"
// NetworkingSpec allows selection and configuration of a networking plugin
type NetworkingSpec struct {
Classic *ClassicNetworkingSpec `json:"classic,omitempty"`
@ -65,6 +67,23 @@ type WeaveNetworkingSpec struct {
ConnLimit *int32 `json:"connLimit,omitempty"`
NoMasqLocal *int32 `json:"noMasqLocal,omitempty"`
NetExtraArgs string `json:"netExtraArgs,omitempty"`
// MemoryRequest memory request of weave container. Default 200Mi
MemoryRequest *resource.Quantity `json:"memoryRequest,omitempty"`
// CPURequest CPU request of weave container. Default 50m
CPURequest *resource.Quantity `json:"cpuRequest,omitempty"`
// MemoryLimit memory limit of weave container. Default 200Mi
MemoryLimit *resource.Quantity `json:"memoryLimit,omitempty"`
// CPULimit CPU limit of weave container.
CPULimit *resource.Quantity `json:"cpuLimit,omitempty"`
// NPCMemoryRequest memory request of weave npc container. Default 200Mi
NPCMemoryRequest *resource.Quantity `json:"npcMemoryRequest,omitempty"`
// NPCCPURequest CPU request of weave npc container. Default 50m
NPCCPURequest *resource.Quantity `json:"npcCPURequest,omitempty"`
// NPCMemoryLimit memory limit of weave npc container. Default 200Mi
NPCMemoryLimit *resource.Quantity `json:"npcMemoryLimit,omitempty"`
// NPCCPULimit CPU limit of weave npc container
NPCCPULimit *resource.Quantity `json:"npcCPULimit,omitempty"`
}
// FlannelNetworkingSpec declares that we want Flannel networking

View File

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -1877,6 +1877,7 @@ func autoConvert_v1alpha1_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
out.Target = nil
}
out.UseHostCertificates = in.UseHostCertificates
out.SysctlParameters = in.SysctlParameters
return nil
}
@ -2174,6 +2175,7 @@ func autoConvert_kops_ClusterSpec_To_v1alpha1_ClusterSpec(in *kops.ClusterSpec,
out.Target = nil
}
out.UseHostCertificates = in.UseHostCertificates
out.SysctlParameters = in.SysctlParameters
return nil
}
@ -3055,6 +3057,7 @@ func autoConvert_v1alpha1_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan
}
out.SecurityGroupOverride = in.SecurityGroupOverride
out.InstanceProtection = in.InstanceProtection
out.SysctlParameters = in.SysctlParameters
return nil
}
@ -3177,6 +3180,7 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha1_InstanceGroupSpec(in *kops.I
}
out.SecurityGroupOverride = in.SecurityGroupOverride
out.InstanceProtection = in.InstanceProtection
out.SysctlParameters = in.SysctlParameters
return nil
}
@ -3287,6 +3291,7 @@ func autoConvert_v1alpha1_KubeAPIServerConfig_To_kops_KubeAPIServerConfig(in *Ku
out.AuthorizationWebhookCacheAuthorizedTTL = in.AuthorizationWebhookCacheAuthorizedTTL
out.AuthorizationWebhookCacheUnauthorizedTTL = in.AuthorizationWebhookCacheUnauthorizedTTL
out.AuthorizationRBACSuperUser = in.AuthorizationRBACSuperUser
out.EncryptionProviderConfig = in.EncryptionProviderConfig
out.ExperimentalEncryptionProviderConfig = in.ExperimentalEncryptionProviderConfig
out.RequestheaderUsernameHeaders = in.RequestheaderUsernameHeaders
out.RequestheaderGroupHeaders = in.RequestheaderGroupHeaders
@ -3386,6 +3391,7 @@ func autoConvert_kops_KubeAPIServerConfig_To_v1alpha1_KubeAPIServerConfig(in *ko
out.AuthorizationWebhookCacheAuthorizedTTL = in.AuthorizationWebhookCacheAuthorizedTTL
out.AuthorizationWebhookCacheUnauthorizedTTL = in.AuthorizationWebhookCacheUnauthorizedTTL
out.AuthorizationRBACSuperUser = in.AuthorizationRBACSuperUser
out.EncryptionProviderConfig = in.EncryptionProviderConfig
out.ExperimentalEncryptionProviderConfig = in.ExperimentalEncryptionProviderConfig
out.RequestheaderUsernameHeaders = in.RequestheaderUsernameHeaders
out.RequestheaderGroupHeaders = in.RequestheaderGroupHeaders
@ -4849,6 +4855,14 @@ func autoConvert_v1alpha1_WeaveNetworkingSpec_To_kops_WeaveNetworkingSpec(in *We
out.ConnLimit = in.ConnLimit
out.NoMasqLocal = in.NoMasqLocal
out.NetExtraArgs = in.NetExtraArgs
out.MemoryRequest = in.MemoryRequest
out.CPURequest = in.CPURequest
out.MemoryLimit = in.MemoryLimit
out.CPULimit = in.CPULimit
out.NPCMemoryRequest = in.NPCMemoryRequest
out.NPCCPURequest = in.NPCCPURequest
out.NPCMemoryLimit = in.NPCMemoryLimit
out.NPCCPULimit = in.NPCCPULimit
return nil
}
@ -4862,6 +4876,14 @@ func autoConvert_kops_WeaveNetworkingSpec_To_v1alpha1_WeaveNetworkingSpec(in *ko
out.ConnLimit = in.ConnLimit
out.NoMasqLocal = in.NoMasqLocal
out.NetExtraArgs = in.NetExtraArgs
out.MemoryRequest = in.MemoryRequest
out.CPURequest = in.CPURequest
out.MemoryLimit = in.MemoryLimit
out.CPULimit = in.CPULimit
out.NPCMemoryRequest = in.NPCMemoryRequest
out.NPCCPURequest = in.NPCCPURequest
out.NPCMemoryLimit = in.NPCMemoryLimit
out.NPCCPULimit = in.NPCCPULimit
return nil
}

View File

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -808,6 +808,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(bool)
**out = **in
}
if in.SysctlParameters != nil {
in, out := &in.SysctlParameters, &out.SysctlParameters
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -1723,6 +1728,11 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) {
*out = new(bool)
**out = **in
}
if in.SysctlParameters != nil {
in, out := &in.SysctlParameters, &out.SysctlParameters
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -1993,6 +2003,11 @@ func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) {
*out = new(string)
**out = **in
}
if in.EncryptionProviderConfig != nil {
in, out := &in.EncryptionProviderConfig, &out.EncryptionProviderConfig
*out = new(string)
**out = **in
}
if in.ExperimentalEncryptionProviderConfig != nil {
in, out := &in.ExperimentalEncryptionProviderConfig, &out.ExperimentalEncryptionProviderConfig
*out = new(string)
@ -3471,6 +3486,46 @@ func (in *WeaveNetworkingSpec) DeepCopyInto(out *WeaveNetworkingSpec) {
*out = new(int32)
**out = **in
}
if in.MemoryRequest != nil {
in, out := &in.MemoryRequest, &out.MemoryRequest
x := (*in).DeepCopy()
*out = &x
}
if in.CPURequest != nil {
in, out := &in.CPURequest, &out.CPURequest
x := (*in).DeepCopy()
*out = &x
}
if in.MemoryLimit != nil {
in, out := &in.MemoryLimit, &out.MemoryLimit
x := (*in).DeepCopy()
*out = &x
}
if in.CPULimit != nil {
in, out := &in.CPULimit, &out.CPULimit
x := (*in).DeepCopy()
*out = &x
}
if in.NPCMemoryRequest != nil {
in, out := &in.NPCMemoryRequest, &out.NPCMemoryRequest
x := (*in).DeepCopy()
*out = &x
}
if in.NPCCPURequest != nil {
in, out := &in.NPCCPURequest, &out.NPCCPURequest
x := (*in).DeepCopy()
*out = &x
}
if in.NPCMemoryLimit != nil {
in, out := &in.NPCMemoryLimit, &out.NPCMemoryLimit
x := (*in).DeepCopy()
*out = &x
}
if in.NPCCPULimit != nil {
in, out := &in.NPCCPULimit, &out.NPCCPULimit
x := (*in).DeepCopy()
*out = &x
}
return
}

View File

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -180,6 +180,10 @@ type ClusterSpec struct {
// UseHostCertificates will mount /etc/ssl/certs to inside needed containers.
// This is needed if some APIs do have self-signed certs
UseHostCertificates *bool `json:"useHostCertificates,omitempty"`
// SysctlParameters will configure kernel parameters using sysctl(8). When
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
}
// NodeAuthorizationSpec is used for node authorization

View File

@ -396,6 +396,8 @@ type KubeAPIServerConfig struct {
AuthorizationWebhookCacheUnauthorizedTTL *metav1.Duration `json:"authorizationWebhookCacheUnauthorizedTtl,omitempty" flag:"authorization-webhook-cache-unauthorized-ttl"`
// AuthorizationRBACSuperUser is the name of the superuser for default rbac
AuthorizationRBACSuperUser *string `json:"authorizationRbacSuperUser,omitempty" flag:"authorization-rbac-super-user"`
// EncryptionProviderConfig enables encryption at rest for secrets.
EncryptionProviderConfig *string `json:"encryptionProviderConfig,omitempty" flag:"encryption-provider-config"`
// ExperimentalEncryptionProviderConfig enables encryption at rest for secrets.
ExperimentalEncryptionProviderConfig *string `json:"experimentalEncryptionProviderConfig,omitempty" flag:"experimental-encryption-provider-config"`

View File

@ -149,6 +149,10 @@ type InstanceGroupSpec struct {
SecurityGroupOverride *string `json:"securityGroupOverride,omitempty"`
// InstanceProtection makes new instances in an autoscaling group protected from scale in
InstanceProtection *bool `json:"instanceProtection,omitempty"`
// SysctlParameters will configure kernel parameters using sysctl(8). When
// specified, each parameter must follow the form variable=value, the way
// it would appear in sysctl.conf.
SysctlParameters []string `json:"sysctlParameters,omitempty"`
}
const (

View File

@ -16,6 +16,8 @@ limitations under the License.
package v1alpha2
import "k8s.io/apimachinery/pkg/api/resource"
// NetworkingSpec allows selection and configuration of a networking plugin
type NetworkingSpec struct {
Classic *ClassicNetworkingSpec `json:"classic,omitempty"`
@ -65,6 +67,23 @@ type WeaveNetworkingSpec struct {
ConnLimit *int32 `json:"connLimit,omitempty"`
NoMasqLocal *int32 `json:"noMasqLocal,omitempty"`
NetExtraArgs string `json:"netExtraArgs,omitempty"`
// MemoryRequest memory request of weave container. Default 200Mi
MemoryRequest *resource.Quantity `json:"memoryRequest,omitempty"`
// CPURequest CPU request of weave container. Default 50m
CPURequest *resource.Quantity `json:"cpuRequest,omitempty"`
// MemoryLimit memory limit of weave container. Default 200Mi
MemoryLimit *resource.Quantity `json:"memoryLimit,omitempty"`
// CPULimit CPU limit of weave container.
CPULimit *resource.Quantity `json:"cpuLimit,omitempty"`
// NPCMemoryRequest memory request of weave npc container. Default 200Mi
NPCMemoryRequest *resource.Quantity `json:"npcMemoryRequest,omitempty"`
// NPCCPURequest CPU request of weave npc container. Default 50m
NPCCPURequest *resource.Quantity `json:"npcCPURequest,omitempty"`
// NPCMemoryLimit memory limit of weave npc container. Default 200Mi
NPCMemoryLimit *resource.Quantity `json:"npcMemoryLimit,omitempty"`
// NPCCPULimit CPU limit of weave npc container
NPCCPULimit *resource.Quantity `json:"npcCPULimit,omitempty"`
}
// FlannelNetworkingSpec declares that we want Flannel networking

View File

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -1930,6 +1930,7 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
out.Target = nil
}
out.UseHostCertificates = in.UseHostCertificates
out.SysctlParameters = in.SysctlParameters
return nil
}
@ -2242,6 +2243,7 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec,
out.Target = nil
}
out.UseHostCertificates = in.UseHostCertificates
out.SysctlParameters = in.SysctlParameters
return nil
}
@ -3173,6 +3175,7 @@ func autoConvert_v1alpha2_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *Instan
}
out.SecurityGroupOverride = in.SecurityGroupOverride
out.InstanceProtection = in.InstanceProtection
out.SysctlParameters = in.SysctlParameters
return nil
}
@ -3300,6 +3303,7 @@ func autoConvert_kops_InstanceGroupSpec_To_v1alpha2_InstanceGroupSpec(in *kops.I
}
out.SecurityGroupOverride = in.SecurityGroupOverride
out.InstanceProtection = in.InstanceProtection
out.SysctlParameters = in.SysctlParameters
return nil
}
@ -3557,6 +3561,7 @@ func autoConvert_v1alpha2_KubeAPIServerConfig_To_kops_KubeAPIServerConfig(in *Ku
out.AuthorizationWebhookCacheAuthorizedTTL = in.AuthorizationWebhookCacheAuthorizedTTL
out.AuthorizationWebhookCacheUnauthorizedTTL = in.AuthorizationWebhookCacheUnauthorizedTTL
out.AuthorizationRBACSuperUser = in.AuthorizationRBACSuperUser
out.EncryptionProviderConfig = in.EncryptionProviderConfig
out.ExperimentalEncryptionProviderConfig = in.ExperimentalEncryptionProviderConfig
out.RequestheaderUsernameHeaders = in.RequestheaderUsernameHeaders
out.RequestheaderGroupHeaders = in.RequestheaderGroupHeaders
@ -3656,6 +3661,7 @@ func autoConvert_kops_KubeAPIServerConfig_To_v1alpha2_KubeAPIServerConfig(in *ko
out.AuthorizationWebhookCacheAuthorizedTTL = in.AuthorizationWebhookCacheAuthorizedTTL
out.AuthorizationWebhookCacheUnauthorizedTTL = in.AuthorizationWebhookCacheUnauthorizedTTL
out.AuthorizationRBACSuperUser = in.AuthorizationRBACSuperUser
out.EncryptionProviderConfig = in.EncryptionProviderConfig
out.ExperimentalEncryptionProviderConfig = in.ExperimentalEncryptionProviderConfig
out.RequestheaderUsernameHeaders = in.RequestheaderUsernameHeaders
out.RequestheaderGroupHeaders = in.RequestheaderGroupHeaders
@ -5177,6 +5183,14 @@ func autoConvert_v1alpha2_WeaveNetworkingSpec_To_kops_WeaveNetworkingSpec(in *We
out.ConnLimit = in.ConnLimit
out.NoMasqLocal = in.NoMasqLocal
out.NetExtraArgs = in.NetExtraArgs
out.MemoryRequest = in.MemoryRequest
out.CPURequest = in.CPURequest
out.MemoryLimit = in.MemoryLimit
out.CPULimit = in.CPULimit
out.NPCMemoryRequest = in.NPCMemoryRequest
out.NPCCPURequest = in.NPCCPURequest
out.NPCMemoryLimit = in.NPCMemoryLimit
out.NPCCPULimit = in.NPCCPULimit
return nil
}
@ -5190,6 +5204,14 @@ func autoConvert_kops_WeaveNetworkingSpec_To_v1alpha2_WeaveNetworkingSpec(in *ko
out.ConnLimit = in.ConnLimit
out.NoMasqLocal = in.NoMasqLocal
out.NetExtraArgs = in.NetExtraArgs
out.MemoryRequest = in.MemoryRequest
out.CPURequest = in.CPURequest
out.MemoryLimit = in.MemoryLimit
out.CPULimit = in.CPULimit
out.NPCMemoryRequest = in.NPCMemoryRequest
out.NPCCPURequest = in.NPCCPURequest
out.NPCMemoryLimit = in.NPCMemoryLimit
out.NPCCPULimit = in.NPCCPULimit
return nil
}

View File

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -781,6 +781,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(bool)
**out = **in
}
if in.SysctlParameters != nil {
in, out := &in.SysctlParameters, &out.SysctlParameters
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -1685,6 +1690,11 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) {
*out = new(bool)
**out = **in
}
if in.SysctlParameters != nil {
in, out := &in.SysctlParameters, &out.SysctlParameters
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -2064,6 +2074,11 @@ func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) {
*out = new(string)
**out = **in
}
if in.EncryptionProviderConfig != nil {
in, out := &in.EncryptionProviderConfig, &out.EncryptionProviderConfig
*out = new(string)
**out = **in
}
if in.ExperimentalEncryptionProviderConfig != nil {
in, out := &in.ExperimentalEncryptionProviderConfig, &out.ExperimentalEncryptionProviderConfig
*out = new(string)
@ -3542,6 +3557,46 @@ func (in *WeaveNetworkingSpec) DeepCopyInto(out *WeaveNetworkingSpec) {
*out = new(int32)
**out = **in
}
if in.MemoryRequest != nil {
in, out := &in.MemoryRequest, &out.MemoryRequest
x := (*in).DeepCopy()
*out = &x
}
if in.CPURequest != nil {
in, out := &in.CPURequest, &out.CPURequest
x := (*in).DeepCopy()
*out = &x
}
if in.MemoryLimit != nil {
in, out := &in.MemoryLimit, &out.MemoryLimit
x := (*in).DeepCopy()
*out = &x
}
if in.CPULimit != nil {
in, out := &in.CPULimit, &out.CPULimit
x := (*in).DeepCopy()
*out = &x
}
if in.NPCMemoryRequest != nil {
in, out := &in.NPCMemoryRequest, &out.NPCMemoryRequest
x := (*in).DeepCopy()
*out = &x
}
if in.NPCCPURequest != nil {
in, out := &in.NPCCPURequest, &out.NPCCPURequest
x := (*in).DeepCopy()
*out = &x
}
if in.NPCMemoryLimit != nil {
in, out := &in.NPCMemoryLimit, &out.NPCMemoryLimit
x := (*in).DeepCopy()
*out = &x
}
if in.NPCCPULimit != nil {
in, out := &in.NPCCPULimit, &out.NPCCPULimit
x := (*in).DeepCopy()
*out = &x
}
return
}

View File

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -77,7 +77,7 @@ func ValidateInstanceGroup(g *kops.InstanceGroup) error {
if g.IsMaster() {
if len(g.Spec.Subnets) == 0 {
return fmt.Errorf("Master InstanceGroup %s did not specify any Subnets", g.ObjectMeta.Name)
return fmt.Errorf("master InstanceGroup %s did not specify any Subnets", g.ObjectMeta.Name)
}
}
@ -218,7 +218,7 @@ func CrossValidateInstanceGroup(g *kops.InstanceGroup, cluster *kops.Cluster, st
for i := range cluster.Spec.Subnets {
s := &cluster.Spec.Subnets[i]
if clusterSubnets[s.Name] != nil {
return fmt.Errorf("Subnets contained a duplicate value: %v", s.Name)
return fmt.Errorf("subnets contained a duplicate value: %v", s.Name)
}
clusterSubnets[s.Name] = s
}
@ -232,7 +232,7 @@ func CrossValidateInstanceGroup(g *kops.InstanceGroup, cluster *kops.Cluster, st
k8sVersion, err := util.ParseKubernetesVersion(cluster.Spec.KubernetesVersion)
if err != nil {
return fmt.Errorf("Unable to determine kubernetes version from %q", cluster.Spec.KubernetesVersion)
return fmt.Errorf("unable to determine kubernetes version from %q", cluster.Spec.KubernetesVersion)
}
allErrs := field.ErrorList{}

View File

@ -384,7 +384,7 @@ func ValidateEtcdVersionForCalicoV3(e *kops.EtcdClusterSpec, majorVersion string
}
sem, err := semver.Parse(strings.TrimPrefix(version, "v"))
if err != nil {
allErrs = append(allErrs, field.InternalError(fldPath.Child("MajorVersion"), fmt.Errorf("Failed to parse Etcd version to check compatibility: %s", err)))
allErrs = append(allErrs, field.InternalError(fldPath.Child("MajorVersion"), fmt.Errorf("failed to parse Etcd version to check compatibility: %s", err)))
}
if sem.Major != 3 {

View File

@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -881,6 +881,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(bool)
**out = **in
}
if in.SysctlParameters != nil {
in, out := &in.SysctlParameters, &out.SysctlParameters
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -1851,6 +1856,11 @@ func (in *InstanceGroupSpec) DeepCopyInto(out *InstanceGroupSpec) {
*out = new(bool)
**out = **in
}
if in.SysctlParameters != nil {
in, out := &in.SysctlParameters, &out.SysctlParameters
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
@ -2246,6 +2256,11 @@ func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) {
*out = new(string)
**out = **in
}
if in.EncryptionProviderConfig != nil {
in, out := &in.EncryptionProviderConfig, &out.EncryptionProviderConfig
*out = new(string)
**out = **in
}
if in.ExperimentalEncryptionProviderConfig != nil {
in, out := &in.ExperimentalEncryptionProviderConfig, &out.ExperimentalEncryptionProviderConfig
*out = new(string)
@ -3756,6 +3771,46 @@ func (in *WeaveNetworkingSpec) DeepCopyInto(out *WeaveNetworkingSpec) {
*out = new(int32)
**out = **in
}
if in.MemoryRequest != nil {
in, out := &in.MemoryRequest, &out.MemoryRequest
x := (*in).DeepCopy()
*out = &x
}
if in.CPURequest != nil {
in, out := &in.CPURequest, &out.CPURequest
x := (*in).DeepCopy()
*out = &x
}
if in.MemoryLimit != nil {
in, out := &in.MemoryLimit, &out.MemoryLimit
x := (*in).DeepCopy()
*out = &x
}
if in.CPULimit != nil {
in, out := &in.CPULimit, &out.CPULimit
x := (*in).DeepCopy()
*out = &x
}
if in.NPCMemoryRequest != nil {
in, out := &in.NPCMemoryRequest, &out.NPCMemoryRequest
x := (*in).DeepCopy()
*out = &x
}
if in.NPCCPURequest != nil {
in, out := &in.NPCCPURequest, &out.NPCCPURequest
x := (*in).DeepCopy()
*out = &x
}
if in.NPCMemoryLimit != nil {
in, out := &in.NPCMemoryLimit, &out.NPCMemoryLimit
x := (*in).DeepCopy()
*out = &x
}
if in.NPCCPULimit != nil {
in, out := &in.NPCCPULimit, &out.NPCCPULimit
x := (*in).DeepCopy()
*out = &x
}
return
}

View File

@ -1,5 +1,5 @@
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,5 +1,5 @@
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

Some files were not shown because too many files have changed in this diff Show More