Merge pull request #12906 from olemarkus/karpenter

Support Karpenter
Kubernetes Prow Robot authored 2021-12-14 13:58:50 -08:00, committed by GitHub
commit 424452a855
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
95 changed files with 7727 additions and 15 deletions

View File

@ -594,6 +594,26 @@ func TestExternalDNSIRSA(t *testing.T) {
runTestTerraformAWS(t)
}
func TestKarpenter(t *testing.T) {
featureflag.ParseFlags("+Karpenter")
unsetFeatureFlags := func() {
featureflag.ParseFlags("-Karpenter")
}
defer unsetFeatureFlags()
test := newIntegrationTest("minimal.example.com", "karpenter").
withOIDCDiscovery().
withAddons(dnsControllerAddon).
withServiceAccountRole("dns-controller.kube-system", true).
withAddons("karpenter.sh-k8s-1.19").
withServiceAccountRole("karpenter.kube-system", true)
test.expectTerraformFilenames = append(test.expectTerraformFilenames,
"aws_launch_template_karpenter-nodes.minimal.example.com_user_data",
"aws_s3_bucket_object_nodeupconfig-karpenter-nodes_content",
)
test.runTestTerraformAWS(t)
}
// TestSharedSubnet runs the test on a configuration with a shared subnet (and VPC)
func TestSharedSubnet(t *testing.T) {
newIntegrationTest("sharedsubnet.example.com", "shared_subnet").

View File

@ -1319,6 +1319,12 @@ spec:
kube-proxy on the master * enable debugging handlers on the master,
so kubectl logs works'
type: boolean
karpenter:
description: Karpenter defines the Karpenter configuration.
properties:
enabled:
type: boolean
type: object
keyStore:
description: KeyStore is the VFS path to where SSL keys and certificates
are stored

View File

@ -661,6 +661,9 @@ spec:
machineType:
description: MachineType is the instance class
type: string
manager:
description: Manager determines what is managing the node lifecycle
type: string
maxPrice:
description: MaxPrice indicates this is a spot-pricing group, with
the specified value as our max-price bid

View File

@ -23,6 +23,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/upup/pkg/fi/utils"
)
@ -213,6 +214,12 @@ type ClusterSpec struct {
ServiceAccountIssuerDiscovery *ServiceAccountIssuerDiscoveryConfig `json:"serviceAccountIssuerDiscovery,omitempty"`
// SnapshotController defines the CSI Snapshot Controller configuration.
SnapshotController *SnapshotControllerConfig `json:"snapshotController,omitempty"`
// Karpenter defines the Karpenter configuration.
Karpenter *KarpenterConfig `json:"karpenter,omitempty"`
}
type KarpenterConfig struct {
Enabled bool `json:"enabled,omitempty"`
}
// ServiceAccountIssuerDiscoveryConfig configures an OIDC Issuer.
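For orientation, a minimal Go sketch (not part of this diff) of setting the new field on the internal API; it assumes the k8s.io/kops module is importable:

package main

import (
	"fmt"

	"k8s.io/kops/pkg/apis/kops"
)

func main() {
	// Opt a cluster into Karpenter support. Validation (see validateClusterSpec
	// further down) still requires the Karpenter feature flag to be set.
	cluster := &kops.Cluster{}
	cluster.Spec.Karpenter = &kops.KarpenterConfig{Enabled: true}
	fmt.Println(cluster.Spec.Karpenter.Enabled) // true
}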

View File

@ -82,8 +82,17 @@ const (
// SupportedFilesystems is a list of supported filesystems to format as
var SupportedFilesystems = []string{BtfsFilesystem, Ext4Filesystem, XFSFilesystem}
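// InstanceManager describes what manages the lifecycle of the instances in this InstanceGroup.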
type InstanceManager string
const (
InstanceManagerCloudGroup InstanceManager = "CloudGroup"
InstanceManagerKarpenter InstanceManager = "Karpenter"
)
// InstanceGroupSpec is the specification for an InstanceGroup
type InstanceGroupSpec struct {
// Manager determines what is managing the node lifecycle
Manager InstanceManager `json:"manager,omitempty"`
// Type determines the role of instances in this instance group: masters or nodes
Role InstanceGroupRole `json:"role,omitempty"`
// Image is the instance (ami etc) we should use
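Not part of the diff: a short sketch of marking an instance group as Karpenter-managed via the new Manager field; builders elsewhere in this PR branch on this value to skip ASG-specific tasks.

package main

import (
	"fmt"

	"k8s.io/kops/pkg/apis/kops"
)

func main() {
	ig := &kops.InstanceGroup{}
	ig.Spec.Role = kops.InstanceGroupRoleNode
	ig.Spec.Manager = kops.InstanceManagerKarpenter

	// The autoscaling-group and node-termination-handler builders in this PR
	// check this field before creating ASG-backed resources.
	fmt.Println(ig.Spec.Manager == kops.InstanceManagerKarpenter) // true
}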

View File

@ -211,6 +211,12 @@ type ClusterSpec struct {
ServiceAccountIssuerDiscovery *ServiceAccountIssuerDiscoveryConfig `json:"serviceAccountIssuerDiscovery,omitempty"`
// SnapshotController defines the CSI Snapshot Controller configuration.
SnapshotController *SnapshotControllerConfig `json:"snapshotController,omitempty"`
// Karpenter defines the Karpenter configuration.
Karpenter *KarpenterConfig `json:"karpenter,omitempty"`
}
type KarpenterConfig struct {
Enabled bool `json:"enabled,omitempty"`
}
// ServiceAccountIssuerDiscoveryConfig configures an OIDC Issuer.

View File

@ -49,8 +49,12 @@ type InstanceGroupList struct {
// InstanceGroupRole string describes the roles of the nodes in this InstanceGroup (master or nodes)
type InstanceGroupRole string
type InstanceManager string
// InstanceGroupSpec is the specification for an InstanceGroup
type InstanceGroupSpec struct {
// Manager determines what is managing the node lifecycle
Manager InstanceManager `json:"manager,omitempty"`
// Type determines the role of instances in this instance group: masters or nodes
Role InstanceGroupRole `json:"role,omitempty"`
// Image is the instance (ami etc) we should use

View File

@ -584,6 +584,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*KarpenterConfig)(nil), (*kops.KarpenterConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_KarpenterConfig_To_kops_KarpenterConfig(a.(*KarpenterConfig), b.(*kops.KarpenterConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kops.KarpenterConfig)(nil), (*KarpenterConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kops_KarpenterConfig_To_v1alpha2_KarpenterConfig(a.(*kops.KarpenterConfig), b.(*KarpenterConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Keyset)(nil), (*kops.Keyset)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha2_Keyset_To_kops_Keyset(a.(*Keyset), b.(*kops.Keyset), scope)
}); err != nil {
@ -2724,6 +2734,15 @@ func autoConvert_v1alpha2_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
} else {
out.SnapshotController = nil
}
if in.Karpenter != nil {
in, out := &in.Karpenter, &out.Karpenter
*out = new(kops.KarpenterConfig)
if err := Convert_v1alpha2_KarpenterConfig_To_kops_KarpenterConfig(*in, *out, s); err != nil {
return err
}
} else {
out.Karpenter = nil
}
return nil
}
@ -3131,6 +3150,15 @@ func autoConvert_kops_ClusterSpec_To_v1alpha2_ClusterSpec(in *kops.ClusterSpec,
} else {
out.SnapshotController = nil
}
if in.Karpenter != nil {
in, out := &in.Karpenter, &out.Karpenter
*out = new(KarpenterConfig)
if err := Convert_kops_KarpenterConfig_To_v1alpha2_KarpenterConfig(*in, *out, s); err != nil {
return err
}
} else {
out.Karpenter = nil
}
return nil
}
@ -4169,6 +4197,7 @@ func Convert_kops_InstanceGroupList_To_v1alpha2_InstanceGroupList(in *kops.Insta
}
func autoConvert_v1alpha2_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *InstanceGroupSpec, out *kops.InstanceGroupSpec, s conversion.Scope) error {
out.Manager = kops.InstanceManager(in.Manager)
out.Role = kops.InstanceGroupRole(in.Role)
out.Image = in.Image
out.MinSize = in.MinSize
@ -4331,6 +4360,7 @@ func Convert_v1alpha2_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *InstanceGr
}
func autoConvert_kops_InstanceGroupSpec_To_v1alpha2_InstanceGroupSpec(in *kops.InstanceGroupSpec, out *InstanceGroupSpec, s conversion.Scope) error {
out.Manager = InstanceManager(in.Manager)
out.Role = InstanceGroupRole(in.Role)
out.Image = in.Image
out.MinSize = in.MinSize
@ -4513,6 +4543,26 @@ func Convert_kops_InstanceMetadataOptions_To_v1alpha2_InstanceMetadataOptions(in
return autoConvert_kops_InstanceMetadataOptions_To_v1alpha2_InstanceMetadataOptions(in, out, s)
}
func autoConvert_v1alpha2_KarpenterConfig_To_kops_KarpenterConfig(in *KarpenterConfig, out *kops.KarpenterConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}
// Convert_v1alpha2_KarpenterConfig_To_kops_KarpenterConfig is an autogenerated conversion function.
func Convert_v1alpha2_KarpenterConfig_To_kops_KarpenterConfig(in *KarpenterConfig, out *kops.KarpenterConfig, s conversion.Scope) error {
return autoConvert_v1alpha2_KarpenterConfig_To_kops_KarpenterConfig(in, out, s)
}
func autoConvert_kops_KarpenterConfig_To_v1alpha2_KarpenterConfig(in *kops.KarpenterConfig, out *KarpenterConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}
// Convert_kops_KarpenterConfig_To_v1alpha2_KarpenterConfig is an autogenerated conversion function.
func Convert_kops_KarpenterConfig_To_v1alpha2_KarpenterConfig(in *kops.KarpenterConfig, out *KarpenterConfig, s conversion.Scope) error {
return autoConvert_kops_KarpenterConfig_To_v1alpha2_KarpenterConfig(in, out, s)
}
func autoConvert_v1alpha2_Keyset_To_kops_Keyset(in *Keyset, out *kops.Keyset, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha2_KeysetSpec_To_kops_KeysetSpec(&in.Spec, &out.Spec, s); err != nil {

View File

@ -1257,6 +1257,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(SnapshotControllerConfig)
(*in).DeepCopyInto(*out)
}
if in.Karpenter != nil {
in, out := &in.Karpenter, &out.Karpenter
*out = new(KarpenterConfig)
**out = **in
}
return
}
@ -2490,6 +2495,22 @@ func (in *InstanceMetadataOptions) DeepCopy() *InstanceMetadataOptions {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KarpenterConfig) DeepCopyInto(out *KarpenterConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KarpenterConfig.
func (in *KarpenterConfig) DeepCopy() *KarpenterConfig {
if in == nil {
return nil
}
out := new(KarpenterConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Keyset) DeepCopyInto(out *Keyset) {
*out = *in

View File

@ -21,6 +21,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kops/pkg/apis/kops"
)
@ -211,6 +212,12 @@ type ClusterSpec struct {
ServiceAccountIssuerDiscovery *ServiceAccountIssuerDiscoveryConfig `json:"serviceAccountIssuerDiscovery,omitempty"`
// SnapshotController defines the CSI Snapshot Controller configuration.
SnapshotController *SnapshotControllerConfig `json:"snapshotController,omitempty"`
// Karpenter defines the Karpenter configuration.
Karpenter *KarpenterConfig `json:"karpenter,omitempty"`
}
type KarpenterConfig struct {
Enabled bool `json:"enabled,omitempty"`
}
// ServiceAccountIssuerDiscoveryConfig configures an OIDC Issuer.

View File

@ -49,8 +49,12 @@ type InstanceGroupList struct {
// InstanceGroupRole string describes the roles of the nodes in this InstanceGroup (master or nodes)
type InstanceGroupRole string
type InstanceManager string
// InstanceGroupSpec is the specification for an InstanceGroup
type InstanceGroupSpec struct {
// Manager determines what is managing the node lifecycle
Manager InstanceManager `json:"manager,omitempty"`
// Type determines the role of instances in this instance group: masters or nodes
Role InstanceGroupRole `json:"role,omitempty"`
// Image is the instance (ami etc) we should use

View File

@ -614,6 +614,16 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*KarpenterConfig)(nil), (*kops.KarpenterConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_KarpenterConfig_To_kops_KarpenterConfig(a.(*KarpenterConfig), b.(*kops.KarpenterConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*kops.KarpenterConfig)(nil), (*KarpenterConfig)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_kops_KarpenterConfig_To_v1alpha3_KarpenterConfig(a.(*kops.KarpenterConfig), b.(*KarpenterConfig), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Keyset)(nil), (*kops.Keyset)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_Keyset_To_kops_Keyset(a.(*Keyset), b.(*kops.Keyset), scope)
}); err != nil {
@ -2607,6 +2617,15 @@ func autoConvert_v1alpha3_ClusterSpec_To_kops_ClusterSpec(in *ClusterSpec, out *
} else {
out.SnapshotController = nil
}
if in.Karpenter != nil {
in, out := &in.Karpenter, &out.Karpenter
*out = new(kops.KarpenterConfig)
if err := Convert_v1alpha3_KarpenterConfig_To_kops_KarpenterConfig(*in, *out, s); err != nil {
return err
}
} else {
out.Karpenter = nil
}
return nil
}
@ -3011,6 +3030,15 @@ func autoConvert_kops_ClusterSpec_To_v1alpha3_ClusterSpec(in *kops.ClusterSpec,
} else {
out.SnapshotController = nil
}
if in.Karpenter != nil {
in, out := &in.Karpenter, &out.Karpenter
*out = new(KarpenterConfig)
if err := Convert_kops_KarpenterConfig_To_v1alpha3_KarpenterConfig(*in, *out, s); err != nil {
return err
}
} else {
out.Karpenter = nil
}
return nil
}
@ -4060,6 +4088,7 @@ func Convert_kops_InstanceGroupList_To_v1alpha3_InstanceGroupList(in *kops.Insta
}
func autoConvert_v1alpha3_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *InstanceGroupSpec, out *kops.InstanceGroupSpec, s conversion.Scope) error {
out.Manager = kops.InstanceManager(in.Manager)
out.Role = kops.InstanceGroupRole(in.Role)
out.Image = in.Image
out.MinSize = in.MinSize
@ -4221,6 +4250,7 @@ func Convert_v1alpha3_InstanceGroupSpec_To_kops_InstanceGroupSpec(in *InstanceGr
}
func autoConvert_kops_InstanceGroupSpec_To_v1alpha3_InstanceGroupSpec(in *kops.InstanceGroupSpec, out *InstanceGroupSpec, s conversion.Scope) error {
out.Manager = InstanceManager(in.Manager)
out.Role = InstanceGroupRole(in.Role)
out.Image = in.Image
out.MinSize = in.MinSize
@ -4403,6 +4433,26 @@ func Convert_kops_InstanceMetadataOptions_To_v1alpha3_InstanceMetadataOptions(in
return autoConvert_kops_InstanceMetadataOptions_To_v1alpha3_InstanceMetadataOptions(in, out, s)
}
func autoConvert_v1alpha3_KarpenterConfig_To_kops_KarpenterConfig(in *KarpenterConfig, out *kops.KarpenterConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}
// Convert_v1alpha3_KarpenterConfig_To_kops_KarpenterConfig is an autogenerated conversion function.
func Convert_v1alpha3_KarpenterConfig_To_kops_KarpenterConfig(in *KarpenterConfig, out *kops.KarpenterConfig, s conversion.Scope) error {
return autoConvert_v1alpha3_KarpenterConfig_To_kops_KarpenterConfig(in, out, s)
}
func autoConvert_kops_KarpenterConfig_To_v1alpha3_KarpenterConfig(in *kops.KarpenterConfig, out *KarpenterConfig, s conversion.Scope) error {
out.Enabled = in.Enabled
return nil
}
// Convert_kops_KarpenterConfig_To_v1alpha3_KarpenterConfig is an autogenerated conversion function.
func Convert_kops_KarpenterConfig_To_v1alpha3_KarpenterConfig(in *kops.KarpenterConfig, out *KarpenterConfig, s conversion.Scope) error {
return autoConvert_kops_KarpenterConfig_To_v1alpha3_KarpenterConfig(in, out, s)
}
func autoConvert_v1alpha3_Keyset_To_kops_Keyset(in *Keyset, out *kops.Keyset, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha3_KeysetSpec_To_kops_KeysetSpec(&in.Spec, &out.Spec, s); err != nil {

View File

@ -1168,6 +1168,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(SnapshotControllerConfig)
(*in).DeepCopyInto(*out)
}
if in.Karpenter != nil {
in, out := &in.Karpenter, &out.Karpenter
*out = new(KarpenterConfig)
**out = **in
}
return
}
@ -2396,6 +2401,22 @@ func (in *InstanceMetadataOptions) DeepCopy() *InstanceMetadataOptions {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KarpenterConfig) DeepCopyInto(out *KarpenterConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KarpenterConfig.
func (in *KarpenterConfig) DeepCopy() *KarpenterConfig {
if in == nil {
return nil
}
out := new(KarpenterConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Keyset) DeepCopyInto(out *Keyset) {
*out = *in

View File

@ -37,6 +37,7 @@ import (
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/model/iam"
"k8s.io/kops/upup/pkg/fi"
@ -265,6 +266,12 @@ func validateClusterSpec(spec *kops.ClusterSpec, c *kops.Cluster, fieldPath *fie
}
}
if spec.Karpenter != nil && spec.Karpenter.Enabled {
if !featureflag.Karpenter.Enabled() {
allErrs = append(allErrs, field.Forbidden(fieldPath.Child("karpenter", "enabled"), "karpenter requires the Karpenter feature flag"))
}
}
return allErrs
}

View File

@ -1260,6 +1260,11 @@ func (in *ClusterSpec) DeepCopyInto(out *ClusterSpec) {
*out = new(SnapshotControllerConfig)
(*in).DeepCopyInto(*out)
}
if in.Karpenter != nil {
in, out := &in.Karpenter, &out.Karpenter
*out = new(KarpenterConfig)
**out = **in
}
return
}
@ -2554,6 +2559,22 @@ func (in *InstanceMetadataOptions) DeepCopy() *InstanceMetadataOptions {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KarpenterConfig) DeepCopyInto(out *KarpenterConfig) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KarpenterConfig.
func (in *KarpenterConfig) DeepCopy() *KarpenterConfig {
if in == nil {
return nil
}
out := new(KarpenterConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Keyset) DeepCopyInto(out *Keyset) {
*out = *in

View File

@ -90,6 +90,8 @@ var (
TerraformManagedFiles = new("TerraformManagedFiles", Bool(true))
// AlphaAllowGCE is a feature flag that gates GCE support while it is alpha.
AlphaAllowGCE = new("AlphaAllowGCE", Bool(false))
// Karpenter enables karpenter-managed Instance Groups
Karpenter = new("Karpenter", Bool(false))
)
// FeatureFlag defines a feature flag
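The integration test at the top toggles this flag in-process; a minimal sketch of the same pattern (at the CLI the flag is normally set through the KOPS_FEATURE_FLAGS environment variable):

package main

import (
	"fmt"

	"k8s.io/kops/pkg/featureflag"
)

func main() {
	featureflag.ParseFlags("+Karpenter")
	defer featureflag.ParseFlags("-Karpenter")

	// validateClusterSpec rejects spec.karpenter.enabled unless this is true.
	fmt.Println(featureflag.Karpenter.Enabled())
}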

View File

@ -18,6 +18,7 @@ package instancegroups
import (
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/upup/pkg/fi"
@ -47,7 +48,7 @@ func resolveSettings(cluster *kops.Cluster, group *kops.InstanceGroup, numInstan
if rollingUpdate.MaxSurge == nil {
val := intstr.FromInt(0)
if kops.CloudProviderID(cluster.Spec.CloudProvider) == kops.CloudProviderAWS && !featureflag.Spotinst.Enabled() {
if kops.CloudProviderID(cluster.Spec.CloudProvider) == kops.CloudProviderAWS && !featureflag.Spotinst.Enabled() && group.Spec.Manager != kops.InstanceManagerKarpenter {
val = intstr.FromInt(1)
}
rollingUpdate.MaxSurge = &val

View File

@ -24,6 +24,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model"
@ -79,12 +80,14 @@ func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
c.AddTask(task)
// @step: now lets build the autoscaling group task
if ig.Spec.Manager != "Karpenter" {
tsk, err := b.buildAutoScalingGroupTask(c, name, ig)
if err != nil {
return err
}
tsk.LaunchTemplate = task
c.AddTask(tsk)
}
warmPool := b.Cluster.Spec.WarmPool.ResolveDefaults(ig)
{

View File

@ -21,11 +21,12 @@ import (
"strings"
"k8s.io/klog/v2"
"k8s.io/legacy-cloud-providers/aws"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/legacy-cloud-providers/aws"
)
// NetworkModelBuilder configures network objects

View File

@ -25,6 +25,7 @@ import (
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"github.com/aws/aws-sdk-go/aws"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi"
)
@ -80,9 +81,11 @@ type NodeTerminationHandlerBuilder struct {
func (b *NodeTerminationHandlerBuilder) Build(c *fi.ModelBuilderContext) error {
for _, ig := range b.InstanceGroups {
if ig.Spec.Manager == kops.InstanceManagerCloudGroup {
err := b.configureASG(c, ig)
if err != nil {
return err
}
}
}

View File

@ -16,6 +16,7 @@ go_library(
"//pkg/model/components/addonmanifests/clusterautoscaler:go_default_library",
"//pkg/model/components/addonmanifests/dnscontroller:go_default_library",
"//pkg/model/components/addonmanifests/externaldns:go_default_library",
"//pkg/model/components/addonmanifests/karpenter:go_default_library",
"//pkg/model/components/addonmanifests/nodeterminationhandler:go_default_library",
"//pkg/model/iam:go_default_library",
"//upup/pkg/fi:go_default_library",

View File

@ -0,0 +1,12 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["iam.go"],
importpath = "k8s.io/kops/pkg/model/components/addonmanifests/karpenter",
visibility = ["//visibility:public"],
deps = [
"//pkg/model/iam:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],
)

View File

@ -0,0 +1,66 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package karpenter
import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/kops/pkg/model/iam"
)
// ServiceAccount represents the service-account used by karpenter.
// It implements iam.Subject to get AWS IAM permissions.
type ServiceAccount struct{}
var _ iam.Subject = &ServiceAccount{}
// BuildAWSPolicy generates a custom policy for a ServiceAccount IAM role.
func (r *ServiceAccount) BuildAWSPolicy(b *iam.PolicyBuilder) (*iam.Policy, error) {
clusterName := b.Cluster.ObjectMeta.Name
p := iam.NewPolicy(clusterName)
addKarpenterPermissions(p)
return p, nil
}
// ServiceAccount returns the kubernetes service account used.
func (r *ServiceAccount) ServiceAccount() (types.NamespacedName, bool) {
return types.NamespacedName{
Namespace: "kube-system",
Name: "karpenter",
}, true
}
func addKarpenterPermissions(p *iam.Policy) {
p.AddUnconditionalActions(
// "ec2:CreateLaunchTemplate",
"ec2:CreateFleet",
"ec2:RunInstances",
"ec2:CreateTags",
"iam:PassRole",
"ec2:TerminateInstances",
"ec2:DescribeLaunchTemplates",
"ec2:DescribeInstances",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstanceTypeOfferings",
"ec2:DescribeAvailabilityZones",
"ssm:GetParameter",
)
}
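A sketch (not part of the diff) of how this subject is consumed: BuildAWSPolicy renders the permissions above into the policy JSON that appears in the expected test output further down. It assumes an *iam.PolicyBuilder already wired with the target cluster, as the IAM model builders do:

package main

import (
	"k8s.io/kops/pkg/model/components/addonmanifests/karpenter"
	"k8s.io/kops/pkg/model/iam"
)

// renderKarpenterPolicy is illustrative: b must carry the target Cluster,
// since BuildAWSPolicy names the policy after the cluster.
func renderKarpenterPolicy(b *iam.PolicyBuilder) (string, error) {
	subject := &karpenter.ServiceAccount{}
	policy, err := subject.BuildAWSPolicy(b)
	if err != nil {
		return "", err
	}
	return policy.AsJSON() // matches the expected-policy fixture below
}

func main() {}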

View File

@ -23,6 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
addonsapi "k8s.io/kops/channels/pkg/api"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/kubemanifest"
@ -33,6 +34,7 @@ import (
"k8s.io/kops/pkg/model/components/addonmanifests/clusterautoscaler"
"k8s.io/kops/pkg/model/components/addonmanifests/dnscontroller"
"k8s.io/kops/pkg/model/components/addonmanifests/externaldns"
"k8s.io/kops/pkg/model/components/addonmanifests/karpenter"
"k8s.io/kops/pkg/model/components/addonmanifests/nodeterminationhandler"
"k8s.io/kops/pkg/model/iam"
"k8s.io/kops/upup/pkg/fi"
@ -128,6 +130,8 @@ func getWellknownServiceAccount(name string) iam.Subject {
return &awscloudcontrollermanager.ServiceAccount{}
case "external-dns":
return &externaldns.ServiceAccount{}
case "karpenter":
return &karpenter.ServiceAccount{}
default:
return nil
}

View File

@ -35,6 +35,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/model"
"k8s.io/kops/pkg/util/stringorslice"
@ -56,6 +57,10 @@ type Policy struct {
Version string
}
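// AddUnconditionalActions adds IAM actions that are allowed unconditionally, on all resources.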
func (p *Policy) AddUnconditionalActions(actions ...string) {
p.unconditionalAction.Insert(actions...)
}
// AsJSON converts the policy document to JSON format (parsable by AWS)
func (p *Policy) AsJSON() (string, error) {
if len(p.unconditionalAction) > 0 {

View File

@ -31,6 +31,7 @@ import (
corev1 "k8s.io/api/core/v1"
expirationcache "k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/nodeidentity"
)
@ -41,7 +42,8 @@ const (
// ClusterAutoscalerNodeTemplateLabel is the prefix used on node labels when copying to cloud tags.
ClusterAutoscalerNodeTemplateLabel = "k8s.io/cluster-autoscaler/node-template/label/"
// The expiration time of nodeidentity.Info cache.
cacheTTL = 60 * time.Minute
KarpenterNodeLabel = "karpenter.sh/"
)
// nodeIdentifier identifies a node from EC2
@ -145,10 +147,18 @@ func (i *nodeIdentifier) IdentifyNode(ctx context.Context, node *corev1.Node) (*
Labels: labels,
}
isKarpenterManaged := false
for _, tag := range instance.Tags {
key := aws.StringValue(tag.Key)
if strings.HasPrefix(key, ClusterAutoscalerNodeTemplateLabel) {
info.Labels[strings.TrimPrefix(aws.StringValue(tag.Key), ClusterAutoscalerNodeTemplateLabel)] = aws.StringValue(tag.Value)
}
if strings.HasPrefix(key, KarpenterNodeLabel) {
isKarpenterManaged = true
}
}
if isKarpenterManaged {
info.Labels["karpenter.sh/provisioner-name"] = info.Labels[CloudTagInstanceGroupName]
}
// If caching is enabled add the nodeidentity.Info to cache.

View File

@ -92,6 +92,10 @@ func BuildNodeLabels(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) m
nodeLabels[k] = v
}
if instanceGroup.Spec.Manager == kops.InstanceManagerKarpenter {
nodeLabels["karpenter.sh/provisioner-name"] = instanceGroup.ObjectMeta.Name
}
return nodeLabels
}
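// Illustrative note (not in the diff): for a Karpenter-managed InstanceGroup
// named "karpenter-nodes", kubelet thus registers the node with
// karpenter.sh/provisioner-name=karpenter-nodes, mirroring the tag-derived
// label applied by the nodeidentity change above.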

View File

@ -65,6 +65,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -85,6 +86,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -66,6 +66,7 @@ metadata:
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20211118
machineType: n1-standard-1
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -89,6 +90,7 @@ metadata:
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20211118
machineType: n1-standard-2
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -86,6 +86,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -106,6 +107,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -126,6 +128,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -146,6 +149,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -166,6 +170,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -186,6 +191,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -86,6 +86,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -106,6 +107,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -126,6 +128,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -146,6 +149,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -166,6 +170,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -186,6 +191,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -74,6 +74,7 @@ metadata:
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20211118
machineType: n1-standard-1
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -97,6 +98,7 @@ metadata:
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20211118
machineType: n1-standard-1
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -120,6 +122,7 @@ metadata:
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20211118
machineType: n1-standard-1
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -143,6 +146,7 @@ metadata:
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20211118
machineType: n1-standard-2
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -166,6 +170,7 @@ metadata:
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20211118
machineType: n1-standard-2
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -189,6 +194,7 @@ metadata:
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20211118
machineType: n1-standard-2
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -78,6 +78,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -98,6 +99,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -118,6 +120,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -138,6 +141,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -94,6 +94,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -114,6 +115,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -134,6 +136,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -154,6 +157,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -174,6 +178,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -194,6 +199,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -214,6 +220,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -75,6 +75,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.micro
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -95,6 +96,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -115,6 +117,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -71,6 +71,7 @@ spec:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -94,6 +95,7 @@ spec:
httpPutResponseHopLimit: 1
httpTokens: required
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -66,6 +66,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -86,6 +87,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -66,6 +66,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -86,6 +87,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -66,6 +66,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -86,6 +87,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -66,6 +66,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -86,6 +87,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -69,6 +69,7 @@ spec:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -92,6 +93,7 @@ spec:
httpPutResponseHopLimit: 1
httpTokens: required
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -69,6 +69,7 @@ spec:
httpPutResponseHopLimit: 3
httpTokens: required
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -92,6 +93,7 @@ spec:
httpPutResponseHopLimit: 1
httpTokens: required
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -75,6 +75,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.micro
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -95,6 +96,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -115,6 +117,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -69,6 +69,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -89,6 +90,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -78,6 +78,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.micro
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -101,6 +102,7 @@ spec:
- sg-exampleid4
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -124,6 +126,7 @@ spec:
- sg-exampleid2
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -73,6 +73,7 @@ metadata:
spec:
image: ubuntu-os-cloud/ubuntu-2004-focal-v20211118
machineType: f1-micro
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -99,6 +100,7 @@ spec:
- sg-exampleid4
image: ubuntu-os-cloud/ubuntu-2004-focal-v20211118
machineType: n1-standard-1
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -125,6 +127,7 @@ spec:
- sg-exampleid2
image: ubuntu-os-cloud/ubuntu-2004-focal-v20211118
machineType: n1-standard-2
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -75,6 +75,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -95,6 +96,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -68,6 +68,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -88,6 +89,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -68,6 +68,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -88,6 +89,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

View File

@ -67,6 +67,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: m3.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:
@ -87,6 +88,7 @@ metadata:
spec:
image: 099720109477/ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-20211118
machineType: t2.medium
manager: CloudGroup
maxSize: 1
minSize: 1
nodeLabels:

File diff suppressed because it is too large

View File

@ -0,0 +1,418 @@
Resources.AWSEC2LaunchTemplatemasterustest1amastersminimalexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.12
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
version: 3.4.13
main:
version: 3.4.13
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal.example.com
serviceAccountJWKSURI: https://api.internal.minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
NodeupConfigHash: LFwTDQ1M/AxVLdvKc8ZPsktDgr836JEsdQRwn2TU+iM=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="
Resources.AWSEC2LaunchTemplatenodesminimalexamplecom.Properties.LaunchTemplateData.UserData: |
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to set up ensure machine-id configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.12
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
NodeupConfigHash: ehZK5PooPMXQw0YD3dy5oARwClEXIj8ymh6DR1XYbQ0=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1,17 @@
{
"Statement": [
{
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"discovery.example.com/minimal.example.com:sub": "system:serviceaccount:kube-system:dns-controller"
}
},
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws-test:iam::123456789012:oidc-provider/discovery.example.com/minimal.example.com"
}
}
],
"Version": "2012-10-17"
}

View File

@ -0,0 +1,17 @@
{
"Statement": [
{
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"discovery.example.com/minimal.example.com:sub": "system:serviceaccount:kube-system:karpenter"
}
},
"Effect": "Allow",
"Principal": {
"Federated": "arn:aws-test:iam::123456789012:oidc-provider/discovery.example.com/minimal.example.com"
}
}
],
"Version": "2012-10-17"
}

View File

@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -0,0 +1,10 @@
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": { "Service": "ec2.amazonaws.com"},
"Action": "sts:AssumeRole"
}
]
}

View File

@ -0,0 +1,35 @@
{
"Statement": [
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
}
],
"Version": "2012-10-17"
}

View File

@ -0,0 +1,24 @@
{
"Statement": [
{
"Action": [
"ec2:CreateFleet",
"ec2:CreateTags",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeInstanceTypeOfferings",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeLaunchTemplates",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:RunInstances",
"ec2:TerminateInstances",
"iam:PassRole",
"ssm:GetParameter"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}

View File

@ -0,0 +1,204 @@
{
"Statement": [
{
"Action": "ec2:AttachVolume",
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com",
"aws:ResourceTag/k8s.io/role/master": "1"
}
},
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/main/*"
},
{
"Action": [
"s3:GetObject",
"s3:DeleteObject",
"s3:DeleteObjectVersion",
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws-test:s3:::placeholder-write-bucket/clusters.example.com/minimal.example.com/backups/etcd/events/*"
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-write-bucket"
]
},
{
"Action": [
"route53:ChangeResourceRecordSets",
"route53:ListResourceRecordSets",
"route53:GetHostedZone"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::hostedzone/Z1AFAKE1ZON3YO"
]
},
{
"Action": [
"route53:GetChange"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:route53:::change/*"
]
},
{
"Action": [
"route53:ListHostedZones",
"route53:ListTagsForResource"
],
"Effect": "Allow",
"Resource": [
"*"
]
},
{
"Action": "ec2:CreateTags",
"Condition": {
"StringEquals": {
"ec2:CreateAction": [
"CreateVolume",
"CreateSnapshot"
]
}
},
"Effect": "Allow",
"Resource": [
"arn:aws-test:ec2:*:*:volume/*",
"arn:aws-test:ec2:*:*:snapshot/*"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeTags",
"ec2:CreateSecurityGroup",
"ec2:CreateTags",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"ec2:DescribeRegions",
"ec2:DescribeRouteTables",
"ec2:DescribeSecurityGroups",
"ec2:DescribeSubnets",
"ec2:DescribeVolumes",
"ec2:DescribeVpcs",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateTargetGroup",
"elasticloadbalancing:DescribeListeners",
"elasticloadbalancing:DescribeLoadBalancerAttributes",
"elasticloadbalancing:DescribeLoadBalancerPolicies",
"elasticloadbalancing:DescribeLoadBalancers",
"elasticloadbalancing:DescribeTargetGroups",
"elasticloadbalancing:DescribeTargetHealth",
"elasticloadbalancing:RegisterTargets",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:DescribeKey",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:AttachVolume",
"ec2:AuthorizeSecurityGroupIngress",
"ec2:DeleteRoute",
"ec2:DeleteSecurityGroup",
"ec2:DeleteVolume",
"ec2:DetachVolume",
"ec2:ModifyInstanceAttribute",
"ec2:ModifyVolume",
"ec2:RevokeSecurityGroupIngress",
"elasticloadbalancing:AddTags",
"elasticloadbalancing:ApplySecurityGroupsToLoadBalancer",
"elasticloadbalancing:AttachLoadBalancerToSubnets",
"elasticloadbalancing:ConfigureHealthCheck",
"elasticloadbalancing:CreateLoadBalancerListeners",
"elasticloadbalancing:CreateLoadBalancerPolicy",
"elasticloadbalancing:DeleteListener",
"elasticloadbalancing:DeleteLoadBalancer",
"elasticloadbalancing:DeleteLoadBalancerListeners",
"elasticloadbalancing:DeleteTargetGroup",
"elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
"elasticloadbalancing:DeregisterTargets",
"elasticloadbalancing:DetachLoadBalancerFromSubnets",
"elasticloadbalancing:ModifyListener",
"elasticloadbalancing:ModifyLoadBalancerAttributes",
"elasticloadbalancing:ModifyTargetGroup",
"elasticloadbalancing:RegisterInstancesWithLoadBalancer",
"elasticloadbalancing:RegisterTargets",
"elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer",
"elasticloadbalancing:SetLoadBalancerPoliciesOfListener"
],
"Condition": {
"StringEquals": {
"aws:ResourceTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
},
{
"Action": [
"ec2:CreateSecurityGroup",
"ec2:CreateVolume",
"elasticloadbalancing:CreateListener",
"elasticloadbalancing:CreateLoadBalancer",
"elasticloadbalancing:CreateTargetGroup"
],
"Condition": {
"StringEquals": {
"aws:RequestTag/KubernetesCluster": "minimal.example.com"
}
},
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}

View File

@ -0,0 +1,41 @@
{
"Statement": [
{
"Action": [
"s3:Get*"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/addons/*",
"arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/cluster-completed.spec",
"arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/igconfig/node/*",
"arn:aws-test:s3:::placeholder-read-bucket/clusters.example.com/minimal.example.com/secrets/dockerconfig"
]
},
{
"Action": [
"s3:GetBucketLocation",
"s3:GetEncryptionConfiguration",
"s3:ListBucket",
"s3:ListBucketVersions"
],
"Effect": "Allow",
"Resource": [
"arn:aws-test:s3:::placeholder-read-bucket"
]
},
{
"Action": [
"autoscaling:DescribeAutoScalingInstances",
"ec2:DescribeInstanceTypes",
"ec2:DescribeInstances",
"iam:GetServerCertificate",
"iam:ListServerCertificates",
"kms:GenerateRandom"
],
"Effect": "Allow",
"Resource": "*"
}
],
"Version": "2012-10-17"
}

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -0,0 +1,168 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.12
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: karpenter-nodes
InstanceGroupRole: Node
NodeupConfigHash: 82WPA9hO1RdHX0zXhyshQojhW4qQug4izPAWZkgZkN4=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1,248 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.12
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
version: 3.4.13
main:
version: 3.4.13
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://discovery.example.com/minimal.example.com
serviceAccountJWKSURI: https://discovery.example.com/minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: master-us-test-1a
InstanceGroupRole: Master
NodeupConfigHash: Z4IU80+07IRL4JdtasAA0cGjl3hQHN6rUgqsA2L/R+E=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1,168 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
export AWS_REGION=us-test-1
sysctl -w net.core.rmem_max=16777216 || true
sysctl -w net.core.wmem_max=16777216 || true
sysctl -w net.ipv4.tcp_rmem='4096 87380 16777216' || true
sysctl -w net.ipv4.tcp_wmem='4096 87380 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, urls
download-or-bust() {
local -r file="$1"
local -r hash="$2"
local -r urls=( $(split-commas "$3") )
if [[ -f "${file}" ]]; then
if ! validate-hash "${file}" "${hash}"; then
rm -f "${file}"
else
return
fi
fi
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
echo "== Downloaded ${url} (SHA256 = ${hash}) =="
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
cd ${INSTALL_DIR}/bin
download-or-bust nodeup "${NODEUP_HASH}" "${NODEUP_URL}"
chmod +x nodeup
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.12
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
CloudProvider: aws
ConfigBase: memfs://clusters.example.com/minimal.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
NodeupConfigHash: ehZK5PooPMXQw0YD3dy5oARwClEXIj8ymh6DR1XYbQ0=
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1,183 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: minimal.example.com
spec:
api:
dns: {}
authorization:
alwaysAllow: {}
channel: stable
cloudConfig:
awsEBSCSIDriver:
enabled: false
manageStorageClasses: true
cloudProvider: aws
clusterDNSDomain: cluster.local
configBase: memfs://clusters.example.com/minimal.example.com
configStore: memfs://clusters.example.com/minimal.example.com
containerRuntime: containerd
containerd:
logLevel: info
version: 1.4.12
dnsZone: Z1AFAKE1ZON3YO
docker:
skipInstall: true
etcdClusters:
- backups:
backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd/main
etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
version: 3.4.13
- backups:
backupStore: memfs://clusters.example.com/minimal.example.com/backups/etcd/events
etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
version: 3.4.13
externalDns:
provider: dns-controller
iam:
legacy: false
useServiceAccountExternalPermissions: true
karpenter:
enabled: true
keyStore: memfs://clusters.example.com/minimal.example.com/pki
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://discovery.example.com/minimal.example.com
serviceAccountJWKSURI: https://discovery.example.com/minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: aws
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeDNS:
cacheMaxConcurrent: 150
cacheMaxSize: 1000
cpuRequest: 100m
domain: cluster.local
memoryLimit: 170Mi
memoryRequest: 70Mi
nodeLocalDNS:
cpuRequest: 25m
enabled: false
image: k8s.gcr.io/dns/k8s-dns-node-cache:1.21.3
memoryRequest: 5Mi
provider: CoreDNS
serverIP: 100.64.0.10
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
kubernetesApiAccess:
- 0.0.0.0/0
kubernetesVersion: 1.21.0
masterInternalName: api.internal.minimal.example.com
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 100.96.0.0/11
secretStore: memfs://clusters.example.com/minimal.example.com/secrets
serviceAccountIssuerDiscovery:
discoveryStore: memfs://discovery.example.com/minimal.example.com
enableAWSOIDCProvider: true
serviceClusterIPRange: 100.64.0.0/13
sshAccess:
- 0.0.0.0/0
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
topology:
dns:
type: Public
masters: public
nodes: public

View File

@ -0,0 +1,18 @@
{
"issuer": "https://discovery.example.com/minimal.example.com",
"jwks_uri": "https://discovery.example.com/minimal.example.com/openid/v1/jwks",
"authorization_endpoint": "urn:kubernetes:programmatic_authorization",
"response_types_supported": [
"id_token"
],
"subject_types_supported": [
"public"
],
"id_token_signing_alg_values_supported": [
"RS256"
],
"claims_supported": [
"sub",
"iss"
]
}
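This is the OIDC discovery document kops publishes to the discoveryStore so AWS IAM can federate the cluster's service account tokens. Relying parties fetch it from the issuer's well-known path; a minimal Go sketch of such a client, assuming the standard /.well-known/openid-configuration location under the issuer URL:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// discoveryDoc holds the fields of the discovery document that
// IAM-roles-for-service-accounts consumers care about.
type discoveryDoc struct {
	Issuer  string `json:"issuer"`
	JWKSURI string `json:"jwks_uri"`
}

func main() {
	resp, err := http.Get("https://discovery.example.com/minimal.example.com/.well-known/openid-configuration")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var doc discoveryDoc
	if err := json.NewDecoder(resp.Body).Decode(&doc); err != nil {
		panic(err)
	}
	// The jwks_uri points at the signing keys used to verify
	// projected service account tokens.
	fmt.Println(doc.Issuer, doc.JWKSURI)
}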

View File

@ -0,0 +1,4 @@
{
"memberCount": 1,
"etcdVersion": "3.4.13"
}

View File

@ -0,0 +1,4 @@
{
"memberCount": 1,
"etcdVersion": "3.4.13"
}

View File

@ -0,0 +1,20 @@
{
"keys": [
{
"use": "sig",
"kty": "RSA",
"kid": "3mNcULfgtWECYyZWY5ow1rOHjiRwEZHx28HQcRec3Ew",
"alg": "RS256",
"n": "2JbeF8dNwqfEKKD65aGlVs58fWkA0qZdVLKw8qATzRBJTi1nqbj2kAR4gyy_C8Mxouxva_om9d7Sq8Ka55T7-w",
"e": "AQAB"
},
{
"use": "sig",
"kty": "RSA",
"kid": "G-cZ10iKJqrXhR15ivI7Lg2q_cuL0zN9ouL0vF67FLc",
"alg": "RS256",
"n": "o4Tridlsf4Yz3UAiup_scSTiG_OqxkUW3Fz7zGKvVcLeYj9GEIKuzoB1VFk1nboDq4cCuGLfdzaQdCQKPIsDuw",
"e": "AQAB"
}
]
}

View File

@ -0,0 +1,61 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-events
name: etcd-manager-events
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd/events
--client-urls=https://__name__:4002 --cluster-name=etcd-events --containerized=true
--dns-suffix=.internal.minimal.example.com --grpc-port=3997 --peer-urls=https://__name__:2381
--quarantine-client-urls=https://__name__:3995 --v=6 --volume-name-tag=k8s.io/etcd/events
--volume-provider=aws --volume-tag=k8s.io/etcd/events --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
image: k8s.gcr.io/etcdadm/etcd-manager:v3.0.20211124
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-events
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd-events.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@ -0,0 +1,61 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
labels:
k8s-app: etcd-manager-main
name: etcd-manager-main
namespace: kube-system
spec:
containers:
- command:
- /bin/sh
- -c
- mkfifo /tmp/pipe; (tee -a /var/log/etcd.log < /tmp/pipe & ) ; exec /etcd-manager
--backup-store=memfs://clusters.example.com/minimal.example.com/backups/etcd/main
--client-urls=https://__name__:4001 --cluster-name=etcd --containerized=true
--dns-suffix=.internal.minimal.example.com --grpc-port=3996 --peer-urls=https://__name__:2380
--quarantine-client-urls=https://__name__:3994 --v=6 --volume-name-tag=k8s.io/etcd/main
--volume-provider=aws --volume-tag=k8s.io/etcd/main --volume-tag=k8s.io/role/master=1
--volume-tag=kubernetes.io/cluster/minimal.example.com=owned > /tmp/pipe 2>&1
image: k8s.gcr.io/etcdadm/etcd-manager:v3.0.20211124
name: etcd-manager
resources:
requests:
cpu: 200m
memory: 100Mi
securityContext:
privileged: true
volumeMounts:
- mountPath: /rootfs
name: rootfs
- mountPath: /run
name: run
- mountPath: /etc/kubernetes/pki/etcd-manager
name: pki
- mountPath: /var/log/etcd.log
name: varlogetcd
hostNetwork: true
hostPID: true
priorityClassName: system-cluster-critical
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- hostPath:
path: /
type: Directory
name: rootfs
- hostPath:
path: /run
type: DirectoryOrCreate
name: run
- hostPath:
path: /etc/kubernetes/pki/etcd-manager-main
type: DirectoryOrCreate
name: pki
- hostPath:
path: /var/log/etcd.log
type: FileOrCreate
name: varlogetcd
status: {}

View File

@ -0,0 +1,32 @@
apiVersion: v1
kind: Pod
metadata:
creationTimestamp: null
spec:
containers:
- args:
- --ca-cert=/secrets/ca.crt
- --client-cert=/secrets/client.crt
- --client-key=/secrets/client.key
command:
- /kube-apiserver-healthcheck
image: k8s.gcr.io/kops/kube-apiserver-healthcheck:1.24.0-alpha.1
livenessProbe:
httpGet:
host: 127.0.0.1
path: /.kube-apiserver-healthcheck/healthz
port: 3990
initialDelaySeconds: 5
timeoutSeconds: 5
name: healthcheck
resources: {}
volumeMounts:
- mountPath: /secrets
name: healthcheck-secrets
readOnly: true
volumes:
- hostPath:
path: /etc/kubernetes/kube-apiserver-healthcheck/secrets
type: Directory
name: healthcheck-secrets
status: {}

View File

@ -0,0 +1,61 @@
kind: Addons
metadata:
creationTimestamp: null
name: bootstrap
spec:
addons:
- id: k8s-1.16
manifest: kops-controller.addons.k8s.io/k8s-1.16.yaml
manifestHash: 7eb2e21eff3c1f501f7aaf1007d533fcfa5ae8bd862dcea053ae62559a478e81
name: kops-controller.addons.k8s.io
needsRollingUpdate: control-plane
selector:
k8s-addon: kops-controller.addons.k8s.io
version: 9.99.0
- manifest: core.addons.k8s.io/v1.4.0.yaml
manifestHash: 18233793a8442224d052e44891e737c67ccfb4e051e95216392319653f4cb0e5
name: core.addons.k8s.io
selector:
k8s-addon: core.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: coredns.addons.k8s.io/k8s-1.12.yaml
manifestHash: 8c3daed1d84f622f3db7a8833fe1b317268d696e28a56e5ae79ff1167385463f
name: coredns.addons.k8s.io
selector:
k8s-addon: coredns.addons.k8s.io
version: 9.99.0
- id: k8s-1.9
manifest: kubelet-api.rbac.addons.k8s.io/k8s-1.9.yaml
manifestHash: 01c120e887bd98d82ef57983ad58a0b22bc85efb48108092a24c4b82e4c9ea81
name: kubelet-api.rbac.addons.k8s.io
selector:
k8s-addon: kubelet-api.rbac.addons.k8s.io
version: 9.99.0
- manifest: limit-range.addons.k8s.io/v1.5.0.yaml
manifestHash: 2d55c3bc5e354e84a3730a65b42f39aba630a59dc8d32b30859fcce3d3178bc2
name: limit-range.addons.k8s.io
selector:
k8s-addon: limit-range.addons.k8s.io
version: 9.99.0
- id: k8s-1.12
manifest: dns-controller.addons.k8s.io/k8s-1.12.yaml
manifestHash: 595f7ea5bff4f4668b9fdac3cb76ffce2fef68e3ae08e1781d657474a46b8df9
name: dns-controller.addons.k8s.io
selector:
k8s-addon: dns-controller.addons.k8s.io
version: 9.99.0
- id: v1.15.0
manifest: storage-aws.addons.k8s.io/v1.15.0.yaml
manifestHash: 065ae832ddac8d0931e9992d6a76f43a33a36975a38003b34f4c5d86a7d42780
name: storage-aws.addons.k8s.io
selector:
k8s-addon: storage-aws.addons.k8s.io
version: 9.99.0
- id: k8s-1.19
manifest: karpenter.sh/k8s-1.19.yaml
manifestHash: c93d4c515254e2e8d9cc61a900a15f6ada60afed44fd7ba9ae0dd16cda9ca027
name: karpenter.sh
selector:
k8s-addon: karpenter.sh
version: 9.99.0
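Each addon entry in this channel pairs a manifest path with a manifestHash; when the hash changes, the addon manager knows to reapply that addon. A minimal sketch of computing such a hash, assuming it is the hex-encoded SHA-256 of the manifest file contents (the path is relative to the addons directory):

package main

import (
	"crypto/sha256"
	"fmt"
	"os"
)

func main() {
	// A changed hash here is what triggers a reapply of the
	// karpenter.sh addon manifest.
	data, err := os.ReadFile("karpenter.sh/k8s-1.19.yaml")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", sha256.Sum256(data))
}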

View File

@ -0,0 +1,56 @@
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: core.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: core.addons.k8s.io
name: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: core.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: core.addons.k8s.io
name: kube-dns
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: core.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: core.addons.k8s.io
name: kube-proxy
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: core.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: core.addons.k8s.io
name: kubeadm:node-proxier
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-proxier
subjects:
- apiGroup: ""
kind: ServiceAccount
name: kube-proxy
namespace: kube-system

View File

@ -0,0 +1,380 @@
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/cluster-service: "true"
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
---
apiVersion: v1
data:
Corefile: |-
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local. in-addr.arpa ip6.arpa {
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
}
prometheus :9153
forward . /etc/resolv.conf {
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
addonmanager.kubernetes.io/mode: EnsureExists
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: coredns
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kube-dns
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 1
type: RollingUpdate
template:
metadata:
labels:
k8s-app: kube-dns
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values:
- kube-dns
topologyKey: kubernetes.io/hostname
weight: 100
containers:
- args:
- -conf
- /etc/coredns/Corefile
image: k8s.gcr.io/coredns/coredns:v1.8.5
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 5
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
successThreshold: 1
timeoutSeconds: 5
name: coredns
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
volumeMounts:
- mountPath: /etc/coredns
name: config-volume
readOnly: true
dnsPolicy: Default
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns
tolerations:
- key: CriticalAddonsOnly
operator: Exists
volumes:
- configMap:
name: coredns
name: config-volume
---
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: CoreDNS
name: kube-dns
namespace: kube-system
resourceVersion: "0"
spec:
clusterIP: 100.64.0.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
selector:
k8s-app: kube-dns
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: kube-dns
namespace: kube-system
spec:
maxUnavailable: 50%
selector:
matchLabels:
k8s-app: kube-dns
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- replicationcontrollers/scale
verbs:
- get
- update
- apiGroups:
- extensions
- apps
resources:
- deployments/scale
- replicasets/scale
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
name: coredns-autoscaler
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: coredns.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: coredns.addons.k8s.io
k8s-app: coredns-autoscaler
kubernetes.io/cluster-service: "true"
name: coredns-autoscaler
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-app: coredns-autoscaler
spec:
containers:
- command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
image: k8s.gcr.io/cpa/cluster-proportional-autoscaler:1.8.4
name: autoscaler
resources:
requests:
cpu: 20m
memory: 10Mi
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: coredns-autoscaler
tolerations:
- key: CriticalAddonsOnly
operator: Exists

View File

@ -0,0 +1,146 @@
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.24.0-alpha.1
name: dns-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
k8s-app: dns-controller
strategy:
type: Recreate
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ""
labels:
k8s-addon: dns-controller.addons.k8s.io
k8s-app: dns-controller
version: v1.24.0-alpha.1
spec:
containers:
- command:
- /dns-controller
- --watch-ingress=false
- --dns=aws-route53
- --zone=*/Z1AFAKE1ZON3YO
- --internal-ipv4
- --zone=*/*
- -v=2
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
- name: AWS_ROLE_ARN
value: arn:aws-test:iam::123456789012:role/dns-controller.kube-system.sa.minimal.example.com
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: /var/run/secrets/amazonaws.com/token
image: k8s.gcr.io/kops/dns-controller:1.24.0-alpha.1
name: dns-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
volumeMounts:
- mountPath: /var/run/secrets/amazonaws.com/
name: token-amazonaws-com
readOnly: true
dnsPolicy: Default
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 10001
serviceAccount: dns-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
volumes:
- name: token-amazonaws-com
projected:
defaultMode: 420
sources:
- serviceAccountToken:
audience: amazonaws.com
expirationSeconds: 86400
path: token
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: dns-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- ingress
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking
resources:
- ingresses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: dns-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: dns-controller.addons.k8s.io
name: kops:dns-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops:dns-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:dns-controller

View File

@ -0,0 +1,886 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: provisioners.karpenter.sh
spec:
group: karpenter.sh
names:
kind: Provisioner
listKind: ProvisionerList
plural: provisioners
singular: provisioner
scope: Cluster
versions:
- name: v1alpha5
schema:
openAPIV3Schema:
description: Provisioner is the Schema for the Provisioners API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ProvisionerSpec is the top level provisioner specification.
Provisioners launch nodes in response to pods that are unschedulable.
A single provisioner is capable of managing a diverse set of nodes.
Node properties are determined from a combination of provisioner and
pod scheduling constraints.
properties:
labels:
additionalProperties:
type: string
description: Labels are layered with Requirements and applied to every
node.
type: object
limits:
description: Limits define a set of bounds for provisioning capacity.
properties:
resources:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Resources contains all the allocatable resources
that Karpenter supports for limiting.
type: object
type: object
provider:
description: Provider contains fields specific to your cloudprovider.
type: object
x-kubernetes-preserve-unknown-fields: true
requirements:
description: Requirements are layered with Labels and applied to every
node.
items:
description: A node selector requirement is a selector that contains
values, a key, and an operator that relates the key and values.
properties:
key:
description: The label key that the selector applies to.
type: string
operator:
description: Represents a key's relationship to a set of values.
Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and
Lt.
type: string
values:
description: An array of string values. If the operator is In
or NotIn, the values array must be non-empty. If the operator
is Exists or DoesNotExist, the values array must be empty.
If the operator is Gt or Lt, the values array must have a
single element, which will be interpreted as an integer. This
array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
taints:
description: Taints will be applied to every node launched by the
Provisioner. If specified, the provisioner will not provision nodes
for pods that do not have matching tolerations. Additional taints
will be created that match pod tolerations on a per-node basis.
items:
description: The node this Taint is attached to has the "effect"
on any pod that does not tolerate the Taint.
properties:
effect:
description: Required. The effect of the taint on pods that
do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule
and NoExecute.
type: string
key:
description: Required. The taint key to be applied to a node.
type: string
timeAdded:
description: TimeAdded represents the time at which the taint
was added. It is only written for NoExecute taints.
format: date-time
type: string
value:
description: The taint value corresponding to the taint key.
type: string
required:
- effect
- key
type: object
type: array
ttlSecondsAfterEmpty:
description: "TTLSecondsAfterEmpty is the number of seconds the controller
will wait before attempting to delete a node, measured from when
the node is detected to be empty. A Node is considered to be empty
when it does not have pods scheduled to it, excluding daemonsets.
\n Termination due to underutilization is disabled if this field
is not set."
format: int64
type: integer
ttlSecondsUntilExpired:
description: "TTLSecondsUntilExpired is the number of seconds the
controller will wait before terminating a node, measured from when
the node is created. This is useful to implement features like eventually
consistent node upgrade, memory leak protection, and disruption
testing. \n Termination due to expiration is disabled if this field
is not set."
format: int64
type: integer
type: object
status:
description: ProvisionerStatus defines the observed state of Provisioner
properties:
conditions:
description: Conditions is the set of conditions required for this
provisioner to scale its target, and indicates whether or not those
conditions are met.
items:
description: 'Condition defines a readiness condition for a Knative
resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties'
properties:
lastTransitionTime:
description: LastTransitionTime is the last time the condition
transitioned from one status to another. We use VolatileTime
in place of metav1.Time to exclude this from creating equality.Semantic
differences (all other things held constant).
type: string
message:
description: A human readable message indicating details about
the transition.
type: string
reason:
description: The reason for the condition's last transition.
type: string
severity:
description: Severity with which to treat failures of this type
of condition. When this is not specified, it defaults to Error.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
description: Type of condition.
type: string
required:
- status
- type
type: object
type: array
lastScaleTime:
description: LastScaleTime is the last time the Provisioner scaled
the number of nodes
format: date-time
type: string
resources:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Resources is the list of resources that have been provisioned.
type: object
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter
namespace: kube-system
---
apiVersion: v1
data: {}
kind: Secret
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-webhook-cert
namespace: kube-system
---
apiVersion: v1
data:
zap-logger-config: |
{
"level": "debug",
"development": true,
"disableStacktrace": true,
"disableCaller": true,
"sampling": {
"initial": 100,
"thereafter": 100
},
"outputPaths": ["stdout"],
"errorOutputPaths": ["stderr"],
"encoding": "console",
"encoderConfig": {
"timeKey": "time",
"levelKey": "level",
"nameKey": "logger",
"callerKey": "caller",
"messageKey": "message",
"stacktraceKey": "stacktrace",
"levelEncoder": "capital",
"timeEncoder": "iso8601"
}
}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
app.kubernetes.io/part-of: karpenter
k8s-addon: karpenter.sh
name: config-logging
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-controller
rules:
- apiGroups:
- karpenter.sh
resources:
- provisioners
- provisioners/status
verbs:
- create
- delete
- patch
- get
- list
- watch
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- patch
- update
- watch
- apiGroups:
- ""
resources:
- nodes
- pods
verbs:
- get
- list
- watch
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- update
- apiGroups:
- ""
resources:
- nodes
verbs:
- create
- apiGroups:
- ""
resources:
- pods/binding
- pods/eviction
verbs:
- create
- apiGroups:
- apps
resources:
- daemonsets
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
- mutatingwebhookconfigurations
verbs:
- get
- watch
- list
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: karpenter-controller
subjects:
- kind: ServiceAccount
name: karpenter
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: karpenter-webhook
subjects:
- kind: ServiceAccount
name: karpenter
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps/status
verbs:
- get
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-webhook
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- configmaps
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- list
- watch
- update
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- watch
- create
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: karpenter-controller
subjects:
- kind: ServiceAccount
name: karpenter
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-webhook
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: karpenter-webhook
subjects:
- kind: ServiceAccount
name: karpenter
namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-metrics
namespace: kube-system
spec:
ports:
- port: 8080
targetPort: metrics
selector:
karpenter: controller
---
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-webhook
namespace: kube-system
spec:
ports:
- port: 443
targetPort: webhook
selector:
karpenter: webhook
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-controller
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
karpenter: controller
strategy:
type: Recreate
template:
metadata:
labels:
karpenter: controller
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: karpenter.sh/provisioner-name
operator: DoesNotExist
containers:
- env:
- name: AWS_REGION
value: us-test-1
- name: CLUSTER_NAME
value: minimal.example.com
- name: CLUSTER_ENDPOINT
value: https://api.internal.minimal.example.com
- name: SYSTEM_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: AWS_ROLE_ARN
value: arn:aws-test:iam::123456789012:role/karpenter.kube-system.sa.minimal.example.com
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: /var/run/secrets/amazonaws.com/token
image: public.ecr.aws/karpenter/controller:v0.5.2
livenessProbe:
httpGet:
path: /healthz
port: 8081
name: manager
ports:
- containerPort: 8080
name: metrics
- containerPort: 8081
name: health-probe
readinessProbe:
httpGet:
path: /readyz
port: 8081
resources:
limits:
cpu: "1"
memory: 1Gi
requests:
cpu: "1"
memory: 1Gi
volumeMounts:
- mountPath: /var/run/secrets/amazonaws.com/
name: token-amazonaws-com
readOnly: true
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 10001
serviceAccountName: karpenter
volumes:
- name: token-amazonaws-com
projected:
defaultMode: 420
sources:
- serviceAccountToken:
audience: amazonaws.com
expirationSeconds: 86400
path: token
---
apiVersion: apps/v1
kind: Deployment
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-webhook
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
karpenter: webhook
strategy:
type: Recreate
template:
metadata:
labels:
karpenter: webhook
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: karpenter.sh/provisioner-name
operator: DoesNotExist
containers:
- args:
- -port=8443
env:
- name: AWS_REGION
value: us-test-1
- name: CLUSTER_NAME
value: minimal.example.com
- name: CLUSTER_ENDPOINT
value: https://api.internal.minimal.example.com
- name: SYSTEM_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: AWS_ROLE_ARN
value: arn:aws-test:iam::123456789012:role/karpenter.kube-system.sa.minimal.example.com
- name: AWS_WEB_IDENTITY_TOKEN_FILE
value: /var/run/secrets/amazonaws.com/token
image: public.ecr.aws/karpenter/webhook:v0.5.2
livenessProbe:
httpGet:
port: 8443
scheme: HTTPS
name: webhook
ports:
- containerPort: 8443
name: webhook
readinessProbe:
httpGet:
port: 8443
scheme: HTTPS
resources:
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 100m
memory: 50Mi
volumeMounts:
- mountPath: /var/run/secrets/amazonaws.com/
name: token-amazonaws-com
readOnly: true
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 10001
serviceAccountName: karpenter
volumes:
- name: token-amazonaws-com
projected:
defaultMode: 420
sources:
- serviceAccountToken:
audience: amazonaws.com
expirationSeconds: 86400
path: token
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: defaulting.webhook.provisioners.karpenter.sh
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: karpenter-webhook
namespace: kube-system
failurePolicy: Fail
name: defaulting.webhook.provisioners.karpenter.sh
rules:
- apiGroups:
- karpenter.sh
apiVersions:
- v1alpha5
operations:
- CREATE
- UPDATE
resources:
- provisioners
- provisioners/status
sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: validation.webhook.provisioners.karpenter.sh
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: karpenter-webhook
namespace: kube-system
failurePolicy: Fail
name: validation.webhook.provisioners.karpenter.sh
rules:
- apiGroups:
- karpenter.sh
apiVersions:
- v1alpha5
operations:
- CREATE
- UPDATE
resources:
- provisioners
- provisioners/status
sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: validation.webhook.config.karpenter.sh
webhooks:
- admissionReviewVersions:
- v1
clientConfig:
service:
name: karpenter-webhook
namespace: kube-system
failurePolicy: Fail
name: validation.webhook.config.karpenter.sh
objectSelector:
matchLabels:
app.kubernetes.io/part-of: karpenter
sideEffects: None
---
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: karpenter.sh
app.kubernetes.io/managed-by: kops
k8s-addon: karpenter.sh
name: karpenter-nodes
spec:
limits:
resources:
cpu: 1000
provider:
instanceProfile: nodes.minimal.example.com
launchTemplate: karpenter-nodes.minimal.example.com
securityGroupSelector:
Name: nodes.minimal.example.com
subnetSelector:
KubernetesCluster: minimal.example.com
kubernetes.io/role/internal-elb: "1"
requirements:
- key: karpenter.sh/capacity-type
operator: In
values:
- spot
- key: kubernetes.io/arch
operator: In
values:
- amd64
- key: node.kubernetes.io/instance-type
operator: In
values:
- t2.medium
ttlSecondsAfterEmpty: 30
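The Provisioner above is what this configuration installs for the karpenter-nodes instance group: spot-only, amd64, t2.medium capacity, capped at 1000 CPUs, with empty nodes reclaimed after 30 seconds. Further provisioners can be managed like any other custom resource; a minimal sketch using client-go's dynamic client (the "batch" provisioner name and its spec are hypothetical):

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Provisioners are cluster-scoped, so no namespace is set.
	gvr := schema.GroupVersionResource{Group: "karpenter.sh", Version: "v1alpha5", Resource: "provisioners"}
	prov := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "karpenter.sh/v1alpha5",
		"kind":       "Provisioner",
		"metadata":   map[string]interface{}{"name": "batch"},
		"spec": map[string]interface{}{
			"ttlSecondsAfterEmpty": int64(30),
			"requirements": []interface{}{
				map[string]interface{}{
					"key":      "karpenter.sh/capacity-type",
					"operator": "In",
					"values":   []interface{}{"spot"},
				},
			},
		},
	}}
	if _, err := client.Resource(gvr).Create(context.TODO(), prov, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}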

View File

@ -0,0 +1,208 @@
apiVersion: v1
data:
config.yaml: |
{"cloud":"aws","configBase":"memfs://clusters.example.com/minimal.example.com","server":{"Listen":":3988","provider":{"aws":{"nodesRoles":["nodes.minimal.example.com"],"Region":"us-test-1"}},"serverKeyPath":"/etc/kubernetes/kops-controller/pki/kops-controller.key","serverCertificatePath":"/etc/kubernetes/kops-controller/pki/kops-controller.crt","caBasePath":"/etc/kubernetes/kops-controller/pki","signingCAs":["kubernetes-ca"],"certNames":["kubelet","kubelet-server","kube-proxy"]}}
kind: ConfigMap
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.24.0-alpha.1
name: kops-controller
namespace: kube-system
spec:
selector:
matchLabels:
k8s-app: kops-controller
template:
metadata:
annotations:
dns.alpha.kubernetes.io/internal: kops-controller.internal.minimal.example.com
labels:
k8s-addon: kops-controller.addons.k8s.io
k8s-app: kops-controller
version: v1.24.0-alpha.1
spec:
containers:
- command:
- /kops-controller
- --v=2
- --conf=/etc/kubernetes/kops-controller/config/config.yaml
env:
- name: KUBERNETES_SERVICE_HOST
value: 127.0.0.1
image: k8s.gcr.io/kops/kops-controller:1.24.0-alpha.1
name: kops-controller
resources:
requests:
cpu: 50m
memory: 50Mi
securityContext:
runAsNonRoot: true
volumeMounts:
- mountPath: /etc/kubernetes/kops-controller/config/
name: kops-controller-config
- mountPath: /etc/kubernetes/kops-controller/pki/
name: kops-controller-pki
dnsPolicy: Default
hostNetwork: true
nodeSelector:
kops.k8s.io/kops-controller-pki: ""
node-role.kubernetes.io/master: ""
priorityClassName: system-cluster-critical
serviceAccount: kops-controller
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
volumes:
- configMap:
name: kops-controller
name: kops-controller-config
- hostPath:
path: /etc/kubernetes/kops-controller/
type: Directory
name: kops-controller-pki
updateStrategy:
type: OnDelete
---
apiVersion: v1
kind: ServiceAccount
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- create
- apiGroups:
- ""
- coordination.k8s.io
resourceNames:
- kops-controller-leader
resources:
- configmaps
- leases
verbs:
- get
- list
- watch
- patch
- update
- delete
- apiGroups:
- ""
- coordination.k8s.io
resources:
- configmaps
- leases
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kops-controller.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kops-controller.addons.k8s.io
name: kops-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kops-controller
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:serviceaccount:kube-system:kops-controller

View File

@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: kubelet-api.rbac.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: kubelet-api.rbac.addons.k8s.io
name: kops:system:kubelet-api-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet-api

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: LimitRange
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: limit-range.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: limit-range.addons.k8s.io
name: limits
namespace: default
spec:
limits:
- defaultRequest:
cpu: 100m
type: Container

View File

@ -0,0 +1,98 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: default
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "false"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: gp2
parameters:
type: gp2
provisioner: kubernetes.io/aws-ebs
---
allowVolumeExpansion: true
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
annotations:
storageclass.kubernetes.io/is-default-class: "true"
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: kops-ssd-1-17
parameters:
encrypted: "true"
type: gp2
provisioner: kubernetes.io/aws-ebs
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
rules:
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: null
labels:
addon.kops.k8s.io/name: storage-aws.addons.k8s.io
app.kubernetes.io/managed-by: kops
k8s-addon: storage-aws.addons.k8s.io
name: system:aws-cloud-provider
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:aws-cloud-provider
subjects:
- kind: ServiceAccount
name: aws-cloud-provider
namespace: kube-system

View File

@ -0,0 +1,63 @@
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- f6120552408175ca332fd3b5d31c5edd115d8426d6731664e4ea3951c5eee3b4@https://github.com/containerd/containerd/releases/download/v1.4.12/cri-containerd-cni-1.4.12-linux-amd64.tar.gz
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 87a4219c54552797ffd38790b72832372a90eceb7c8e451c36a682093d57dae6@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.11.tgz
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
ClusterName: minimal.example.com
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "6982820025135291416230495506"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
karpenter.sh/provisioner-name: karpenter-nodes
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
UpdatePolicy: automatic
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.4.12

View File

@ -0,0 +1,265 @@
APIServerConfig:
KubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: aws
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- https://127.0.0.1:4001
etcdServersOverrides:
- /events#https://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://discovery.example.com/minimal.example.com
serviceAccountJWKSURI: https://discovery.example.com/minimal.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
ServiceAccountPublicKeys: |
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBANiW3hfHTcKnxCig+uWhpVbOfH1pANKm
XVSysPKgE80QSU4tZ6m49pAEeIMsvwvDMaLsb2v6JvXe0qvCmueU+/sCAwEAAQ==
-----END RSA PUBLIC KEY-----
-----BEGIN RSA PUBLIC KEY-----
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKOE64nZbH+GM91AIrqf7HEk4hvzqsZF
Ftxc+8xir1XC3mI/RhCCrs6AdVRZNZ26A6uHArhi33c2kHQkCjyLA7sCAwEAAQ==
-----END RSA PUBLIC KEY-----
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- f6120552408175ca332fd3b5d31c5edd115d8426d6731664e4ea3951c5eee3b4@https://github.com/containerd/containerd/releases/download/v1.4.12/cri-containerd-cni-1.4.12-linux-amd64.tar.gz
- f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64
- 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 87a4219c54552797ffd38790b72832372a90eceb7c8e451c36a682093d57dae6@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.11.tgz
- 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64
- 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64
CAs:
apiserver-aggregator-ca: |
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gINaZLHjisEcbMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTExMloX
DTMxMDYzMDA0NTExMlowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQAHAomFKsF4jvYX
WM/UzQXDj9nSAFTf8dBPCXyZZNotsOH7+P6W4mMiuVs8bAuGiXGUdbsQ2lpiT/Rk
CzMeMdr4
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgjCCASygAwIBAgIMFo3gM0nxQpiX/agfMA0GCSqGSIb3DQEBCwUAMCIxIDAe
BgNVBAMTF2FwaXNlcnZlci1hZ2dyZWdhdG9yLWNhMB4XDTIxMDYzMDA0NTIzMVoX
DTMxMDYzMDA0NTIzMVowIjEgMB4GA1UEAxMXYXBpc2VydmVyLWFnZ3JlZ2F0b3It
Y2EwXDANBgkqhkiG9w0BAQEFAANLADBIAkEAyyE71AOU3go5XFegLQ6fidI0LhhM
x7CzpTzh2xWKcHUfbNI7itgJvC/+GlyG5W+DF5V7ba0IJiQLsFve0oLdewIDAQAB
o0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
ALfqF5ZmfqvqORuJIFilZYKF3d0wDQYJKoZIhvcNAQELBQADQQCXsoezoxXu2CEN
QdlXZOfmBT6cqxIX/RMHXhpHwRiqPsTO8IO2bVA8CSzxNwMuSv/ZtrMHoh8+PcVW
HLtkTXH8
-----END CERTIFICATE-----
etcd-clients-ca: |
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1ogHnr26DL9YkqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjE5MDFaFw0zMTA2Mjgx
NjE5MDFaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAAZAdf8ROEVkr3Rf7I+s+CQOil2toadlKWOY
qCeJ2XaEROfp9aUTEIU1MGM3g57MPyAPPU7mURskuOQz6B1UFaY=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBcjCCARygAwIBAgIMFo1olfBnC/CsT+dqMA0GCSqGSIb3DQEBCwUAMBoxGDAW
BgNVBAMTD2V0Y2QtY2xpZW50cy1jYTAeFw0yMTA2MjgxNjIwMzNaFw0zMTA2Mjgx
NjIwMzNaMBoxGDAWBgNVBAMTD2V0Y2QtY2xpZW50cy1jYTBcMA0GCSqGSIb3DQEB
AQUAA0sAMEgCQQDYlt4Xx03Cp8QooPrloaVWznx9aQDSpl1UsrDyoBPNEElOLWep
uPaQBHiDLL8LwzGi7G9r+ib13tKrwprnlPv7AgMBAAGjQjBAMA4GA1UdDwEB/wQE
AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQjlt4Ue54AbJPWlDpRM51s
x+PeBDANBgkqhkiG9w0BAQsFAANBAF1xUz77PlUVUnd9duF8F7plou0TONC9R6/E
YQ8C6vM1b+9NSDGjCW8YmwEU2fBgskb/BBX2lwVZ32/RUEju4Co=
-----END CERTIFICATE-----
etcd-manager-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bKjm04vB4rNtaMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAwOTU2WhcN
MzEwNzA1MjAwOTU2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKiC8tndMlEFZ7qzeKxeKqFVjaYpsh/H
g7RxWo15+1kgH3suO0lxp9+RxSVv97hnsfbySTPZVhy2cIQj7eZtZt8CAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFBg6
CEZkQNnRkARBwFce03AEWa+sMA0GCSqGSIb3DQEBCwUAA0EAJMnBThok/uUe8q8O
sS5q19KUuE8YCTUzMDj36EBKf6NX4NoakCa1h6kfQVtlMtEIMWQZCjbm8xGK5ffs
GS/VUw==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBgDCCASqgAwIBAgIMFo+bQ+EgIiBmGghjMA0GCSqGSIb3DQEBCwUAMCExHzAd
BgNVBAMTFmV0Y2QtbWFuYWdlci1jYS1ldmVudHMwHhcNMjEwNzA1MjAxMTQ2WhcN
MzEwNzA1MjAxMTQ2WjAhMR8wHQYDVQQDExZldGNkLW1hbmFnZXItY2EtZXZlbnRz
MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAKFhHVVxxDGv8d1jBvtdSxz7KIVoBOjL
DMxsmTsINiQkTQaFlb+XPlnY1ar4+RhE519AFUkqfhypk4Zxqf1YFXUCAwEAAaNC
MEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNuW
LLH5c8kDubDbr6BHgedW0iJ9MA0GCSqGSIb3DQEBCwUAA0EAiKUoBoaGu7XzboFE
hjfKlX0TujqWuW3qMxDEJwj4dVzlSLrAoB/G01MJ+xxYKh456n48aG6N827UPXhV
cPfVNg==
-----END CERTIFICATE-----
etcd-manager-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjm1c3jfv6hIMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAxbkDbGYmCSShpRG3r+lzTOFujyuruRfjOhYm
ZRX4w1Utd5y63dUc98sjc9GGUYMHd+0k1ql/a48tGhnK6N6jJwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWZLkbBFx
GAgPU4i62c52unSo7RswDQYJKoZIhvcNAQELBQADQQAj6Pgd0va/8FtkyMlnohLu
Gf4v8RJO6zk3Y6jJ4+cwWziipFM1ielMzSOZfFcCZgH3m5Io40is4hPSqyq2TOA6
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eg8Si30gr4MA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtbWFuYWdlci1jYS1tYWluMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1tYW5hZ2VyLWNhLW1haW4wXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAw33jzcd/iosN04b0WXbDt7B0c3sJ3aafcGLP
vG3xRB9N5bYr9+qZAq3mzAFkxscn4j1ce5b1/GKTDEAClmZgdQIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUE/h+3gDP
DvKwHRyiYlXM8voZ1wowDQYJKoZIhvcNAQELBQADQQBXuimeEoAOu5HN4hG7NqL9
t40K3ZRhRZv3JQWnRVJCBDjg1rD0GQJR/n+DoWvbeijI5C9pNjr2pWSIYR1eYCvd
-----END CERTIFICATE-----
etcd-peers-ca-events: |
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bKjmxTPh3/lYJMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMDk1NloXDTMx
MDcwNTIwMDk1NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAv5g4HF2xmrYyouJfY9jXx1M3gPLD/pupvxPY
xyjJw5pNCy5M5XGS3iTqRD5RDE0fWudVHFZKLIe8WPc06NApXwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUf6xiDI+O
Yph1ziCGr2hZaQYt+fUwDQYJKoZIhvcNAQELBQADQQBBxj5hqEQstonTb8lnqeGB
DEYtUeAk4eR/HzvUMjF52LVGuvN3XVt+JTrFeKNvb6/RDUbBNRj3azalcUkpPh6V
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBfDCCASagAwIBAgIMFo+bQ+Eq69jgzpKwMA0GCSqGSIb3DQEBCwUAMB8xHTAb
BgNVBAMTFGV0Y2QtcGVlcnMtY2EtZXZlbnRzMB4XDTIxMDcwNTIwMTE0NloXDTMx
MDcwNTIwMTE0NlowHzEdMBsGA1UEAxMUZXRjZC1wZWVycy1jYS1ldmVudHMwXDAN
BgkqhkiG9w0BAQEFAANLADBIAkEAo5Nj2CjX1qp3mEPw1H5nHAFWLoGNSLSlRFJW
03NxaNPMFzL5PrCoyOXrX8/MWczuZYw0Crf8EPOOQWi2+W0XLwIDAQABo0IwQDAO
BgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxauhhKQh
cvdZND78rHe0RQVTTiswDQYJKoZIhvcNAQELBQADQQB+cq4jIS9q0zXslaRa+ViI
J+dviA3sMygbmSJO0s4DxYmoazKJblux5q0ASSvS9iL1l9ShuZ1dWyp2tpZawHyb
-----END CERTIFICATE-----
etcd-peers-ca-main: |
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bKjmuLDDLcDHsMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDA5NTZaFw0zMTA3
MDUyMDA5NTZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCyRaXWpwgN6INQqws9p/BvPElJv2Rno9dVTFhlQqDA
aUJXe7MBmiO4NJcW76EozeBh5ztR3/4NE1FM2x8TisS3AgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQtE1d49uSvpURf
OQ25Vlu6liY20DANBgkqhkiG9w0BAQsFAANBAAgLVaetJZcfOA3OIMMvQbz2Ydrt
uWF9BKkIad8jrcIrm3IkOtR8bKGmDIIaRKuG/ZUOL6NMe2fky3AAfKwleL4=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBeDCCASKgAwIBAgIMFo+bQ+EuVthBfuZvMA0GCSqGSIb3DQEBCwUAMB0xGzAZ
BgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjAeFw0yMTA3MDUyMDExNDZaFw0zMTA3
MDUyMDExNDZaMB0xGzAZBgNVBAMTEmV0Y2QtcGVlcnMtY2EtbWFpbjBcMA0GCSqG
SIb3DQEBAQUAA0sAMEgCQQCxNbycDZNx5V1ZOiXxZSvaFpHRwKeHDfcuMUitdoPt
naVMlMTGDWAMuCVmFHFAWohIYynemEegmZkZ15S7AErfAgMBAAGjQjBAMA4GA1Ud
DwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTAjQ8T4HclPIsC
qipEfUIcLP6jqTANBgkqhkiG9w0BAQsFAANBAJdZ17TN3HlWrH7HQgfR12UBwz8K
G9DurDznVaBVUYaHY8Sg5AvAXeb+yIF2JMmRR+bK+/G1QYY2D3/P31Ic2Oo=
-----END CERTIFICATE-----
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
ClusterName: minimal.example.com
Hooks:
- null
- null
KeypairIDs:
apiserver-aggregator-ca: "6980187172486667078076483355"
etcd-clients-ca: "6979622252718071085282986282"
etcd-manager-ca-events: "6982279354000777253151890266"
etcd-manager-ca-main: "6982279354000936168671127624"
etcd-peers-ca-events: "6982279353999767935825892873"
etcd-peers-ca-main: "6982279353998887468930183660"
kubernetes-ca: "6982820025135291416230495506"
service-account: "2"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
UpdatePolicy: automatic
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.4.12
etcdManifests:
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/main.yaml
- memfs://clusters.example.com/minimal.example.com/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml

View File

@ -0,0 +1,62 @@
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- f6120552408175ca332fd3b5d31c5edd115d8426d6731664e4ea3951c5eee3b4@https://github.com/containerd/containerd/releases/download/v1.4.12/cri-containerd-cni-1.4.12-linux-amd64.tar.gz
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 87a4219c54552797ffd38790b72832372a90eceb7c8e451c36a682093d57dae6@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.11.tgz
CAs:
kubernetes-ca: |
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANqBD8NSD82AUSMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwODAwWhcNMzEwNzA3MDcw
ODAwWjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBANFI3zr0Tk8krsW8vwjfMpzJOlWQ8616vG3YPa2qAgI7V4oKwfV0yIg1
jt+H6f4P/wkPAPTPTfRp9Iy8oHEEFw0CAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNG3zVjTcLlJwDsJ4/K9DV7KohUA
MA0GCSqGSIb3DQEBCwUAA0EAB8d03fY2w7WKpfO29qI295pu2C4ca9AiVGOpgSc8
tmQsq6rcxt3T+rb589PVtz0mw/cKTxOk6gH2CCC+yHfy2w==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIBbjCCARigAwIBAgIMFpANvmSa0OAlYmXKMA0GCSqGSIb3DQEBCwUAMBgxFjAU
BgNVBAMTDWt1YmVybmV0ZXMtY2EwHhcNMjEwNzA3MDcwOTM2WhcNMzEwNzA3MDcw
OTM2WjAYMRYwFAYDVQQDEw1rdWJlcm5ldGVzLWNhMFwwDQYJKoZIhvcNAQEBBQAD
SwAwSAJBAMF6F4aZdpe0RUpyykaBpWwZCnwbffhYGOw+fs6RdLuUq7QCNmJm/Eq7
WWOziMYDiI9SbclpD+6QiJ0N3EqppVUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEG
MA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFLImp6ARjPDAH6nhI+scWVt3Q9bn
MA0GCSqGSIb3DQEBCwUAA0EAVQVx5MUtuAIeePuP9o51xtpT2S6Fvfi8J4ICxnlA
9B7UD2ushcVFPtaeoL9Gfu8aY4KJBeqqg5ojl4qmRnThjw==
-----END CERTIFICATE-----
ClusterName: minimal.example.com
Hooks:
- null
- null
KeypairIDs:
kubernetes-ca: "6982820025135291416230495506"
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: aws
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
UpdatePolicy: automatic
channels:
- memfs://clusters.example.com/minimal.example.com/addons/bootstrap-channel.yaml
containerdConfig:
logLevel: info
version: 1.4.12

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -0,0 +1,102 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-10T22:42:27Z"
name: minimal.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/minimal.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
iam:
useServiceAccountExternalPermissions: true
karpenter:
enabled: true
kubelet:
anonymousAuth: false
kubernetesVersion: v1.21.0
masterInternalName: api.internal.minimal.example.com
masterPublicName: api.minimal.example.com
networkCIDR: 172.20.0.0/16
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
serviceAccountIssuerDiscovery:
enableAWSOIDCProvider: true
discoveryStore: memfs://discovery.example.com/minimal.example.com
sshAccess:
- 0.0.0.0/0
topology:
masters: public
nodes: public
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Public
zone: us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: nodes
labels:
kops.k8s.io/cluster: minimal.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: minimal.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-10T22:42:28Z"
name: karpenter-nodes
labels:
kops.k8s.io/cluster: minimal.example.com
spec:
manager: Karpenter
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
role: Node
subnets:
- us-test-1a

File diff suppressed because it is too large

View File

@ -55,6 +55,7 @@ go_library(
"cloudup/resources/addons/external-dns.addons.k8s.io/k8s-1.19.yaml.template",
"cloudup/resources/addons/leader-migration.rbac.addons.k8s.io/k8s-1.23.yaml",
"cloudup/resources/addons/leader-migration.rbac.addons.k8s.io/k8s-1.25.yaml",
"cloudup/resources/addons/karpenter.sh/k8s-1.19.yaml.template",
],
importpath = "k8s.io/kops/upup/models",
visibility = ["//visibility:public"],

View File

@ -0,0 +1,678 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.7.0
creationTimestamp: null
name: provisioners.karpenter.sh
spec:
group: karpenter.sh
names:
kind: Provisioner
listKind: ProvisionerList
plural: provisioners
singular: provisioner
scope: Cluster
versions:
- name: v1alpha5
schema:
openAPIV3Schema:
description: Provisioner is the Schema for the Provisioners API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ProvisionerSpec is the top level provisioner specification.
Provisioners launch nodes in response to pods that are unschedulable.
A single provisioner is capable of managing a diverse set of nodes.
Node properties are determined from a combination of provisioner and
pod scheduling constraints.
properties:
labels:
additionalProperties:
type: string
description: Labels are layered with Requirements and applied to every
node.
type: object
limits:
description: Limits define a set of bounds for provisioning capacity.
properties:
resources:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Resources contains all the allocatable resources
that Karpenter supports for limiting.
type: object
type: object
provider:
description: Provider contains fields specific to your cloudprovider.
type: object
x-kubernetes-preserve-unknown-fields: true
requirements:
description: Requirements are layered with Labels and applied to every
node.
items:
description: A node selector requirement is a selector that contains
values, a key, and an operator that relates the key and values.
properties:
key:
description: The label key that the selector applies to.
type: string
operator:
description: Represents a key's relationship to a set of values.
            Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and
            Lt.
type: string
values:
description: An array of string values. If the operator is In
or NotIn, the values array must be non-empty. If the operator
is Exists or DoesNotExist, the values array must be empty.
If the operator is Gt or Lt, the values array must have a
single element, which will be interpreted as an integer. This
array is replaced during a strategic merge patch.
items:
type: string
type: array
required:
- key
- operator
type: object
type: array
taints:
description: Taints will be applied to every node launched by the
Provisioner. If specified, the provisioner will not provision nodes
for pods that do not have matching tolerations. Additional taints
will be created that match pod tolerations on a per-node basis.
items:
description: The node this Taint is attached to has the "effect"
on any pod that does not tolerate the Taint.
properties:
effect:
description: Required. The effect of the taint on pods that
do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule
and NoExecute.
type: string
key:
description: Required. The taint key to be applied to a node.
type: string
timeAdded:
description: TimeAdded represents the time at which the taint
was added. It is only written for NoExecute taints.
format: date-time
type: string
value:
description: The taint value corresponding to the taint key.
type: string
required:
- effect
- key
type: object
type: array
ttlSecondsAfterEmpty:
description: "TTLSecondsAfterEmpty is the number of seconds the controller
will wait before attempting to delete a node, measured from when
the node is detected to be empty. A Node is considered to be empty
when it does not have pods scheduled to it, excluding daemonsets.
\n Termination due to underutilization is disabled if this field
is not set."
format: int64
type: integer
ttlSecondsUntilExpired:
description: "TTLSecondsUntilExpired is the number of seconds the
controller will wait before terminating a node, measured from when
the node is created. This is useful to implement features like eventually
consistent node upgrade, memory leak protection, and disruption
testing. \n Termination due to expiration is disabled if this field
is not set."
format: int64
type: integer
type: object
status:
description: ProvisionerStatus defines the observed state of Provisioner
properties:
conditions:
description: Conditions is the set of conditions required for this
provisioner to scale its target, and indicates whether or not those
conditions are met.
items:
description: 'Condition defines a readiness condition for a Knative
resource. See: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties'
properties:
lastTransitionTime:
description: LastTransitionTime is the last time the condition
transitioned from one status to another. We use VolatileTime
in place of metav1.Time to exclude this from creating equality.Semantic
differences (all other things held constant).
type: string
message:
description: A human readable message indicating details about
the transition.
type: string
reason:
description: The reason for the condition's last transition.
type: string
severity:
description: Severity with which to treat failures of this type
of condition. When this is not specified, it defaults to Error.
type: string
status:
description: Status of the condition, one of True, False, Unknown.
type: string
type:
description: Type of condition.
type: string
required:
- status
- type
type: object
type: array
lastScaleTime:
description: LastScaleTime is the last time the Provisioner scaled
the number of nodes
format: date-time
type: string
resources:
additionalProperties:
anyOf:
- type: integer
- type: string
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
description: Resources is the list of resources that have been provisioned.
type: object
type: object
type: object
served: true
storage: true
subresources:
status: {}
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
# Source: karpenter/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: karpenter
namespace: kube-system
---
# Source: karpenter/templates/webhook/deployment.yaml
apiVersion: v1
kind: Secret
metadata:
name: karpenter-webhook-cert
namespace: kube-system
data: {} # Injected by karpenter-webhook
---
# Source: karpenter/templates/100-config-logging.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: config-logging
namespace: kube-system
labels:
app.kubernetes.io/part-of: karpenter
data:
# https://github.com/uber-go/zap/blob/aa3e73ec0896f8b066ddf668597a02f89628ee50/config.go
zap-logger-config: |
{
"level": "debug",
"development": true,
"disableStacktrace": true,
"disableCaller": true,
"sampling": {
"initial": 100,
"thereafter": 100
},
"outputPaths": ["stdout"],
"errorOutputPaths": ["stderr"],
"encoding": "console",
"encoderConfig": {
"timeKey": "time",
"levelKey": "level",
"nameKey": "logger",
"callerKey": "caller",
"messageKey": "message",
"stacktraceKey": "stacktrace",
"levelEncoder": "capital",
"timeEncoder": "iso8601"
}
}
# Log level overrides
# loglevel.controller: info # debug
# loglevel.webhook: info # debug
---
# Source: karpenter/templates/controller/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: karpenter-controller
rules:
- apiGroups: ["karpenter.sh"]
resources: ["provisioners", "provisioners/status"]
verbs: ["create", "delete", "patch", "get", "list", "watch"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["create", "get", "patch", "update", "watch"]
- apiGroups: [""]
resources: ["nodes", "pods"]
verbs: ["get", "list", "watch", "patch", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["create"]
- apiGroups: [""]
resources: ["pods/binding", "pods/eviction"]
verbs: ["create"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["list", "watch"]
---
# Source: karpenter/templates/webhook/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: karpenter-webhook
rules:
- apiGroups: ["admissionregistration.k8s.io"]
resources: ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"]
verbs: ["get", "watch", "list", "update"]
---
# Source: karpenter/templates/controller/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: karpenter-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: karpenter-controller
subjects:
- kind: ServiceAccount
name: karpenter
namespace: kube-system
---
# Source: karpenter/templates/webhook/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: karpenter-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: karpenter-webhook
subjects:
- kind: ServiceAccount
name: karpenter
namespace: kube-system
---
# Source: karpenter/templates/controller/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: karpenter-controller
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["configmaps/status"]
verbs: ["get", "update", "patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["create"]
---
# Source: karpenter/templates/webhook/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: karpenter-webhook
namespace: kube-system
rules:
- apiGroups: [""]
resources: ["configmaps", "namespaces"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "create", "update"]
---
# Source: karpenter/templates/controller/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: karpenter-controller
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: karpenter-controller
subjects:
- kind: ServiceAccount
name: karpenter
namespace: kube-system
---
# Source: karpenter/templates/webhook/rbac.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: karpenter-webhook
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: karpenter-webhook
subjects:
- kind: ServiceAccount
name: karpenter
namespace: kube-system
---
# Source: karpenter/templates/controller/deployment.yaml
apiVersion: v1
kind: Service
metadata:
name: karpenter-metrics
namespace: kube-system
spec:
ports:
- port: 8080
targetPort: metrics
selector:
karpenter: controller
---
# Source: karpenter/templates/webhook/deployment.yaml
apiVersion: v1
kind: Service
metadata:
name: karpenter-webhook
namespace: kube-system
spec:
ports:
- port: 443
targetPort: webhook
selector:
karpenter: webhook
---
# Source: karpenter/templates/controller/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: karpenter-controller
namespace: kube-system
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
karpenter: controller
template:
metadata:
labels:
karpenter: controller
spec:
priorityClassName: system-cluster-critical
serviceAccountName: karpenter
containers:
- name: manager
image: public.ecr.aws/karpenter/controller:v0.5.2
resources:
limits:
cpu: 1
memory: 1Gi
requests:
cpu: 1
memory: 1Gi
ports:
- name: metrics
containerPort: 8080
- name: health-probe
containerPort: 8081
livenessProbe:
httpGet:
path: /healthz
port: 8081
readinessProbe:
httpGet:
path: /readyz
port: 8081
env:
- name: AWS_REGION
value: {{ Region }}
- name: CLUSTER_NAME
value: {{ ClusterName }}
- name: CLUSTER_ENDPOINT
value: https://{{ .MasterInternalName }}
- name: SYSTEM_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: karpenter.sh/provisioner-name
operator: DoesNotExist
---
# Source: karpenter/templates/webhook/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: karpenter-webhook
namespace: kube-system
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
karpenter: webhook
template:
metadata:
labels:
karpenter: webhook
spec:
priorityClassName: system-cluster-critical
serviceAccountName: karpenter
containers:
- name: webhook
image: public.ecr.aws/karpenter/webhook:v0.5.2
args:
- -port=8443
resources:
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 100m
memory: 50Mi
ports:
- name: webhook
containerPort: 8443
livenessProbe:
httpGet:
scheme: HTTPS
port: 8443
readinessProbe:
httpGet:
scheme: HTTPS
port: 8443
env:
- name: AWS_REGION
value: {{ Region }}
- name: CLUSTER_NAME
value: {{ ClusterName }}
- name: CLUSTER_ENDPOINT
value: https://{{ .MasterInternalName }}
- name: SYSTEM_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
- key: karpenter.sh/provisioner-name
operator: DoesNotExist
---
# Source: karpenter/templates/webhook/webhooks.yaml
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
name: defaulting.webhook.provisioners.karpenter.sh
webhooks:
- admissionReviewVersions: ["v1"]
clientConfig:
service:
name: karpenter-webhook
namespace: 'kube-system'
failurePolicy: Fail
sideEffects: None
name: defaulting.webhook.provisioners.karpenter.sh
rules:
- apiGroups:
- karpenter.sh
apiVersions:
- v1alpha5
resources:
    - provisioners
    - provisioners/status
operations:
- CREATE
- UPDATE
---
# Source: karpenter/templates/webhook/webhooks.yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: validation.webhook.provisioners.karpenter.sh
webhooks:
- admissionReviewVersions: ["v1"]
clientConfig:
service:
name: karpenter-webhook
namespace: 'kube-system'
failurePolicy: Fail
sideEffects: None
name: validation.webhook.provisioners.karpenter.sh
rules:
- apiGroups:
- karpenter.sh
apiVersions:
- v1alpha5
resources:
    - provisioners
    - provisioners/status
operations:
- CREATE
- UPDATE
---
# Source: karpenter/templates/webhook/webhooks.yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
name: validation.webhook.config.karpenter.sh
webhooks:
- admissionReviewVersions: ["v1"]
clientConfig:
service:
name: karpenter-webhook
namespace: 'kube-system'
failurePolicy: Fail
sideEffects: None
name: validation.webhook.config.karpenter.sh
objectSelector:
matchLabels:
app.kubernetes.io/part-of: karpenter
{{ range $name, $spec := GetNodeInstanceGroups }}
{{ if eq $spec.Manager "Karpenter" }}
---
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
name: {{ $name }}
spec:
requirements:
- key: karpenter.sh/capacity-type
operator: In
values: ["spot"]
- key: kubernetes.io/arch
operator: In
values: ["{{ ArchitectureOfAMI $spec.Image }}"]
- key: "node.kubernetes.io/instance-type"
operator: In
values:
- {{ $spec.MachineType }}
{{ with $spec.MixedInstancesPolicy }}
{{ range $key := .Instances }}
- {{ $key }}
{{ end }}
{{ end }}
{{ with $spec.Taints }}
taints:
{{ range $taintString := $spec.Taints }}
{{ $taint := ParseTaint $taintString }}
- key: {{ $taint.key }}
effect: {{ $taint.effect }}
{{ if $taint.value }}
value: {{ $taint.value }}
{{ end }}
{{ end }}
{{ end }}
{{ with $spec.NodeLabels }}
labels:
{{ range $key, $value := . }}
{{ $key }}: {{ $value }}
{{ end }}
{{ end }}
limits:
resources:
cpu: 1000
provider:
instanceProfile: nodes.{{ ClusterName }}
launchTemplate: {{ $name }}.{{ ClusterName }}
securityGroupSelector:
Name: nodes.{{ ClusterName }}
subnetSelector:
kubernetes.io/role/internal-elb: "1"
KubernetesCluster: {{ ClusterName }}
ttlSecondsAfterEmpty: 30
{{ end }}
{{ end }}
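The template above is rendered with Go's text/template, so helpers such as GetNodeInstanceGroups, ArchitectureOfAMI, and ParseTaint must be registered in a FuncMap before rendering. Below is a minimal, self-contained sketch of that wiring; the ClusterName closure returning minimal.example.com is an illustrative stand-in, not the real kops registration (which happens in TemplateFunctions.AddTo, see the dest["ParseTaint"] assignment further below).
package main
import (
	"os"
	"text/template"
)
func main() {
	// Illustrative FuncMap; kops registers its real helpers in
	// TemplateFunctions.AddTo rather than inline like this.
	funcs := template.FuncMap{
		"ClusterName": func() string { return "minimal.example.com" },
	}
	tmpl := template.Must(template.New("provisioner").Funcs(funcs).Parse(
		"launchTemplate: karpenter-nodes.{{ ClusterName }}\n"))
	// Prints: launchTemplate: karpenter-nodes.minimal.example.com
	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}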

View File

@ -51,6 +51,8 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
k8s_aws "k8s.io/legacy-cloud-providers/aws"
"k8s.io/kops/dnsprovider/pkg/dnsprovider"
dnsproviderroute53 "k8s.io/kops/dnsprovider/pkg/dnsprovider/providers/aws/route53"
"k8s.io/kops/pkg/apis/kops"
@ -60,7 +62,6 @@ import (
identity_aws "k8s.io/kops/pkg/nodeidentity/aws"
"k8s.io/kops/pkg/resources/spotinst"
"k8s.io/kops/upup/pkg/fi"
k8s_aws "k8s.io/legacy-cloud-providers/aws"
)
// By default, aws-sdk-go only retries 3 times, which doesn't give
@ -692,7 +693,125 @@ func (c *awsCloudImplementation) GetCloudGroups(cluster *kops.Cluster, instanceg
return sgroups, nil
}
return getCloudGroups(c, cluster, instancegroups, warnUnmatched, nodes)
cloudGroups, err := getCloudGroups(c, cluster, instancegroups, warnUnmatched, nodes)
if err != nil {
return nil, err
}
karpenterGroups, err := getKarpenterGroups(c, cluster, instancegroups, nodes)
if err != nil {
return nil, err
}
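// Merge Karpenter-managed groups into the ASG-backed groups so callers see
// a single map keyed by instance group name.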
for name, group := range karpenterGroups {
cloudGroups[name] = group
}
return cloudGroups, nil
}
func getKarpenterGroups(c AWSCloud, cluster *kops.Cluster, instancegroups []*kops.InstanceGroup, nodes []v1.Node) (map[string]*cloudinstances.CloudInstanceGroup, error) {
cloudGroups := make(map[string]*cloudinstances.CloudInstanceGroup)
for _, ig := range instancegroups {
if ig.Spec.Manager == kops.InstanceManagerKarpenter {
group, err := buildKarpenterGroup(c, cluster, ig, nodes)
if err != nil {
return nil, err
}
cloudGroups[ig.ObjectMeta.Name] = group
}
}
return cloudGroups, nil
}
func buildKarpenterGroup(c AWSCloud, cluster *kops.Cluster, ig *kops.InstanceGroup, nodes []v1.Node) (*cloudinstances.CloudInstanceGroup, error) {
nodeMap := cloudinstances.GetNodeMap(nodes, cluster)
instances := make(map[string]*ec2.Instance)
updatedInstances := make(map[string]*ec2.Instance)
clusterName := c.Tags()[TagClusterName]
var version string
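// Look up the instance group's launch template and record its latest
// version; instances created from this version count as up to date.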
{
result, err := c.EC2().DescribeLaunchTemplates(&ec2.DescribeLaunchTemplatesInput{
Filters: []*ec2.Filter{
NewEC2Filter("tag:"+identity_aws.CloudTagInstanceGroupName, ig.ObjectMeta.Name),
NewEC2Filter("tag:"+TagClusterName, clusterName),
},
})
if err != nil {
return nil, err
}
if len(result.LaunchTemplates) == 0 {
return nil, fmt.Errorf("found no launch template for instance group %q", ig.ObjectMeta.Name)
}
lt := result.LaunchTemplates[0]
versionNumber := *lt.LatestVersionNumber
version = strconv.Itoa(int(versionNumber))
}
karpenterGroup := &cloudinstances.CloudInstanceGroup{
InstanceGroup: ig,
HumanName: ig.ObjectMeta.Name,
}
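// First pass: list every instance tagged for this instance group and
// cluster, regardless of which launch template version created it.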
{
req := &ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
NewEC2Filter("tag:"+identity_aws.CloudTagInstanceGroupName, ig.ObjectMeta.Name),
NewEC2Filter("tag:"+TagClusterName, clusterName),
NewEC2Filter("instance-state-name", "pending", "running", "stopping", "stopped"),
},
}
result, err := c.EC2().DescribeInstances(req)
if err != nil {
return nil, err
}
for _, r := range result.Reservations {
for _, i := range r.Instances {
id := aws.StringValue(i.InstanceId)
instances[id] = i
}
}
}
klog.Infof("found %d karpenter instances", len(instances))
{
req := &ec2.DescribeInstancesInput{
Filters: []*ec2.Filter{
NewEC2Filter("tag:"+identity_aws.CloudTagInstanceGroupName, ig.ObjectMeta.Name),
NewEC2Filter("tag:"+TagClusterName, clusterName),
NewEC2Filter("instance-state-name", "pending", "running", "stopping", "stopped"),
NewEC2Filter("tag:aws:ec2launchtemplate:version", version),
},
}
result, err := c.EC2().DescribeInstances(req)
if err != nil {
return nil, err
}
for _, r := range result.Reservations {
for _, i := range r.Instances {
id := aws.StringValue(i.InstanceId)
updatedInstances[id] = i
}
}
}
klog.Infof("found %d updated instances", len(updatedInstances))
{
for _, instance := range instances {
id := *instance.InstanceId
_, ready := updatedInstances[id]
var status string
if ready {
status = cloudinstances.CloudInstanceStatusUpToDate
} else {
status = cloudinstances.CloudInstanceStatusNeedsUpdate
}
cloudInstance, err := karpenterGroup.NewCloudInstance(id, status, nodeMap[id])
if err != nil {
return nil, err
}
addCloudInstanceData(cloudInstance, instance)
}
}
return karpenterGroup, nil
}
func getCloudGroups(c AWSCloud, cluster *kops.Cluster, instancegroups []*kops.InstanceGroup, warnUnmatched bool, nodes []v1.Node) (map[string]*cloudinstances.CloudInstanceGroup, error) {

View File

@ -24,6 +24,7 @@ go_library(
"//pkg/model/components/addonmanifests/clusterautoscaler:go_default_library",
"//pkg/model/components/addonmanifests/dnscontroller:go_default_library",
"//pkg/model/components/addonmanifests/externaldns:go_default_library",
"//pkg/model/components/addonmanifests/karpenter:go_default_library",
"//pkg/model/components/addonmanifests/nodeterminationhandler:go_default_library",
"//pkg/model/iam:go_default_library",
"//pkg/templates:go_default_library",

View File

@ -21,6 +21,7 @@ import (
"strings"
"k8s.io/klog/v2"
channelsapi "k8s.io/kops/channels/pkg/api"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/assets"
@ -35,6 +36,7 @@ import (
"k8s.io/kops/pkg/model/components/addonmanifests/clusterautoscaler"
"k8s.io/kops/pkg/model/components/addonmanifests/dnscontroller"
"k8s.io/kops/pkg/model/components/addonmanifests/externaldns"
"k8s.io/kops/pkg/model/components/addonmanifests/karpenter"
"k8s.io/kops/pkg/model/components/addonmanifests/nodeterminationhandler"
"k8s.io/kops/pkg/model/iam"
"k8s.io/kops/pkg/templates"
@ -1058,6 +1060,23 @@ func (b *BootstrapChannelBuilder) buildAddons(c *fi.ModelBuilderContext) (*Addon
})
}
}
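// Register the karpenter.sh addon when Karpenter is enabled in the cluster
// spec, and request a dedicated IAM role for its service account when
// external service-account permissions are in use.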
if b.Cluster.Spec.Karpenter != nil && fi.BoolValue(&b.Cluster.Spec.Karpenter.Enabled) {
key := "karpenter.sh"
{
id := "k8s-1.19"
location := key + "/" + id + ".yaml"
addons.Add(&channelsapi.AddonSpec{
Name: fi.String(key),
Manifest: fi.String(location),
Selector: map[string]string{"k8s-addon": key},
Id: id,
})
if b.UseServiceAccountExternalPermissions() {
serviceAccountRoles = append(serviceAccountRoles, &karpenter.ServiceAccount{})
}
}
}
if b.Cluster.Spec.KubeScheduler.UsePolicyConfigMap != nil {
key := "scheduler.addons.k8s.io"

View File

@ -22,6 +22,7 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/blang/semver/v4"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/apis/kops/validation"
@ -173,6 +174,10 @@ func PopulateInstanceGroupSpec(cluster *kops.Cluster, input *kops.InstanceGroup,
}
}
}
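// Instance groups default to the cloud-managed lifecycle (ASGs on AWS)
// unless the spec explicitly names another manager such as Karpenter.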
if ig.Spec.Manager == "" {
ig.Spec.Manager = kops.InstanceManagerCloudGroup
}
return ig, nil
}

View File

@ -43,6 +43,8 @@ import (
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"sigs.k8s.io/yaml"
kopscontrollerconfig "k8s.io/kops/cmd/kops-controller/pkg/config"
"k8s.io/kops/pkg/apis/kops"
apiModel "k8s.io/kops/pkg/apis/kops/model"
@ -59,7 +61,6 @@ import (
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
gcetpm "k8s.io/kops/upup/pkg/fi/cloudup/gce/tpm"
"k8s.io/kops/util/pkg/env"
"sigs.k8s.io/yaml"
)
// TemplateFunctions provides a collection of methods used throughout the templates
@ -280,6 +281,10 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap, secretStore fi.SecretS
dest["EnableSQSTerminationDraining"] = func() bool { return *cluster.Spec.NodeTerminationHandler.EnableSQSTerminationDraining }
}
dest["ArchitectureOfAMI"] = tf.architectureOfAMI
dest["ParseTaint"] = parseTaint
return nil
}
@ -735,3 +740,48 @@ func (tf *TemplateFunctions) GetNodeInstanceGroups() map[string]kops.InstanceGro
}
return nodegroups
}
func (tf *TemplateFunctions) architectureOfAMI(amiID string) string {
image, err := tf.cloud.(awsup.AWSCloud).ResolveImage(amiID)
if err != nil || image == nil || image.Architecture == nil {
// Fall back to amd64 when the image cannot be resolved.
return "amd64"
}
switch *image.Architecture {
case "x86_64":
return "amd64"
}
return "arm64"
}
// parseTaint takes a taint string and returns a map with its key, value, and effect.
// It mimics the function from https://github.com/kubernetes/kubernetes/blob/master/pkg/util/taints/taints.go
// but returns a map instead of a v1.Taint.
func parseTaint(st string) (map[string]string, error) {
taint := make(map[string]string)
var key string
var value string
var effect string
parts := strings.Split(st, ":")
switch len(parts) {
case 1:
key = parts[0]
case 2:
effect = parts[1]
partsKV := strings.Split(parts[0], "=")
if len(partsKV) > 2 {
return taint, fmt.Errorf("invalid taint spec: %v", st)
}
key = partsKV[0]
if len(partsKV) == 2 {
value = partsKV[1]
}
default:
return taint, fmt.Errorf("invalid taint spec: %v", st)
}
taint["key"] = key
taint["value"] = value
taint["effect"] = effect
return taint, nil
}
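For illustration, a quick sketch of how parseTaint handles the common taint spellings. The wrapper function exampleParseTaint is hypothetical and assumes it is invoked from the same package, where fmt is already imported.
func exampleParseTaint() {
	for _, s := range []string{
		"dedicated=gpu:NoSchedule", // key=value:effect
		"dedicated:NoSchedule",     // key:effect, no value
		"dedicated",                // bare key, no value or effect
	} {
		taint, err := parseTaint(s)
		if err != nil {
			fmt.Println("invalid taint:", err)
			continue
		}
		fmt.Printf("key=%q value=%q effect=%q\n", taint["key"], taint["value"], taint["effect"])
	}
}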