Make it possible to create clusters with only karpenter-managed worker nodes

Apply suggestions from code review

Co-authored-by: John Gardiner Myers <jgmyers@proofpoint.com>
This commit is contained in:
Ole Markus With 2021-12-13 10:33:08 +01:00
parent 0ead405b30
commit 8f276cf944
5 changed files with 79 additions and 6 deletions

View File

@ -35,6 +35,9 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
kopsbase "k8s.io/kops"
"k8s.io/kops/cmd/kops/util"
api "k8s.io/kops/pkg/apis/kops"
@ -53,8 +56,6 @@ import (
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kops/upup/pkg/fi/utils"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
)
type CreateClusterOptions struct {
@ -449,6 +450,12 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
return pflag.NormalizedName(name)
})
if featureflag.Karpenter.Enabled() {
cmd.Flags().StringVar(&options.InstanceManager, "instance-manager", options.InstanceManager, "Instance manager to use (cloudgroups or karpenter. Default: cloudgroups)")
cmd.RegisterFlagCompletionFunc("instance-manager", func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
return []string{"cloudgroups", "karpenter"}, cobra.ShellCompDirectiveNoFileComp
})
}
return cmd
}

View File

@ -54,7 +54,7 @@ spec:
version: 9.99.0
- id: k8s-1.19
manifest: karpenter.sh/k8s-1.19.yaml
manifestHash: 45610f3413287dd87de888b7dbbe31931a10f7125aec6ee88516d0d92c9494b4
manifestHash: 632dd9ea482692ffcaf421dafc79081b9f0a745b154948ee5558c47ed1802d52
name: karpenter.sh
selector:
k8s-addon: karpenter.sh

View File

@ -653,10 +653,15 @@ spec:
name: token-amazonaws-com
readOnly: true
dnsPolicy: Default
nodeSelector:
node-role.kubernetes.io/master: ""
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 10001
serviceAccountName: karpenter
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
volumes:
- name: token-amazonaws-com
projected:
@ -744,10 +749,19 @@ spec:
- mountPath: /var/run/secrets/amazonaws.com/
name: token-amazonaws-com
readOnly: true
nodeSelector:
node-role.kubernetes.io/master: ""
priorityClassName: system-cluster-critical
securityContext:
fsGroup: 10001
serviceAccountName: karpenter
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
volumes:
- name: token-amazonaws-com
projected:

View File

@ -488,6 +488,12 @@ spec:
- linux
- key: karpenter.sh/provisioner-name
operator: DoesNotExist
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
nodeSelector:
node-role.kubernetes.io/master: ""
---
# Source: karpenter/templates/webhook/deployment.yaml
apiVersion: apps/v1
@ -554,6 +560,15 @@ spec:
- linux
- key: karpenter.sh/provisioner-name
operator: DoesNotExist
tolerations:
- key: node.cloudprovider.kubernetes.io/uninitialized
operator: Exists
- key: node.kubernetes.io/not-ready
operator: Exists
- key: node-role.kubernetes.io/master
operator: Exists
nodeSelector:
node-role.kubernetes.io/master: ""
---
# Source: karpenter/templates/webhook/webhooks.yaml
apiVersion: admissionregistration.k8s.io/v1

View File

@ -27,6 +27,7 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog/v2"
"k8s.io/kops"
api "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/model"
@ -146,6 +147,9 @@ type NewClusterOptions struct {
// APISSLCertificate is the SSL certificate to use for the API loadbalancer.
// Currently only supported in AWS.
APISSLCertificate string
// InstanceManager specifies which manager to use for managing instances.
InstanceManager string
}
func (o *NewClusterOptions) InitDefaults() {
@ -155,6 +159,7 @@ func (o *NewClusterOptions) InitDefaults() {
o.Networking = "kubenet"
o.Topology = api.TopologyPublic
o.DNSType = string(api.DNSTypePublic)
o.InstanceManager = "cloudgroups"
}
type NewClusterResult struct {
@ -286,9 +291,27 @@ func NewCluster(opt *NewClusterOptions, clientset simple.Clientset) (*NewCluster
return nil, err
}
nodes, err := setupNodes(opt, &cluster, zoneToSubnetMap)
if err != nil {
return nil, err
var nodes []*api.InstanceGroup
switch opt.InstanceManager {
case "karpenter":
if opt.DiscoveryStore == "" {
return nil, fmt.Errorf("karpenter requires --discovery-store")
}
cluster.Spec.Karpenter = &api.KarpenterConfig{
Enabled: true,
}
nodes, err = setupKarpenterNodes(opt, &cluster, zoneToSubnetMap)
if err != nil {
return nil, err
}
case "cloudgroups":
nodes, err = setupNodes(opt, &cluster, zoneToSubnetMap)
if err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("invalid value %q for --instance-manager", opt.InstanceManager)
}
apiservers, err := setupAPIServers(opt, &cluster, zoneToSubnetMap)
@ -842,6 +865,20 @@ func setupNodes(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetMap ma
return nodes, nil
}
// setupKarpenterNodes creates the single InstanceGroup ("nodes") for a cluster
// whose worker instances are provisioned by karpenter rather than per-zone
// cloud instance groups. The group is given role Node, the Karpenter manager,
// and instance-metadata options requiring tokens (HTTPTokens "required") with
// a PUT response hop limit of 1.
//
// opt, cluster, and zoneToSubnetMap are currently unused; the signature is
// kept in parity with setupNodes so NewCluster can call either interchangeably.
func setupKarpenterNodes(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetMap map[string]*api.ClusterSubnetSpec) ([]*api.InstanceGroup, error) {
	ig := &api.InstanceGroup{}
	ig.ObjectMeta.Name = "nodes"
	ig.Spec.Role = api.InstanceGroupRoleNode
	ig.Spec.Manager = api.InstanceManagerKarpenter
	ig.Spec.InstanceMetadata = &api.InstanceMetadataOptions{
		HTTPTokens:              fi.String("required"),
		HTTPPutResponseHopLimit: fi.Int64(1),
	}
	return []*api.InstanceGroup{ig}, nil
}
func setupAPIServers(opt *NewClusterOptions, cluster *api.Cluster, zoneToSubnetMap map[string]*api.ClusterSubnetSpec) ([]*api.InstanceGroup, error) {
cloudProvider := api.CloudProviderID(cluster.Spec.CloudProvider)