Merge pull request #16147 from justinsb/metal_enroll

feat: "enroll" command to add nodes to a bare-metal cluster
Kubernetes Prow Robot 2023-12-03 07:53:15 +01:00 committed by GitHub
commit 4d0875569b
10 changed files with 800 additions and 4 deletions

View File

@@ -33,6 +33,7 @@ func NewCmdToolbox(f commandutils.Factory, out io.Writer) *cobra.Command {
}
cmd.AddCommand(NewCmdToolboxDump(f, out))
cmd.AddCommand(NewCmdToolboxEnroll(f, out))
cmd.AddCommand(NewCmdToolboxTemplate(f, out))
cmd.AddCommand(NewCmdToolboxInstanceSelector(f, out))
cmd.AddCommand(NewCmdToolboxAddons(out))

View File

@@ -0,0 +1,54 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"io"
"github.com/spf13/cobra"
"k8s.io/kops/pkg/commands"
"k8s.io/kops/pkg/commands/commandutils"
"k8s.io/kubectl/pkg/util/i18n"
"k8s.io/kubectl/pkg/util/templates"
)
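// NewCmdToolboxEnroll builds the cobra command for `kops toolbox enroll`.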
func NewCmdToolboxEnroll(f commandutils.Factory, out io.Writer) *cobra.Command {
options := &commands.ToolboxEnrollOptions{}
options.InitDefaults()
cmd := &cobra.Command{
Use: "enroll [CLUSTER]",
Short: i18n.T(`Add machine to cluster`),
Long: templates.LongDesc(i18n.T(`
Adds an individual machine to the cluster.`)),
Example: templates.Examples(i18n.T(`
kops toolbox enroll --name k8s-cluster.example.com
`)),
RunE: func(cmd *cobra.Command, args []string) error {
return commands.RunToolboxEnroll(cmd.Context(), f, out, options)
},
}
cmd.Flags().StringVar(&options.ClusterName, "cluster", options.ClusterName, "Name of cluster to join")
cmd.Flags().StringVar(&options.InstanceGroup, "instance-group", options.InstanceGroup, "Name of instance-group to join")
cmd.Flags().StringVar(&options.Host, "host", options.Host, "IP/hostname for machine to add")
cmd.Flags().StringVar(&options.SSHUser, "ssh-user", options.SSHUser, "user for ssh")
cmd.Flags().IntVar(&options.SSHPort, "ssh-port", options.SSHPort, "port for ssh")
return cmd
}

View File

@@ -25,6 +25,7 @@ Miscellaneous, experimental, or infrequently used commands.
* [kops](kops.md) - kOps is Kubernetes Operations.
* [kops toolbox addons](kops_toolbox_addons.md) - Manage addons
* [kops toolbox dump](kops_toolbox_dump.md) - Dump cluster information
* [kops toolbox enroll](kops_toolbox_enroll.md) - Add machine to cluster
* [kops toolbox instance-selector](kops_toolbox_instance-selector.md) - Generate instance-group specs by providing resource specs such as vcpus and memory.
* [kops toolbox template](kops_toolbox_template.md) - Generate cluster.yaml from template

docs/cli/kops_toolbox_enroll.md (generated, new file, +45 lines)
View File

@@ -0,0 +1,45 @@
<!--- This file is automatically generated by make gen-cli-docs; changes should be made in the go CLI command code (under cmd/kops) -->
## kops toolbox enroll
Add machine to cluster
### Synopsis
Adds an individual machine to the cluster.
```
kops toolbox enroll [CLUSTER] [flags]
```
### Examples
```
kops toolbox enroll --name k8s-cluster.example.com
```
### Options
```
--cluster string Name of cluster to join
-h, --help help for enroll
--host string IP/hostname for machine to add
--instance-group string Name of instance-group to join
--ssh-port int port for ssh (default 22)
--ssh-user string user for ssh (default "root")
```
### Options inherited from parent commands
```
--config string yaml config file (default is $HOME/.kops.yaml)
--name string Name of cluster. Overrides KOPS_CLUSTER_NAME environment variable
--state string Location of state storage (kops 'config' file). Overrides KOPS_STATE_STORE environment variable
-v, --v Level number for the log level verbosity
```
### SEE ALSO
* [kops toolbox](kops_toolbox.md) - Miscellaneous, experimental, or infrequently used commands.

docs/metal.md (new file, +164 lines)
View File

@@ -0,0 +1,164 @@
# Bare Metal Support
***Bare metal support is experimental, and may be removed at any time***
## Introduction
kOps has some experimental bare-metal support, specifically for nodes. The idea
we are exploring is that you can run your control-plane in a cloud, but you can
join physical machines as nodes to that control-plane, even though those nodes
are not located in the cloud.
This approach has some limitations and complexities - for example, the
cloud-controller-manager for the control plane won't be able to attach volumes
to the nodes, because they aren't cloud VMs. The advantage is that we can
implement bare-metal support for nodes first, before tackling the complexities
of the control plane.
## Walkthrough
Create a "normal" kOps cluster, but make sure the Metal feature-flag is set;
here we are using GCE:
```
export KOPS_FEATURE_FLAGS=Metal
kops create cluster foo.k8s.local --cloud gce --zones us-east4-a
kops update cluster --yes --admin foo.k8s.local
kops validate cluster --wait=10m
```
Create the kops-system namespace, which holds the host information generated
when machines join the cluster. Although this information is sensitive, it is
not stored in Secrets, because it only contains public keys:
```
kubectl create ns kops-system
kubectl apply --server-side -f k8s/crds/kops.k8s.io_hosts.yaml
```
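Once machines are enrolled (later in this walkthrough), each one appears as a Host
object in this namespace. You can inspect them with the commands below; this assumes
the CRD installed above is named `hosts.kops.k8s.io`:
```
kubectl get crd hosts.kops.k8s.io
kubectl get hosts -n kops-system -o yaml
```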
Create a ClusterRoleBinding and ClusterRole to allow kops-controller
to read the Host objects:
```
kubectl apply --server-side -f - <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kops-controller:pki-verifier
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kops-controller:pki-verifier
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: system:serviceaccount:kube-system:kops-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kops-controller:pki-verifier
rules:
- apiGroups:
  - "kops.k8s.io"
  resources:
  - hosts
  verbs:
  - get
  - list
  - watch
EOF
```
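To sanity-check the binding, you can ask the API server whether the kops-controller
identity is now allowed to list Host objects (an optional check; the expected answer
is "yes"):
```
kubectl auth can-i list hosts.kops.k8s.io \
  --as=system:serviceaccount:kube-system:kops-controller
```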
### Create a VM
When first trying this out, we recommend creating a local VM instead of a true
bare-metal machine.
```
mkdir vm1
cd vm1
wget -O debian11.qcow2 https://cloud.debian.org/images/cloud/bullseye/20231013-1532/debian-11-nocloud-amd64-20231013-1532.qcow2
qemu-img create -o backing_file=debian11.qcow2,backing_fmt=qcow2 -f qcow2 vm1-root.qcow2 10G
qemu-system-x86_64 \
-smp 2 \
-enable-kvm \
-netdev user,id=net0,net=192.168.76.0/24,dhcpstart=192.168.76.9,hostfwd=tcp::2222-:22 \
-device rtl8139,netdev=net0 \
-m 4G \
-drive file=vm1-root.qcow2,if=virtio,format=qcow2 \
-nographic -serial mon:stdio
```
Now log in as root (there is no password), and set up SSH and the machine name:
```
ssh-keygen -A
systemctl restart sshd
echo "vm1" > /etc/hostname
hostname vm1
```
The `kops toolbox enroll` command currently only supports an SSH agent for the
private key; get your public key from `ssh-add -L`, and then manually add it to
the `authorized_keys` file on the VM:
```
mkdir ~/.ssh/
vim ~/.ssh/authorized_keys
```
After you've done this, open a new terminal and SSH should now work
from the host: `ssh -p 2222 root@127.0.0.1 uptime`
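If you don't yet have an SSH agent running on the host, a minimal setup looks like
this (assuming a key at `~/.ssh/id_ed25519`; adjust the path to your key):
```
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/id_ed25519
ssh-add -L    # the public key to paste into authorized_keys on the VM
```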
### Joining the VM to the cluster
```
go run ./cmd/kops toolbox enroll --cluster foo.k8s.local --instance-group nodes-us-east4-a --ssh-user root --host 127.0.0.1 --ssh-port 2222
```
Within a minute or so, the node should appear in `kubectl get nodes`.
If the node doesn't appear, first check the kops-configuration log:
`ssh root@127.0.0.1 -p 2222 journalctl -u kops-configuration`
And then if that looks OK (ends in "success"), check the kubelet log:
`ssh root@127.0.0.1 -p 2222 journalctl -u kubelet`.
### The state of the node
You should observe that the node is running, and pods are scheduled to the node.
```
kubectl get pods -A --field-selector spec.nodeName=vm1
```
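You can also inspect the node object itself (`vm1` is the hostname we set earlier):
```
kubectl get nodes -o wide
kubectl describe node vm1
```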
Cilium will likely be running on the node.
The GCE PD CSI driver is scheduled, but is likely crash-looping
because it can't reach the GCE metadata service. You can see this from the
logs on the VM in `/var/log/containers`
(e.g. `ssh root@127.0.0.1 -p 2222 cat /var/log/containers/*gce-pd-driver*.log`).
If you try to use `kubectl logs`, you will see an error like the one below, which
indicates another problem - that the control plane cannot reach the kubelet:
`Error from server: Get "https://192.168.76.9:10250/containerLogs/gce-pd-csi-driver/csi-gce-pd-node-l2rm8/csi-driver-registrar": dial tcp 192.168.76.9:10250: i/o timeout`
### Cleanup
Quit the qemu VM with `Ctrl-a x`.
Delete the node and its Host object:
```
kubectl delete node vm1
kubectl delete host -n kops-system vm1
```
If you're done with the cluster, delete it too:
```
kops delete cluster foo.k8s.local --yes
```
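If you created the local VM images in a scratch directory as above, they can be
removed as well:
```
rm -rf vm1/
```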

View File

@@ -0,0 +1,522 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package commands
import (
"bytes"
"context"
cryptorand "crypto/rand"
"encoding/hex"
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"path"
"sort"
"strconv"
"strings"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/v1alpha2"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/commands/commandutils"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/model/resources"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kops/util/pkg/architectures"
"k8s.io/kops/util/pkg/mirrors"
"k8s.io/kops/util/pkg/vfs"
)
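// ToolboxEnrollOptions holds the options for the toolbox enroll command.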
type ToolboxEnrollOptions struct {
ClusterName string
InstanceGroup string
Host string
SSHUser string
SSHPort int
}
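// InitDefaults populates the default values: SSH as root on port 22.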
func (o *ToolboxEnrollOptions) InitDefaults() {
o.SSHUser = "root"
o.SSHPort = 22
}
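// RunToolboxEnroll implements the toolbox enroll command: it builds the nodeup bootstrap script for the instance group and, when --host is set, connects to the machine over SSH to enroll it.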
func RunToolboxEnroll(ctx context.Context, f commandutils.Factory, out io.Writer, options *ToolboxEnrollOptions) error {
if !featureflag.Metal.Enabled() {
return fmt.Errorf("bare-metal support requires the Metal feature flag to be enabled")
}
if options.ClusterName == "" {
return fmt.Errorf("cluster is required")
}
if options.InstanceGroup == "" {
return fmt.Errorf("instance-group is required")
}
clientset, err := f.KopsClient()
if err != nil {
return err
}
cluster, err := clientset.GetCluster(ctx, options.ClusterName)
if err != nil {
return err
}
if cluster == nil {
return fmt.Errorf("cluster not found %q", options.ClusterName)
}
ig, err := clientset.InstanceGroupsFor(cluster).Get(ctx, options.InstanceGroup, metav1.GetOptions{})
if err != nil {
return err
}
cloud, err := cloudup.BuildCloud(cluster)
if err != nil {
return err
}
apiserverAdditionalIPs := []string{}
{
ingresses, err := cloud.GetApiIngressStatus(cluster)
if err != nil {
return fmt.Errorf("error getting ingress status: %v", err)
}
for _, ingress := range ingresses {
// TODO: Do we need to support hostnames?
// if ingress.Hostname != "" {
// apiserverAdditionalIPs = append(apiserverAdditionalIPs, ingress.Hostname)
// }
if ingress.IP != "" {
apiserverAdditionalIPs = append(apiserverAdditionalIPs, ingress.IP)
}
}
}
if len(apiserverAdditionalIPs) == 0 {
// TODO: Should we support DNS?
return fmt.Errorf("unable to determine IP address for kops-controller")
}
scriptBytes, err := buildBootstrapData(ctx, clientset, cluster, ig, apiserverAdditionalIPs)
if err != nil {
return err
}
if options.Host != "" {
// TODO: This is the pattern we use a lot, but should we try to access it directly?
contextName := cluster.ObjectMeta.Name
clientGetter := genericclioptions.NewConfigFlags(true)
clientGetter.Context = &contextName
restConfig, err := clientGetter.ToRESTConfig()
if err != nil {
return fmt.Errorf("cannot load kubecfg settings for %q: %w", contextName, err)
}
if err := enrollHost(ctx, options, string(scriptBytes), restConfig); err != nil {
return err
}
}
return nil
}
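// enrollHost connects to the machine over SSH, ensures it has a machine keypair (creating one if necessary), registers the machine as a Host object in the cluster, and then runs the nodeup bootstrap script.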
func enrollHost(ctx context.Context, options *ToolboxEnrollOptions, nodeupScript string, restConfig *rest.Config) error {
scheme := runtime.NewScheme()
if err := v1alpha2.AddToScheme(scheme); err != nil {
return fmt.Errorf("building kubernetes scheme: %w", err)
}
kubeClient, err := client.New(restConfig, client.Options{
Scheme: scheme,
})
if err != nil {
return fmt.Errorf("building kubernetes client: %w", err)
}
sudo := true
if options.SSHUser == "root" {
sudo = false
}
host, err := NewSSHHost(ctx, options.Host, options.SSHPort, options.SSHUser, sudo)
if err != nil {
return err
}
defer host.Close()
publicKeyPath := "/etc/kubernetes/kops/pki/machine/public.pem"
publicKeyBytes, err := host.readFile(ctx, publicKeyPath)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
publicKeyBytes = nil
} else {
return fmt.Errorf("error reading public key %q: %w", publicKeyPath, err)
}
}
publicKeyBytes = bytes.TrimSpace(publicKeyBytes)
if len(publicKeyBytes) == 0 {
if _, err := host.runScript(ctx, scriptCreateKey, ExecOptions{Sudo: sudo, Echo: true}); err != nil {
return err
}
b, err := host.readFile(ctx, publicKeyPath)
if err != nil {
return fmt.Errorf("error reading public key %q (after creation): %w", publicKeyPath, err)
}
publicKeyBytes = b
}
klog.Infof("public key is %s", string(publicKeyBytes))
hostname, err := host.getHostname(ctx)
if err != nil {
return err
}
if err := createHost(ctx, options, hostname, publicKeyBytes, kubeClient); err != nil {
return err
}
if len(nodeupScript) != 0 {
if _, err := host.runScript(ctx, nodeupScript, ExecOptions{Sudo: sudo, Echo: true}); err != nil {
return err
}
}
return nil
}
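// createHost registers a Host object in the kops-system namespace, recording the instance group and the machine's public key.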
func createHost(ctx context.Context, options *ToolboxEnrollOptions, nodeName string, publicKey []byte, client client.Client) error {
host := &v1alpha2.Host{}
host.Namespace = "kops-system"
host.Name = nodeName
host.Spec.InstanceGroup = options.InstanceGroup
host.Spec.PublicKey = string(publicKey)
if err := client.Create(ctx, host); err != nil {
return fmt.Errorf("failed to create host %s/%s: %w", host.Namespace, host.Name, err)
}
return nil
}
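// scriptCreateKey generates an ECDSA (prime256v1) machine keypair under /etc/kubernetes/kops/pki/machine/ if one does not already exist.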
const scriptCreateKey = `
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
set -x
DIR=/etc/kubernetes/kops/pki/machine/
mkdir -p ${DIR}
if [[ ! -f "${DIR}/private.pem" ]]; then
openssl ecparam -name prime256v1 -genkey -noout -out "${DIR}/private.pem"
fi
if [[ ! -f "${DIR}/public.pem" ]]; then
openssl ec -in "${DIR}/private.pem" -pubout -out "${DIR}/public.pem"
fi
`
// SSHHost is a wrapper around an SSH connection to a host machine.
type SSHHost struct {
hostname string
sshClient *ssh.Client
sudo bool
}
// Close closes the connection.
func (s *SSHHost) Close() error {
if s.sshClient != nil {
if err := s.sshClient.Close(); err != nil {
return err
}
s.sshClient = nil
}
return nil
}
// NewSSHHost creates a new SSHHost.
func NewSSHHost(ctx context.Context, host string, sshPort int, sshUser string, sudo bool) (*SSHHost, error) {
socket := os.Getenv("SSH_AUTH_SOCK")
if socket == "" {
return nil, fmt.Errorf("cannot connect to SSH agent; SSH_AUTH_SOCK env variable not set")
}
conn, err := net.Dial("unix", socket)
if err != nil {
return nil, fmt.Errorf("failed to connect to SSH agent with SSH_AUTH_SOCK %q: %w", socket, err)
}
agentClient := agent.NewClient(conn)
sshConfig := &ssh.ClientConfig{
HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error {
klog.Warningf("accepting SSH key %v for %q", key, hostname)
return nil
},
Auth: []ssh.AuthMethod{
// Use a callback rather than PublicKeys so we only consult the
// agent once the remote server wants it.
ssh.PublicKeysCallback(agentClient.Signers),
},
User: sshUser,
}
sshClient, err := ssh.Dial("tcp", host+":"+strconv.Itoa(sshPort), sshConfig)
if err != nil {
return nil, fmt.Errorf("failed to SSH to %q (with user %q): %w", host, sshUser, err)
}
return &SSHHost{
hostname: host,
sshClient: sshClient,
sudo: sudo,
}, nil
}
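// readFile reads the contents of a file on the remote host.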
func (s *SSHHost) readFile(ctx context.Context, path string) ([]byte, error) {
p := vfs.NewSSHPath(s.sshClient, s.hostname, path, s.sudo)
return p.ReadFile(ctx)
}
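// runScript copies the given script to a random temporary directory on the remote host, runs it with bash, and removes the directory afterwards.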
func (s *SSHHost) runScript(ctx context.Context, script string, options ExecOptions) (*CommandOutput, error) {
var tempDir string
{
b := make([]byte, 32)
if _, err := cryptorand.Read(b); err != nil {
return nil, fmt.Errorf("error getting random data: %w", err)
}
tempDir = path.Join("/tmp", hex.EncodeToString(b))
}
scriptPath := path.Join(tempDir, "script.sh")
p := vfs.NewSSHPath(s.sshClient, s.hostname, scriptPath, s.sudo)
defer func() {
if _, err := s.runCommand(ctx, "rm -rf "+tempDir, ExecOptions{Sudo: s.sudo, Echo: false}); err != nil {
klog.Warningf("error cleaning up temp directory %q: %v", tempDir, err)
}
}()
if err := p.WriteFile(ctx, bytes.NewReader([]byte(script)), nil); err != nil {
return nil, fmt.Errorf("error writing script to SSH target: %w", err)
}
scriptCommand := "/bin/bash " + scriptPath
return s.runCommand(ctx, scriptCommand, options)
}
// CommandOutput holds the results of running a command.
type CommandOutput struct {
Stdout bytes.Buffer
Stderr bytes.Buffer
}
// ExecOptions holds options for running a command remotely.
type ExecOptions struct {
Sudo bool
Echo bool
}
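// runCommand runs a single command on the remote host over SSH, capturing stdout and stderr; when Echo is set the output is also streamed locally, and when Sudo is set the command is prefixed with sudo.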
func (s *SSHHost) runCommand(ctx context.Context, command string, options ExecOptions) (*CommandOutput, error) {
session, err := s.sshClient.NewSession()
if err != nil {
return nil, fmt.Errorf("failed to start SSH session: %w", err)
}
defer session.Close()
output := &CommandOutput{}
session.Stdout = &output.Stdout
session.Stderr = &output.Stderr
if options.Echo {
session.Stdout = io.MultiWriter(os.Stdout, session.Stdout)
session.Stderr = io.MultiWriter(os.Stderr, session.Stderr)
}
if options.Sudo {
command = "sudo " + command
}
if err := session.Run(command); err != nil {
return output, fmt.Errorf("error running command %q: %w", command, err)
}
return output, nil
}
// getHostname gets the hostname of the SSH target.
// This is used as the node name when registering the node.
func (s *SSHHost) getHostname(ctx context.Context) (string, error) {
output, err := s.runCommand(ctx, "hostname", ExecOptions{Sudo: false, Echo: true})
if err != nil {
return "", fmt.Errorf("failed to get hostname: %w", err)
}
hostname := output.Stdout.String()
hostname = strings.TrimSpace(hostname)
if len(hostname) == 0 {
return "", fmt.Errorf("hostname was empty")
}
return hostname, nil
}
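// buildBootstrapData builds the nodeup bootstrap script for the instance group, embedding the nodeup assets, the kubernetes-ca keyset, and the kops-controller/apiserver IPs, and marking the boot config with the "metal" cloud provider.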
func buildBootstrapData(ctx context.Context, clientset simple.Clientset, cluster *kops.Cluster, ig *kops.InstanceGroup, apiserverAdditionalIPs []string) ([]byte, error) {
if cluster.Spec.KubeAPIServer == nil {
cluster.Spec.KubeAPIServer = &kops.KubeAPIServerConfig{}
}
getAssets := false
assetBuilder := assets.NewAssetBuilder(clientset.VFSContext(), cluster.Spec.Assets, cluster.Spec.KubernetesVersion, getAssets)
encryptionConfigSecretHash := ""
// TODO: Support encryption config?
// if fi.ValueOf(c.Cluster.Spec.EncryptionConfig) {
// secret, err := secretStore.FindSecret("encryptionconfig")
// if err != nil {
// return fmt.Errorf("could not load encryptionconfig secret: %v", err)
// }
// if secret == nil {
// fmt.Println("")
// fmt.Println("You have encryptionConfig enabled, but no encryptionconfig secret has been set.")
// fmt.Println("See `kops create secret encryptionconfig -h` and https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/")
// return fmt.Errorf("could not find encryptionconfig secret")
// }
// hashBytes := sha256.Sum256(secret.Data)
// encryptionConfigSecretHash = base64.URLEncoding.EncodeToString(hashBytes[:])
// }
nodeUpAssets := make(map[architectures.Architecture]*mirrors.MirroredAsset)
for _, arch := range architectures.GetSupported() {
asset, err := cloudup.NodeUpAsset(assetBuilder, arch)
if err != nil {
return nil, err
}
nodeUpAssets[arch] = asset
}
assets := make(map[architectures.Architecture][]*mirrors.MirroredAsset)
configBuilder, err := cloudup.NewNodeUpConfigBuilder(cluster, assetBuilder, assets, encryptionConfigSecretHash)
if err != nil {
return nil, err
}
keysets := make(map[string]*fi.Keyset)
keystore, err := clientset.KeyStore(cluster)
if err != nil {
return nil, err
}
for _, keyName := range []string{"kubernetes-ca"} {
keyset, err := keystore.FindKeyset(ctx, keyName)
if err != nil {
return nil, fmt.Errorf("getting keyset %q: %w", keyName, err)
}
if keyset == nil {
return nil, fmt.Errorf("failed to find keyset %q", keyName)
}
keysets[keyName] = keyset
}
_, bootConfig, err := configBuilder.BuildConfig(ig, apiserverAdditionalIPs, keysets)
if err != nil {
return nil, err
}
bootConfig.CloudProvider = "metal"
// TODO: Should we / can we specify the node config hash?
// configData, err := utils.YamlMarshal(config)
// if err != nil {
// return nil, fmt.Errorf("error converting nodeup config to yaml: %v", err)
// }
// sum256 := sha256.Sum256(configData)
// bootConfig.NodeupConfigHash = base64.StdEncoding.EncodeToString(sum256[:])
var nodeupScript resources.NodeUpScript
nodeupScript.NodeUpAssets = nodeUpAssets
nodeupScript.BootConfig = bootConfig
{
nodeupScript.EnvironmentVariables = func() (string, error) {
env := make(map[string]string)
// TODO: Support the full set of environment variables?
// env, err := b.buildEnvironmentVariables()
// if err != nil {
// return "", err
// }
// Sort keys to have a stable sequence of "export xx=xxx"" statements
var keys []string
for k := range env {
keys = append(keys, k)
}
sort.Strings(keys)
var b bytes.Buffer
for _, k := range keys {
b.WriteString(fmt.Sprintf("export %s=%s\n", k, env[k]))
}
return b.String(), nil
}
nodeupScript.ProxyEnv = func() (string, error) {
return "", nil
// TODO: Support proxy?
// return b.createProxyEnv(cluster.Spec.Networking.EgressProxy)
}
}
// TODO: Support sysctls?
// By setting some sysctls early, we avoid broken configurations that prevent nodeup download.
// See https://github.com/kubernetes/kops/issues/10206 for details.
// nodeupScript.SetSysctls = setSysctls()
nodeupScript.CloudProvider = string(cluster.Spec.GetCloudProvider())
nodeupScriptResource, err := nodeupScript.Build()
if err != nil {
return nil, err
}
b, err := fi.ResourceAsBytes(nodeupScriptResource)
if err != nil {
return nil, err
}
return b, nil
}

View File

@@ -94,6 +94,8 @@ var (
SELinuxMount = new("SELinuxMount", Bool(false))
// DO Terraform toggles the DO terraform support.
DOTerraform = new("DOTerraform", Bool(false))
// Metal enables the experimental bare-metal support.
Metal = new("Metal", Bool(false))
)
// FeatureFlag defines a feature flag

View File

@@ -516,7 +516,7 @@ func (c *ApplyClusterCmd) Run(ctx context.Context) error {
cloud: cloud,
}
configBuilder, err := newNodeUpConfigBuilder(cluster, assetBuilder, c.Assets, encryptionConfigSecretHash)
configBuilder, err := NewNodeUpConfigBuilder(cluster, assetBuilder, c.Assets, encryptionConfigSecretHash)
if err != nil {
return err
}
@@ -1196,7 +1196,7 @@ type nodeUpConfigBuilder struct {
encryptionConfigSecretHash string
}
func newNodeUpConfigBuilder(cluster *kops.Cluster, assetBuilder *assets.AssetBuilder, assets map[architectures.Architecture][]*mirrors.MirroredAsset, encryptionConfigSecretHash string) (model.NodeUpConfigBuilder, error) {
func NewNodeUpConfigBuilder(cluster *kops.Cluster, assetBuilder *assets.AssetBuilder, assets map[architectures.Architecture][]*mirrors.MirroredAsset, encryptionConfigSecretHash string) (model.NodeUpConfigBuilder, error) {
configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigStore.Base)
if err != nil {
return nil, fmt.Errorf("error parsing configStore.base %q: %v", cluster.Spec.ConfigStore.Base, err)

View File

@@ -50,6 +50,7 @@ import (
"k8s.io/kops/pkg/apis/kops"
apiModel "k8s.io/kops/pkg/apis/kops/model"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/bootstrap/pkibootstrap"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/flagbuilder"
@@ -684,6 +685,10 @@ func (tf *TemplateFunctions) KopsControllerConfig() (string, error) {
CertNames: certNames,
}
if featureflag.Metal.Enabled() {
config.Server.PKI = &pkibootstrap.Options{}
}
switch cluster.Spec.GetCloudProvider() {
case kops.CloudProviderAWS:
nodesRoles := sets.String{}

View File

@@ -154,8 +154,10 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
return fmt.Errorf("no instance group defined in nodeup config")
}
if want, got := bootConfig.NodeupConfigHash, base64.StdEncoding.EncodeToString(nodeupConfigHash[:]); got != want {
return fmt.Errorf("nodeup config hash mismatch (was %q, expected %q)", got, want)
if want := bootConfig.NodeupConfigHash; want != "" {
if got := base64.StdEncoding.EncodeToString(nodeupConfigHash[:]); got != want {
return fmt.Errorf("nodeup config hash mismatch (was %q, expected %q)", got, want)
}
}
err = evaluateSpec(&nodeupConfig, bootConfig.CloudProvider)