mirror of https://github.com/kubernetes/kops.git
Initial implementation of bundle command
The bundle command will support enrollment of a machine via SSH.
This commit is contained in:
parent
fcc904f468
commit
ec8db8b78c
|
@ -41,6 +41,7 @@ go_library(
|
|||
"root.go",
|
||||
"status_discovery.go",
|
||||
"toolbox.go",
|
||||
"toolbox_bundle.go",
|
||||
"toolbox_convert_imported.go",
|
||||
"toolbox_dump.go",
|
||||
"toolbox_template.go",
|
||||
|
@ -66,6 +67,7 @@ go_library(
|
|||
"//pkg/apis/kops/v1alpha1:go_default_library",
|
||||
"//pkg/apis/kops/validation:go_default_library",
|
||||
"//pkg/assets:go_default_library",
|
||||
"//pkg/bundle:go_default_library",
|
||||
"//pkg/client/simple:go_default_library",
|
||||
"//pkg/cloudinstances:go_default_library",
|
||||
"//pkg/dns:go_default_library",
|
||||
|
@ -99,6 +101,7 @@ go_library(
|
|||
"//vendor/github.com/spf13/cobra:go_default_library",
|
||||
"//vendor/github.com/spf13/cobra/doc:go_default_library",
|
||||
"//vendor/github.com/spf13/viper:go_default_library",
|
||||
"//vendor/golang.org/x/crypto/ssh:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
|
|
|
@ -273,7 +273,7 @@ func (c *RootCmd) Cluster() (*kopsapi.Cluster, error) {
|
|||
return GetCluster(c.factory, clusterName)
|
||||
}
|
||||
|
||||
func GetCluster(factory *util.Factory, clusterName string) (*kopsapi.Cluster, error) {
|
||||
func GetCluster(factory Factory, clusterName string) (*kopsapi.Cluster, error) {
|
||||
if clusterName == "" {
|
||||
return nil, field.Required(field.NewPath("ClusterName"), "Cluster name is required")
|
||||
}
|
||||
|
|
|
@ -47,6 +47,7 @@ func NewCmdToolbox(f *util.Factory, out io.Writer) *cobra.Command {
|
|||
|
||||
cmd.AddCommand(NewCmdToolboxConvertImported(f, out))
|
||||
cmd.AddCommand(NewCmdToolboxDump(f, out))
|
||||
cmd.AddCommand(NewCmdToolboxBundle(f, out))
|
||||
cmd.AddCommand(NewCmdToolboxTemplate(f, out))
|
||||
|
||||
return cmd
|
||||
|
|
|
@ -0,0 +1,215 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/crypto/ssh"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kops/cmd/kops/util"
|
||||
"k8s.io/kops/pkg/bundle"
|
||||
"k8s.io/kops/upup/pkg/kutil"
|
||||
"k8s.io/kops/util/pkg/vfs"
|
||||
"k8s.io/kubernetes/pkg/kubectl/cmd/templates"
|
||||
"k8s.io/kubernetes/pkg/kubectl/util/i18n"
|
||||
)
|
||||
|
||||
var (
|
||||
toolbox_bundle_long = templates.LongDesc(i18n.T(`
|
||||
Creates a bundle for enrolling a bare metal machine.`))
|
||||
|
||||
toolbox_bundle_example = templates.Examples(i18n.T(`
|
||||
# Bundle
|
||||
kops toolbox bundle --name k8s-cluster.example.com
|
||||
`))
|
||||
|
||||
toolbox_bundle_short = i18n.T(`Bundle cluster information`)
|
||||
)
|
||||
|
||||
// ToolboxBundleOptions holds the command-line options for `kops toolbox bundle`.
type ToolboxBundleOptions struct {
	// Target is the machine we are enrolling in the cluster
	Target string
}

// InitDefaults populates default option values (currently there are none).
func (o *ToolboxBundleOptions) InitDefaults() {
}
|
||||
|
||||
func NewCmdToolboxBundle(f *util.Factory, out io.Writer) *cobra.Command {
|
||||
options := &ToolboxBundleOptions{}
|
||||
options.InitDefaults()
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "bundle",
|
||||
Short: toolbox_bundle_short,
|
||||
Long: toolbox_bundle_long,
|
||||
Example: toolbox_bundle_example,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
err := RunToolboxBundle(f, out, options, args)
|
||||
if err != nil {
|
||||
exitWithError(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().StringVar(&options.Target, "target", options.Target, "machine to target (IP address)")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func RunToolboxBundle(context Factory, out io.Writer, options *ToolboxBundleOptions, args []string) error {
|
||||
if len(args) == 0 {
|
||||
return fmt.Errorf("Specify name of instance group for node")
|
||||
}
|
||||
if len(args) != 1 {
|
||||
return fmt.Errorf("Can only specify one instance group")
|
||||
}
|
||||
|
||||
if options.Target == "" {
|
||||
return fmt.Errorf("target is required")
|
||||
}
|
||||
groupName := args[0]
|
||||
|
||||
cluster, err := rootCommand.Cluster()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clientset, err := context.Clientset()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ig, err := clientset.InstanceGroupsFor(cluster).Get(groupName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("error reading InstanceGroup %q: %v", groupName, err)
|
||||
}
|
||||
if ig == nil {
|
||||
return fmt.Errorf("InstanceGroup %q not found", groupName)
|
||||
}
|
||||
|
||||
builder := bundle.Builder{
|
||||
Clientset: clientset,
|
||||
}
|
||||
bundleData, err := builder.Build(cluster, ig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building bundle: %v", err)
|
||||
}
|
||||
|
||||
sshUser := os.Getenv("USER")
|
||||
|
||||
nodeSSH := &kutil.NodeSSH{
|
||||
Hostname: options.Target,
|
||||
}
|
||||
nodeSSH.SSHConfig.HostKeyCallback = ssh.InsecureIgnoreHostKey()
|
||||
nodeSSH.SSHConfig.User = sshUser
|
||||
sshIdentity := filepath.Join(os.Getenv("HOME"), ".ssh", "id_rsa")
|
||||
if err := kutil.AddSSHIdentity(&nodeSSH.SSHConfig, sshIdentity); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sshClient, err := nodeSSH.GetSSHClient()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting SSH client: %v", err)
|
||||
}
|
||||
|
||||
if err := runSshCommand(sshClient, "sudo mkdir -p /etc/kubernetes/bootstrap"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
root, err := nodeSSH.Root()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error connecting to nodeSSH: %v", err)
|
||||
}
|
||||
for _, file := range bundleData.Files {
|
||||
sshAcl := &vfs.SSHAcl{
|
||||
Mode: file.Header.FileInfo().Mode(),
|
||||
}
|
||||
p := root.Join("etc", "kubernetes", "bootstrap", file.Header.Name)
|
||||
glog.Infof("writing %s", p)
|
||||
if err := p.WriteFile(file.Data, sshAcl); err != nil {
|
||||
return fmt.Errorf("error writing file %q: %v", file.Header.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := runSshCommand(sshClient, "sudo /etc/kubernetes/bootstrap/bootstrap.sh"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func runSshCommand(sshClient *ssh.Client, cmd string) error {
|
||||
s, err := sshClient.NewSession()
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating ssh session: %v", err)
|
||||
}
|
||||
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
|
||||
s.Stdout = io.MultiWriter(&stdout, os.Stdout)
|
||||
s.Stderr = io.MultiWriter(&stderr, os.Stderr)
|
||||
|
||||
glog.Infof("running %s", cmd)
|
||||
if err := s.Run(cmd); err != nil {
|
||||
return fmt.Errorf("error running %s: %v\nstdout: %s\nstderr: %s", cmd, err, stdout.String(), stderr.String())
|
||||
}
|
||||
|
||||
glog.Infof("stdout: %s", stdout.String())
|
||||
glog.Infof("stderr: %s", stderr.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeToTar(files []*bundle.DataFile, bundlePath string) error {
|
||||
f, err := os.Create(bundlePath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating output bundle file %q: %v", bundlePath, err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
gw := gzip.NewWriter(f)
|
||||
defer gw.Close()
|
||||
tw := tar.NewWriter(gw)
|
||||
defer tw.Close()
|
||||
|
||||
for _, file := range files {
|
||||
if err := tw.WriteHeader(&file.Header); err != nil {
|
||||
return fmt.Errorf("error writing tar file header: %v", err)
|
||||
}
|
||||
|
||||
if _, err := tw.Write(file.Data); err != nil {
|
||||
return fmt.Errorf("error writing tar file data: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// bazel build //cmd/kops && bazel-bin/cmd/kops/kops toolbox bundle --name ${CLUSTER} ${IGNAME} && scp output.tar.gz ${TARGET}:/tmp/output.tar.gz
|
||||
// sudo apt-get install --yes ca-certificates
|
||||
// sudo mkdir -p /etc/kubernetes/bootstrap
|
||||
// sudo tar zx -C /etc/kubernetes/bootstrap -f /tmp/output.tar.gz
|
||||
// sudo /etc/kubernetes/bootstrap/bootstrap.sh
|
|
@ -34,6 +34,7 @@ Misc infrequently used commands.
|
|||
|
||||
### SEE ALSO
|
||||
* [kops](kops.md) - kops is Kubernetes ops.
|
||||
* [kops toolbox bundle](kops_toolbox_bundle.md) - Bundle cluster information
|
||||
* [kops toolbox convert-imported](kops_toolbox_convert-imported.md) - Convert an imported cluster into a kops cluster.
|
||||
* [kops toolbox dump](kops_toolbox_dump.md) - Dump cluster information
|
||||
* [kops toolbox template](kops_toolbox_template.md) - Generate cluster.yaml from template
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
|
||||
<!--- This file is automatically generated by make gen-cli-docs; changes should be made in the go CLI command code (under cmd/kops) -->
|
||||
|
||||
## kops toolbox bundle
|
||||
|
||||
Bundle cluster information
|
||||
|
||||
### Synopsis
|
||||
|
||||
|
||||
Creates a bundle for enrolling a bare metal machine.
|
||||
|
||||
```
|
||||
kops toolbox bundle
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
# Bundle
|
||||
kops toolbox bundle --name k8s-cluster.example.com
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
--target string machine to target (IP address)
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--config string config file (default is $HOME/.kops.yaml)
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--logtostderr log to standard error instead of files (default false)
|
||||
--name string Name of cluster
|
||||
--state string Location of state storage
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level log level for V logs
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
* [kops toolbox](kops_toolbox.md) - Misc infrequently used commands.
|
||||
|
|
@ -52,6 +52,7 @@ k8s.io/kops/pkg/apiserver/cmd/server
|
|||
k8s.io/kops/pkg/apiserver/registry/cluster
|
||||
k8s.io/kops/pkg/apiserver/registry/instancegroup
|
||||
k8s.io/kops/pkg/assets
|
||||
k8s.io/kops/pkg/bundle
|
||||
k8s.io/kops/pkg/client/clientset_generated/clientset
|
||||
k8s.io/kops/pkg/client/clientset_generated/clientset/fake
|
||||
k8s.io/kops/pkg/client/clientset_generated/clientset/scheme
|
||||
|
|
|
@ -176,7 +176,7 @@ def get_regexs():
|
|||
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
|
||||
regexs["year"] = re.compile( 'YEAR' )
|
||||
# dates can be 2014, 2015, 2016, or 2017; company holder names can be anything
|
||||
regexs["date"] = re.compile( '(2014|2015|2016|2017)' )
|
||||
regexs["date"] = re.compile( '(2014|2015|2016|2017|2018)' )
|
||||
# strip // +build \n\n build constraints
|
||||
regexs["go_build_constraints"] = re.compile(r"^(// \+build.*\n)+\n", re.MULTILINE)
|
||||
# strip #!.* from shell scripts
|
||||
|
|
|
@ -0,0 +1,22 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["builder.go"],
|
||||
importpath = "k8s.io/kops/pkg/bundle",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/apis/kops:go_default_library",
|
||||
"//pkg/apis/kops/registry:go_default_library",
|
||||
"//pkg/apis/nodeup:go_default_library",
|
||||
"//pkg/assets:go_default_library",
|
||||
"//pkg/client/simple:go_default_library",
|
||||
"//pkg/kopscodecs:go_default_library",
|
||||
"//pkg/model:go_default_library",
|
||||
"//upup/pkg/fi:go_default_library",
|
||||
"//upup/pkg/fi/cloudup:go_default_library",
|
||||
"//upup/pkg/fi/utils:go_default_library",
|
||||
"//util/pkg/vfs:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
],
|
||||
)
|
|
@ -0,0 +1,315 @@
|
|||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package bundle
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kops/pkg/apis/kops"
|
||||
"k8s.io/kops/pkg/apis/kops/registry"
|
||||
"k8s.io/kops/pkg/apis/nodeup"
|
||||
"k8s.io/kops/pkg/assets"
|
||||
"k8s.io/kops/pkg/client/simple"
|
||||
"k8s.io/kops/pkg/kopscodecs"
|
||||
"k8s.io/kops/pkg/model"
|
||||
"k8s.io/kops/upup/pkg/fi"
|
||||
"k8s.io/kops/upup/pkg/fi/cloudup"
|
||||
"k8s.io/kops/upup/pkg/fi/utils"
|
||||
"k8s.io/kops/util/pkg/vfs"
|
||||
)
|
||||
|
||||
// Builder builds a bundle
type Builder struct {
	// Clientset is used to read the cluster's configuration and key store.
	Clientset simple.Clientset
}

// Data holds the contents of a built bundle.
type Data struct {
	// Files are the files that make up the bundle.
	Files []*DataFile
}

// DataFile is a single file within a bundle.
type DataFile struct {
	// Header carries the tar metadata (name, size, mode) for the file.
	Header tar.Header
	// Data is the raw file contents.
	Data []byte
}
|
||||
|
||||
// Build assembles the enrollment bundle for the given instance group: the
// completed cluster spec (rewritten to point at on-host bootstrap paths), the
// instance group definition, the PKI files, the bootstrap script, and any
// addon manifests referenced by the nodeup channels.
func (b *Builder) Build(cluster *kops.Cluster, ig *kops.InstanceGroup) (*Data, error) {
	glog.Infof("building bundle for %q", ig.Name)
	keyStore, err := b.Clientset.KeyStore(cluster)
	if err != nil {
		return nil, err
	}

	// Load the completed ("full") cluster spec from the config base.
	fullCluster := &kops.Cluster{}
	{
		configBase, err := b.Clientset.ConfigBaseFor(cluster)
		if err != nil {
			return nil, fmt.Errorf("error building ConfigBase for cluster: %v", err)
		}

		p := configBase.Join(registry.PathClusterCompleted)

		// Note: this `b` shadows the receiver within this scope.
		b, err := p.ReadFile()
		if err != nil {
			return nil, fmt.Errorf("error loading Cluster %q: %v", p, err)
		}

		err = utils.YamlUnmarshal(b, fullCluster)
		if err != nil {
			return nil, fmt.Errorf("error parsing Cluster %q: %v", p, err)
		}
	}

	glog.Infof("fullCluster %v", fullCluster)

	// Rewrite the stores so the enrolled machine reads everything from the
	// local bootstrap directory instead of the remote state store.
	fullCluster.Spec.ConfigBase = "/etc/kubernetes/bootstrap"
	fullCluster.Spec.ConfigStore = "/etc/kubernetes/bootstrap"
	fullCluster.Spec.KeyStore = "/etc/kubernetes/bootstrap/pki"
	fullCluster.Spec.SecretStore = "/etc/kubernetes/bootstrap/secrets"

	var files []*DataFile

	// cluster.spec: the rewritten full cluster spec.
	{
		data, err := utils.YamlMarshal(fullCluster)
		if err != nil {
			return nil, fmt.Errorf("error marshalling configuration: %v", err)
		}

		file := &DataFile{}
		file.Header.Name = "cluster.spec"
		file.Header.Size = int64(len(data))
		file.Header.Mode = 0644
		file.Data = data
		files = append(files, file)
	}

	// instancegroup/<name>: the instance group, in versioned YAML.
	{
		data, err := kopscodecs.ToVersionedYaml(ig)
		if err != nil {
			return nil, fmt.Errorf("error encoding instancegroup: %v", err)
		}

		file := &DataFile{}
		file.Header.Name = "instancegroup/" + ig.Name
		file.Header.Size = int64(len(data))
		file.Header.Mode = 0644
		file.Data = data
		files = append(files, file)
	}

	// PKI: certificates and private keys this instance group needs.
	if pkiFiles, err := b.buildPKIFiles(cluster, ig, keyStore); err != nil {
		return nil, err
	} else {
		glog.Infof("pki files %v", pkiFiles)
		files = append(files, pkiFiles...)
	}

	// copyManifest maps remote manifest source -> bundle-relative destination.
	copyManifest := make(map[string]string)

	// Build the nodeup config and the bootstrap script.
	{
		phase := cloudup.PhaseCluster
		assetBuilder := assets.NewAssetBuilder(cluster.Spec.Assets, string(phase))

		applyCmd := &cloudup.ApplyClusterCmd{
			Cluster:        cluster,
			Clientset:      b.Clientset,
			InstanceGroups: []*kops.InstanceGroup{ig},
			Phase:          phase,
		}

		if err := applyCmd.AddFileAssets(assetBuilder); err != nil {
			return nil, fmt.Errorf("error adding assets: %v", err)
		}

		nodeupConfig, err := applyCmd.BuildNodeUpConfig(assetBuilder, ig)
		if err != nil {
			return nil, fmt.Errorf("error building nodeup config: %v", err)
		}

		// Non-masters don't need protokube or the addon channels.
		if !ig.IsMaster() {
			nodeupConfig.ProtokubeImage = nil
			nodeupConfig.Channels = nil
		}

		nodeupConfig.ConfigBase = fi.String("/etc/kubernetes/bootstrap")

		// Rewrite each channel to a local file:// URL under the bootstrap
		// addons directory, and remember to copy the manifest into the bundle.
		{
			var localChannels []string
			for _, channel := range nodeupConfig.Channels {
				base := path.Base(channel)
				localChannel := "file://" + path.Join("/rootfs/etc/kubernetes/bootstrap/addons/", base)
				localChannels = append(localChannels, localChannel)
				copyManifest[channel] = "addons/" + base
			}
			nodeupConfig.Channels = localChannels
		}

		// Temp hack:
		if ig.IsMaster() {
			if nodeupConfig.ProtokubeImage == nil {
				nodeupConfig.ProtokubeImage = &nodeup.Image{}
			}
			nodeupConfig.ProtokubeImage.Name = "justinsb/protokube:latest"
		}

		bootstrapScript := model.BootstrapScript{}

		nodeupLocation, nodeupHash, err := cloudup.NodeUpLocation(assetBuilder)
		if err != nil {
			return nil, err
		}
		bootstrapScript.NodeUpSource = nodeupLocation.String()
		bootstrapScript.NodeUpSourceHash = nodeupHash.Hex()
		// The config was already built above; the builder just returns it.
		bootstrapScript.NodeUpConfigBuilder = func(ig *kops.InstanceGroup) (*nodeup.Config, error) {
			return nodeupConfig, err
		}

		script, err := bootstrapScript.ResourceNodeUp(ig, &cluster.Spec)
		if err != nil {
			return nil, fmt.Errorf("error building bootstrap script: %v", err)
		}

		scriptBytes, err := script.AsBytes()
		if err != nil {
			return nil, fmt.Errorf("error building bootstrap script: %v", err)
		}

		// bootstrap.sh: executable entry point run on the target machine.
		file := &DataFile{}
		file.Header.Name = "bootstrap.sh"
		file.Header.Size = int64(len(scriptBytes))
		file.Header.Mode = 0755
		file.Data = scriptBytes
		files = append(files, file)
	}

	glog.Infof("copyManifest %v", copyManifest)

	// Copy the referenced addon manifests into the bundle.
	for src, dest := range copyManifest {
		data, err := vfs.Context.ReadFile(src)
		if err != nil {
			return nil, fmt.Errorf("error reading file %q: %v", src, err)
		}

		file := &DataFile{}
		file.Header.Name = dest
		file.Header.Size = int64(len(data))
		file.Header.Mode = 0644
		file.Data = data
		files = append(files, file)
	}

	return &Data{
		Files: files,
	}, nil
	//bundlePath := "output.tar.gz"
	//if err := writeToTar(files, bundlePath); err != nil {
	//	return err
	//}
}
|
||||
|
||||
func (b *Builder) buildPKIFiles(cluster *kops.Cluster, ig *kops.InstanceGroup, keyStore fi.CAStore) ([]*DataFile, error) {
|
||||
var files []*DataFile
|
||||
|
||||
certs := []string{fi.CertificateId_CA, "kubelet"}
|
||||
keys := []string{"kubelet"}
|
||||
|
||||
// Used by kube-proxy to auth to API
|
||||
certs = append(certs, "kube-proxy")
|
||||
keys = append(keys, "kube-proxy")
|
||||
|
||||
if ig.IsMaster() {
|
||||
// Used by e.g. protokube
|
||||
certs = append(certs, "kops")
|
||||
keys = append(keys, "kops")
|
||||
|
||||
// Used by apiserver-aggregator
|
||||
certs = append(certs, "apiserver-aggregator")
|
||||
keys = append(keys, "apiserver-aggregator")
|
||||
certs = append(certs, "apiserver-aggregator-ca")
|
||||
|
||||
certs = append(certs, "apiserver-proxy-client")
|
||||
keys = append(keys, "apiserver-proxy-client")
|
||||
|
||||
// Used by k-c-m, for example
|
||||
//certs = append(certs, "ca")
|
||||
keys = append(keys, "ca")
|
||||
|
||||
// Used by kube-controller-manager to auth to API
|
||||
certs = append(certs, "kube-controller-manager")
|
||||
keys = append(keys, "kube-controller-manager")
|
||||
|
||||
// Used by kube-scheduler to auth to API
|
||||
certs = append(certs, "kube-scheduler")
|
||||
keys = append(keys, "kube-scheduler")
|
||||
|
||||
// key for the apiserver
|
||||
certs = append(certs, "master")
|
||||
keys = append(keys, "master")
|
||||
|
||||
// We store kubecfg on the master
|
||||
certs = append(certs, "kubecfg")
|
||||
keys = append(keys, "kubecfg")
|
||||
}
|
||||
|
||||
for _, name := range certs {
|
||||
certPool, err := keyStore.FindCertificateKeyset(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error querying certificate %q: %v", name, err)
|
||||
}
|
||||
if certPool == nil {
|
||||
return nil, fmt.Errorf("certificate %q not found", name)
|
||||
}
|
||||
|
||||
data, err := fi.SerializeKeyset(certPool)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error serializing certificate %q: %v", name, err)
|
||||
}
|
||||
|
||||
file := &DataFile{}
|
||||
file.Header.Name = "pki/issued/" + name + "/keyset.yaml"
|
||||
file.Header.Size = int64(len(data))
|
||||
file.Header.Mode = 0644
|
||||
file.Data = data
|
||||
files = append(files, file)
|
||||
}
|
||||
|
||||
for _, name := range keys {
|
||||
key, err := keyStore.FindPrivateKeyset(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error querying private key %q: %v", name, err)
|
||||
}
|
||||
if key == nil {
|
||||
return nil, fmt.Errorf("private key %q not found", name)
|
||||
}
|
||||
|
||||
data, err := fi.SerializeKeyset(key)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error serializing private key %q: %v", name, err)
|
||||
}
|
||||
|
||||
file := &DataFile{}
|
||||
file.Header.Name = "pki/private/" + name + "/keyset.yaml"
|
||||
file.Header.Size = int64(len(data))
|
||||
file.Header.Mode = 0644
|
||||
file.Data = data
|
||||
files = append(files, file)
|
||||
}
|
||||
|
||||
return files, nil
|
||||
}
|
|
@ -41,6 +41,21 @@ type BootstrapScript struct {
|
|||
NodeUpConfigBuilder func(ig *kops.InstanceGroup) (*nodeup.Config, error)
|
||||
}
|
||||
|
||||
// KubeEnv returns the nodeup config for the instance group
|
||||
func (b *BootstrapScript) KubeEnv(ig *kops.InstanceGroup) (string, error) {
|
||||
config, err := b.NodeUpConfigBuilder(ig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
data, err := kops.ToRawYaml(config)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(data), nil
|
||||
}
|
||||
|
||||
// ResourceNodeUp generates and returns a nodeup (bootstrap) script from a
|
||||
// template file, substituting in specific env vars & cluster spec configuration
|
||||
func (b *BootstrapScript) ResourceNodeUp(ig *kops.InstanceGroup, cs *kops.ClusterSpec) (*fi.ResourceHolder, error) {
|
||||
|
@ -57,17 +72,7 @@ func (b *BootstrapScript) ResourceNodeUp(ig *kops.InstanceGroup, cs *kops.Cluste
|
|||
return b.NodeUpSourceHash
|
||||
},
|
||||
"KubeEnv": func() (string, error) {
|
||||
config, err := b.NodeUpConfigBuilder(ig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
data, err := kops.ToRawYaml(config)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return string(data), nil
|
||||
return b.KubeEnv(ig)
|
||||
},
|
||||
|
||||
// Pass in extra environment variables for user-defined S3 service
|
||||
|
|
|
@ -70,9 +70,15 @@ type CAStore interface {
|
|||
// FindCertificatePool returns the named CertificatePool, or (nil,nil) if not found
|
||||
FindCertificatePool(name string) (*CertificatePool, error)
|
||||
|
||||
// FindCertificateKeyset will return the keyset for a certificate
|
||||
FindCertificateKeyset(name string) (*kops.Keyset, error)
|
||||
|
||||
// FindPrivateKey returns the named private key, or (nil,nil) if not found
|
||||
FindPrivateKey(name string) (*pki.PrivateKey, error)
|
||||
|
||||
// FindPrivateKeyset will return the keyset for a private key
|
||||
FindPrivateKeyset(name string) (*kops.Keyset, error)
|
||||
|
||||
// FindCert returns the specified certificate, if it exists, or nil if not found
|
||||
FindCert(name string) (*pki.Certificate, error)
|
||||
|
||||
|
@ -107,6 +113,17 @@ type CertificatePool struct {
|
|||
Primary *pki.Certificate
|
||||
}
|
||||
|
||||
func (c *CertificatePool) All() []*pki.Certificate {
|
||||
var certs []*pki.Certificate
|
||||
if c.Primary != nil {
|
||||
certs = append(certs, c.Primary)
|
||||
}
|
||||
if len(c.Secondary) != 0 {
|
||||
certs = append(certs, c.Secondary...)
|
||||
}
|
||||
return certs
|
||||
}
|
||||
|
||||
func (c *CertificatePool) AsString() (string, error) {
|
||||
// Nicer behaviour because this is called from templates
|
||||
if c == nil {
|
||||
|
|
|
@ -288,6 +288,18 @@ func (c *ClientsetCAStore) FindCertificatePool(name string) (*CertificatePool, e
|
|||
return pool, nil
|
||||
}
|
||||
|
||||
// FindCertificateKeyset implements CAStore::FindCertificateKeyset
|
||||
func (c *ClientsetCAStore) FindCertificateKeyset(name string) (*kops.Keyset, error) {
|
||||
o, err := c.clientset.Keysets(c.namespace).Get(name, v1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("error reading keyset %q: %v", name, err)
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// ListKeysets implements CAStore::ListKeysets
|
||||
func (c *ClientsetCAStore) ListKeysets() ([]*kops.Keyset, error) {
|
||||
var items []*kops.Keyset
|
||||
|
@ -426,6 +438,18 @@ func (c *ClientsetCAStore) FindPrivateKey(name string) (*pki.PrivateKey, error)
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
// FindPrivateKeyset implements CAStore::FindPrivateKeyset
|
||||
func (c *ClientsetCAStore) FindPrivateKeyset(name string) (*kops.Keyset, error) {
|
||||
o, err := c.clientset.Keysets(c.namespace).Get(name, v1.GetOptions{})
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, fmt.Errorf("error reading keyset %q: %v", name, err)
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// CreateKeypair implements CAStore::CreateKeypair
|
||||
func (c *ClientsetCAStore) CreateKeypair(signer string, id string, template *x509.Certificate, privateKey *pki.PrivateKey) (*pki.Certificate, error) {
|
||||
serial := c.buildSerial()
|
||||
|
|
|
@ -255,10 +255,6 @@ func (c *ApplyClusterCmd) Run() error {
|
|||
return err
|
||||
}
|
||||
|
||||
channels := []string{
|
||||
configBase.Join("addons", "bootstrap-channel.yaml").Path(),
|
||||
}
|
||||
|
||||
// Normalize k8s version
|
||||
versionWithoutV := strings.TrimSpace(cluster.Spec.KubernetesVersion)
|
||||
if strings.HasPrefix(versionWithoutV, "v") {
|
||||
|
@ -269,7 +265,7 @@ func (c *ApplyClusterCmd) Run() error {
|
|||
cluster.Spec.KubernetesVersion = versionWithoutV
|
||||
}
|
||||
|
||||
if err := c.addFileAssets(assetBuilder); err != nil {
|
||||
if err := c.AddFileAssets(assetBuilder); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -561,86 +557,10 @@ func (c *ApplyClusterCmd) Run() error {
|
|||
return secretStore
|
||||
}
|
||||
|
||||
// RenderNodeUpConfig returns the NodeUp config, in YAML format
|
||||
// @@NOTE
|
||||
renderNodeUpConfig := func(ig *kops.InstanceGroup) (*nodeup.Config, error) {
|
||||
if ig == nil {
|
||||
return nil, fmt.Errorf("instanceGroup cannot be nil")
|
||||
}
|
||||
|
||||
role := ig.Spec.Role
|
||||
if role == "" {
|
||||
return nil, fmt.Errorf("cannot determine role for instance group: %v", ig.ObjectMeta.Name)
|
||||
}
|
||||
|
||||
nodeUpTags, err := buildNodeupTags(role, tf.cluster, tf.tags)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config := &nodeup.Config{}
|
||||
for _, tag := range nodeUpTags.List() {
|
||||
config.Tags = append(config.Tags, tag)
|
||||
}
|
||||
|
||||
config.Assets = c.Assets
|
||||
config.ClusterName = cluster.ObjectMeta.Name
|
||||
config.ConfigBase = fi.String(configBase.Path())
|
||||
config.InstanceGroupName = ig.ObjectMeta.Name
|
||||
|
||||
var images []*nodeup.Image
|
||||
|
||||
if components.IsBaseURL(cluster.Spec.KubernetesVersion) {
|
||||
// When using a custom version, we want to preload the images over http
|
||||
components := []string{"kube-proxy"}
|
||||
if role == kops.InstanceGroupRoleMaster {
|
||||
components = append(components, "kube-apiserver", "kube-controller-manager", "kube-scheduler")
|
||||
}
|
||||
|
||||
for _, component := range components {
|
||||
baseURL, err := url.Parse(cluster.Spec.KubernetesVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
baseURL.Path = path.Join(baseURL.Path, "/bin/linux/amd64/", component+".tar")
|
||||
|
||||
u, hash, err := assetBuilder.RemapFileAndSHA(baseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
image := &nodeup.Image{
|
||||
Source: u.String(),
|
||||
Hash: hash.Hex(),
|
||||
}
|
||||
images = append(images, image)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
location, hash, err := ProtokubeImageSource(assetBuilder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config.ProtokubeImage = &nodeup.Image{
|
||||
Name: kopsbase.DefaultProtokubeImageName(),
|
||||
Source: location.String(),
|
||||
Hash: hash.Hex(),
|
||||
}
|
||||
}
|
||||
|
||||
config.Images = images
|
||||
config.Channels = channels
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
bootstrapScriptBuilder := &model.BootstrapScript{
|
||||
NodeUpConfigBuilder: renderNodeUpConfig,
|
||||
NodeUpSourceHash: c.NodeUpHash,
|
||||
NodeUpConfigBuilder: func(ig *kops.InstanceGroup) (*nodeup.Config, error) { return c.BuildNodeUpConfig(assetBuilder, ig) },
|
||||
NodeUpSource: c.NodeUpSource,
|
||||
NodeUpSourceHash: c.NodeUpHash,
|
||||
}
|
||||
switch kops.CloudProviderID(cluster.Spec.CloudProvider) {
|
||||
case kops.CloudProviderAWS:
|
||||
|
@ -973,8 +893,8 @@ func (c *ApplyClusterCmd) validateKubernetesVersion() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// addFileAssets adds the file assets within the assetBuilder
|
||||
func (c *ApplyClusterCmd) addFileAssets(assetBuilder *assets.AssetBuilder) error {
|
||||
// AddFileAssets adds the file assets within the assetBuilder
|
||||
func (c *ApplyClusterCmd) AddFileAssets(assetBuilder *assets.AssetBuilder) error {
|
||||
|
||||
var baseURL string
|
||||
var err error
|
||||
|
@ -1073,3 +993,95 @@ func needsKubernetesManifests(c *kops.Cluster, instanceGroups []*kops.InstanceGr
|
|||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// BuildNodeUpConfig returns the NodeUp config, in YAML format
|
||||
func (c *ApplyClusterCmd) BuildNodeUpConfig(assetBuilder *assets.AssetBuilder, ig *kops.InstanceGroup) (*nodeup.Config, error) {
|
||||
if ig == nil {
|
||||
return nil, fmt.Errorf("instanceGroup cannot be nil")
|
||||
}
|
||||
|
||||
cluster := c.Cluster
|
||||
|
||||
configBase, err := vfs.Context.BuildVfsPath(cluster.Spec.ConfigBase)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing config base %q: %v", cluster.Spec.ConfigBase, err)
|
||||
}
|
||||
|
||||
// TODO: Remove
|
||||
clusterTags, err := buildCloudupTags(cluster)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
channels := []string{
|
||||
configBase.Join("addons", "bootstrap-channel.yaml").Path(),
|
||||
}
|
||||
|
||||
role := ig.Spec.Role
|
||||
if role == "" {
|
||||
return nil, fmt.Errorf("cannot determine role for instance group: %v", ig.ObjectMeta.Name)
|
||||
}
|
||||
|
||||
nodeUpTags, err := buildNodeupTags(role, cluster, clusterTags)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config := &nodeup.Config{}
|
||||
for _, tag := range nodeUpTags.List() {
|
||||
config.Tags = append(config.Tags, tag)
|
||||
}
|
||||
|
||||
config.Assets = c.Assets
|
||||
config.ClusterName = cluster.ObjectMeta.Name
|
||||
config.ConfigBase = fi.String(configBase.Path())
|
||||
config.InstanceGroupName = ig.ObjectMeta.Name
|
||||
|
||||
var images []*nodeup.Image
|
||||
|
||||
if components.IsBaseURL(cluster.Spec.KubernetesVersion) {
|
||||
// When using a custom version, we want to preload the images over http
|
||||
components := []string{"kube-proxy"}
|
||||
if role == kops.InstanceGroupRoleMaster {
|
||||
components = append(components, "kube-apiserver", "kube-controller-manager", "kube-scheduler")
|
||||
}
|
||||
|
||||
for _, component := range components {
|
||||
baseURL, err := url.Parse(c.Cluster.Spec.KubernetesVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
baseURL.Path = path.Join(baseURL.Path, "/bin/linux/amd64/", component+".tar")
|
||||
|
||||
u, hash, err := assetBuilder.RemapFileAndSHA(baseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
image := &nodeup.Image{
|
||||
Source: u.String(),
|
||||
Hash: hash.Hex(),
|
||||
}
|
||||
images = append(images, image)
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
location, hash, err := ProtokubeImageSource(assetBuilder)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config.ProtokubeImage = &nodeup.Image{
|
||||
Name: kopsbase.DefaultProtokubeImageName(),
|
||||
Source: location.String(),
|
||||
Hash: hash.Hex(),
|
||||
}
|
||||
}
|
||||
|
||||
config.Images = images
|
||||
config.Channels = channels
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
|
|
@ -108,7 +108,7 @@ func (s *VFSCAStore) readCAKeypairs(id string) (*keyset, *keyset, error) {
|
|||
}
|
||||
|
||||
if caPrivateKeys == nil {
|
||||
glog.Warningf("CA private key was not found; will generate new key")
|
||||
glog.Warningf("CA private key was not found")
|
||||
//return nil, fmt.Errorf("error loading CA private key - key not found")
|
||||
}
|
||||
}
|
||||
|
@ -256,7 +256,7 @@ func (c *VFSCAStore) loadKeysetBundle(p vfs.Path) (*keyset, error) {
|
|||
return keyset, nil
|
||||
}
|
||||
|
||||
func (k *keyset) ToAPIObject(name string) (*kops.Keyset, error) {
|
||||
func (k *keyset) ToAPIObject(name string, includePrivateKeyMaterial bool) (*kops.Keyset, error) {
|
||||
o := &kops.Keyset{}
|
||||
o.Name = name
|
||||
o.Spec.Type = kops.SecretTypeKeypair
|
||||
|
@ -274,7 +274,7 @@ func (k *keyset) ToAPIObject(name string) (*kops.Keyset, error) {
|
|||
oki.PublicMaterial = publicMaterial.Bytes()
|
||||
}
|
||||
|
||||
if ki.privateKey != nil {
|
||||
if includePrivateKeyMaterial && ki.privateKey != nil {
|
||||
var privateMaterial bytes.Buffer
|
||||
if _, err := ki.privateKey.WriteTo(&privateMaterial); err != nil {
|
||||
return nil, err
|
||||
|
@ -285,23 +285,18 @@ func (k *keyset) ToAPIObject(name string) (*kops.Keyset, error) {
|
|||
|
||||
o.Spec.Keys = append(o.Spec.Keys, oki)
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// writeKeysetBundle writes a keyset bundle to VFS
|
||||
func (c *VFSCAStore) writeKeysetBundle(p vfs.Path, name string, keyset *keyset, includePrivateMaterial bool) error {
|
||||
func (c *VFSCAStore) writeKeysetBundle(p vfs.Path, name string, keyset *keyset, includePrivateKeyMaterial bool) error {
|
||||
p = p.Join("keyset.yaml")
|
||||
|
||||
o, err := keyset.ToAPIObject(name)
|
||||
o, err := keyset.ToAPIObject(name, includePrivateKeyMaterial)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !includePrivateMaterial {
|
||||
o = removePrivateKeyMaterial(o)
|
||||
}
|
||||
|
||||
objectData, err := serializeKeysetBundle(o)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -341,6 +336,24 @@ func removePrivateKeyMaterial(o *kops.Keyset) *kops.Keyset {
|
|||
return copy
|
||||
}
|
||||
|
||||
func SerializeKeyset(o *kops.Keyset) ([]byte, error) {
|
||||
var objectData bytes.Buffer
|
||||
{
|
||||
codecs := kopscodecs.Codecs
|
||||
yaml, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), "application/yaml")
|
||||
if !ok {
|
||||
glog.Fatalf("no YAML serializer registered")
|
||||
}
|
||||
encoder := codecs.EncoderForVersion(yaml.Serializer, v1alpha2.SchemeGroupVersion)
|
||||
|
||||
if err := encoder.Encode(o, &objectData); err != nil {
|
||||
return nil, fmt.Errorf("error serializing keyset: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return objectData.Bytes(), nil
|
||||
}
|
||||
|
||||
func (c *VFSCAStore) loadCertificates(p vfs.Path, useBundle bool) (*keyset, error) {
|
||||
// Attempt to load prebuilt bundle, which avoids having to list files, which is a permission that can be hard to
|
||||
// give on GCE / other clouds
|
||||
|
@ -422,7 +435,7 @@ func (c *VFSCAStore) CertificatePool(id string, createIfMissing bool) (*Certific
|
|||
cert, err := c.FindCertificatePool(id)
|
||||
if err == nil && cert == nil {
|
||||
if !createIfMissing {
|
||||
glog.Warningf("using empty certificate, because running with DryRun")
|
||||
glog.Warningf("using empty certificate pool for %q, because createIfMissing=false", id)
|
||||
return &CertificatePool{}, err
|
||||
}
|
||||
return nil, fmt.Errorf("cannot find certificate pool %q", id)
|
||||
|
@ -493,6 +506,25 @@ func (c *VFSCAStore) FindCertificatePool(name string) (*CertificatePool, error)
|
|||
return pool, nil
|
||||
}
|
||||
|
||||
func (c *VFSCAStore) FindCertificateKeyset(name string) (*kops.Keyset, error) {
|
||||
p := c.buildCertificatePoolPath(name)
|
||||
certs, err := c.loadCertificates(p, true)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error in 'FindCertificatePool' attempting to load cert %q: %v", name, err)
|
||||
}
|
||||
|
||||
if certs == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
o, err := certs.ToAPIObject(name, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// ListKeysets implements CAStore::ListKeysets
|
||||
func (c *VFSCAStore) ListKeysets() ([]*kops.Keyset, error) {
|
||||
keysets := make(map[string]*kops.Keyset)
|
||||
|
@ -842,7 +874,7 @@ func (c *VFSCAStore) loadOnePrivateKey(p vfs.Path) (*pki.PrivateKey, error) {
|
|||
return k, err
|
||||
}
|
||||
|
||||
func (c *VFSCAStore) FindPrivateKey(id string) (*pki.PrivateKey, error) {
|
||||
func (c *VFSCAStore) findPrivateKeyset(id string) (*keyset, error) {
|
||||
var keys *keyset
|
||||
if id == CertificateId_CA {
|
||||
_, caPrivateKeys, err := c.readCAKeypairs(id)
|
||||
|
@ -859,6 +891,15 @@ func (c *VFSCAStore) FindPrivateKey(id string) (*pki.PrivateKey, error) {
|
|||
}
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func (c *VFSCAStore) FindPrivateKey(id string) (*pki.PrivateKey, error) {
|
||||
keys, err := c.findPrivateKeyset(id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var key *pki.PrivateKey
|
||||
if keys != nil && keys.primary != nil {
|
||||
key = keys.primary.privateKey
|
||||
|
@ -866,6 +907,20 @@ func (c *VFSCAStore) FindPrivateKey(id string) (*pki.PrivateKey, error) {
|
|||
return key, nil
|
||||
}
|
||||
|
||||
func (c *VFSCAStore) FindPrivateKeyset(name string) (*kops.Keyset, error) {
|
||||
keys, err := c.findPrivateKeyset(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
o, err := keys.ToAPIObject(name, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
func (c *VFSCAStore) CreateKeypair(signer string, id string, template *x509.Certificate, privateKey *pki.PrivateKey) (*pki.Certificate, error) {
|
||||
serial := c.buildSerial()
|
||||
|
||||
|
|
|
@ -37,6 +37,10 @@ type SSHPath struct {
|
|||
path string
|
||||
}
|
||||
|
||||
type SSHAcl struct {
|
||||
Mode os.FileMode
|
||||
}
|
||||
|
||||
var _ Path = &SSHPath{}
|
||||
|
||||
func NewSSHPath(client *ssh.Client, server string, path string, sudo bool) *SSHPath {
|
||||
|
@ -175,12 +179,40 @@ func (p *SSHPath) WriteFile(data []byte, acl ACL) error {
|
|||
}
|
||||
|
||||
if err == nil {
|
||||
err = sftpClient.Rename(tempfile, p.path)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error during file write of %q: rename failed: %v", p.path, err)
|
||||
if acl != nil {
|
||||
sshAcl, ok := acl.(*SSHAcl)
|
||||
if !ok {
|
||||
err = fmt.Errorf("unexpected acl type %T", acl)
|
||||
} else {
|
||||
err = sftpClient.Chmod(tempfile, sshAcl.Mode)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error during chmod of %q: %v", tempfile, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
session, err := p.client.NewSession()
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error creating session for rename: %v", err)
|
||||
} else {
|
||||
cmd := "mv " + tempfile + " " + p.path
|
||||
if p.sudo {
|
||||
cmd = "sudo " + cmd
|
||||
}
|
||||
err = session.Run(cmd)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("error renaming file %q -> %q: %v", tempfile, p.path, err)
|
||||
}
|
||||
}
|
||||
// sftp rename seems to fail if dest file exists
|
||||
//err = sftpClient.Rename(tempfile, p.path)
|
||||
//if err != nil {
|
||||
// err = fmt.Errorf("error during file write of %q: rename failed: %v", p.path, err)
|
||||
//}
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue