mirror of https://github.com/kubernetes/kops.git

commit ac8ca9ad06

Merge pull request #126 from justinsb/upup_use_vfs

upup: use vfs for secretstore/keystore
@@ -24,7 +24,8 @@ func buildDefaultCreateCluster() *CreateClusterCmd {
 	c.Config.CloudProvider = "aws"
 
-	c.StateStore, err = fi.NewVFSStateStore(vfs.NewFSPath("test-state"))
+	dryrun := false
+	c.StateStore, err = fi.NewVFSStateStore(vfs.NewFSPath("test-state"), dryrun)
 	if err != nil {
 		glog.Fatalf("error building state store: %v", err)
 	}

@@ -37,7 +37,7 @@ func main() {
 	modelsBaseDir := pflag.String("modelstore", modelsBaseDirDefault, "Source directory where models are stored")
 	models := pflag.String("model", "proto,cloudup", "Models to apply (separate multiple models with commas)")
 	nodeModel := pflag.String("nodemodel", "nodeup", "Model to use for node configuration")
-	stateLocation := pflag.String("state", "./state", "Location to use to store configuration state")
+	stateLocation := pflag.String("state", "", "Location to use to store configuration state")
 
 	cloudProvider := pflag.String("cloud", "", "Cloud provider to use - gce, aws")

@@ -57,6 +57,7 @@ func main() {
 	nodeCount := pflag.Int("node-count", 0, "Set the number of nodes")
 
+	dnsZone := pflag.String("dns-zone", "", "DNS hosted zone to use (defaults to last two components of cluster name)")
 	outDir := pflag.String("out", "", "Path to write any local output")
 
 	pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
 	pflag.Parse()

@@ -68,8 +69,20 @@ func main() {
 		*target = "dryrun"
 	}
 
-	statePath := vfs.NewFSPath(*stateLocation)
-	workDir := stateLocation
+	if *stateLocation == "" {
+		glog.Errorf("--state is required")
+		os.Exit(1)
+	}
+
+	statePath, err := fi.BuildVfsPath(*stateLocation)
+	if err != nil {
+		glog.Errorf("error building state location: %v", err)
+		os.Exit(1)
+	}
+
+	if *outDir == "" {
+		*outDir = "out"
+	}
+
 	stateStore, err := fi.NewVFSStateStore(statePath, isDryrun)
 	if err != nil {

@@ -166,7 +179,7 @@ func main() {
 		Target:       *target,
 		NodeModel:    *nodeModel,
 		SSHPublicKey: *sshPublicKey,
-		WorkDir:      *workDir,
+		OutDir:       *outDir,
 	}
 
 	//if *configFile != "" {

@@ -215,8 +228,8 @@ type CreateClusterCmd struct {
 	NodeModel string
 	// The SSH public key (file) to use
 	SSHPublicKey string
-	// WorkDir is a local directory in which we place output, can cache files etc
-	WorkDir string
+	// OutDir is a local directory in which we place output, can cache files etc
+	OutDir string
 }
 
 func (c *CreateClusterCmd) LoadConfig(configFile string) error {

@@ -304,8 +317,50 @@ func (c *CreateClusterCmd) Run() error {
 	l := &cloudup.Loader{}
 	l.Init()
 
-	caStore := c.StateStore.CA()
-	secrets := c.StateStore.Secrets()
+	keyStore := c.StateStore.CA()
+	secretStore := c.StateStore.Secrets()
+
+	if vfs.IsClusterReadable(secretStore.VFSPath()) {
+		vfsPath := secretStore.VFSPath()
+		c.Config.SecretStore = vfsPath.Path()
+		if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
+			if c.Config.MasterPermissions == nil {
+				c.Config.MasterPermissions = &cloudup.CloudPermissions{}
+			}
+			c.Config.MasterPermissions.AddS3Bucket(s3Path.Bucket())
+			if c.Config.NodePermissions == nil {
+				c.Config.NodePermissions = &cloudup.CloudPermissions{}
+			}
+			c.Config.NodePermissions.AddS3Bucket(s3Path.Bucket())
+		}
+	} else {
+		// We could implement this approach, but it seems better to get all clouds using cluster-readable storage
+		return fmt.Errorf("secrets path is not cluster readable: %v", secretStore.VFSPath())
+	}
+
+	if vfs.IsClusterReadable(keyStore.VFSPath()) {
+		vfsPath := keyStore.VFSPath()
+		c.Config.KeyStore = vfsPath.Path()
+		if s3Path, ok := vfsPath.(*vfs.S3Path); ok {
+			if c.Config.MasterPermissions == nil {
+				c.Config.MasterPermissions = &cloudup.CloudPermissions{}
+			}
+			c.Config.MasterPermissions.AddS3Bucket(s3Path.Bucket())
+			if c.Config.NodePermissions == nil {
+				c.Config.NodePermissions = &cloudup.CloudPermissions{}
+			}
+			c.Config.NodePermissions.AddS3Bucket(s3Path.Bucket())
+		}
+	} else {
+		// We could implement this approach, but it seems better to get all clouds using cluster-readable storage
+		return fmt.Errorf("keyStore path is not cluster readable: %v", keyStore.VFSPath())
+	}
+
+	if vfs.IsClusterReadable(c.StateStore.VFSPath()) {
+		c.Config.ConfigStore = c.StateStore.VFSPath().Path()
+	} else {
+		// We do support this...
+	}
 
 	if c.Config.KubernetesVersion == "" {
 		stableURL := "https://storage.googleapis.com/kubernetes-release/release/stable.txt"

@@ -385,6 +440,7 @@ func (c *CreateClusterCmd) Run() error {
 
 	l.AddTypes(map[string]interface{}{
 		"keypair": &fitasks.Keypair{},
+		"secret":  &fitasks.Secret{},
 	})
 
 	switch c.Config.CloudProvider {

@@ -517,7 +573,7 @@ func (c *CreateClusterCmd) Run() error {
 			return fmt.Errorf("SSH public key must be specified when running with AWS")
 		}
 
-		cloudTags := map[string]string{"KubernetesCluster": c.Config.ClusterName}
+		cloudTags := map[string]string{awsup.TagClusterName: c.Config.ClusterName}
 
 		awsCloud, err := awsup.NewAWSCloud(c.Config.Region, cloudTags)
 		if err != nil {

@@ -542,7 +598,7 @@ func (c *CreateClusterCmd) Run() error {
 	}
 
 	l.Tags = tags
-	l.WorkDir = c.WorkDir
+	l.WorkDir = c.OutDir
 	l.ModelStore = c.ModelStore
 	l.NodeModel = c.NodeModel
 	l.OptionsLoader = loader.NewOptionsLoader(c.Config)

@@ -556,18 +612,10 @@ func (c *CreateClusterCmd) Run() error {
 	l.OptionsLoader.TemplateFunctions["HasTag"] = l.TemplateFunctions["HasTag"]
 
 	l.TemplateFunctions["CA"] = func() fi.CAStore {
-		return caStore
+		return keyStore
 	}
 	l.TemplateFunctions["Secrets"] = func() fi.SecretStore {
-		return secrets
-	}
-	l.TemplateFunctions["GetOrCreateSecret"] = func(id string) (string, error) {
-		secret, _, err := secrets.GetOrCreateSecret(id)
-		if err != nil {
-			return "", fmt.Errorf("error creating secret %q: %v", id, err)
-		}
-
-		return secret.AsString()
+		return secretStore
 	}
 
 	if c.SSHPublicKey != "" {

@@ -599,7 +647,7 @@ func (c *CreateClusterCmd) Run() error {
 
 	case "terraform":
 		checkExisting = false
-		outDir := path.Join(c.WorkDir, "terraform")
+		outDir := path.Join(c.OutDir, "terraform")
 		target = terraform.NewTerraformTarget(cloud, c.Config.Region, project, outDir)
 
 	case "dryrun":

@@ -608,7 +656,7 @@ func (c *CreateClusterCmd) Run() error {
 		return fmt.Errorf("unsupported target type %q", c.Target)
 	}
 
-	context, err := fi.NewContext(target, cloud, caStore, checkExisting)
+	context, err := fi.NewContext(target, cloud, keyStore, secretStore, checkExisting)
 	if err != nil {
 		glog.Exitf("error building context: %v", err)
 	}
@@ -8,7 +8,6 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 	"k8s.io/kube-deploy/upup/pkg/fi"
-	"k8s.io/kube-deploy/upup/pkg/fi/vfs"
 )
 
 type RootCmd struct {

@@ -77,7 +76,10 @@ func (c *RootCmd) StateStore() (fi.StateStore, error) {
 		return nil, fmt.Errorf("--state is required")
 	}
 
-	statePath := vfs.NewFSPath(c.stateLocation)
+	statePath, err := fi.BuildVfsPath(c.stateLocation)
+	if err != nil {
+		return nil, fmt.Errorf("error building state store path: %v", err)
+	}
+
 	isDryrun := false
 	stateStore, err := fi.NewVFSStateStore(statePath, isDryrun)

@@ -129,13 +129,9 @@ func (cmd *CreateSecretsCommand) Run() error {
 
 	// TODO: Allow resigning of the existing private key?
 
-	key, err := caStore.CreatePrivateKey(cmd.Id)
+	_, _, err = caStore.CreateKeypair(cmd.Id, template)
 	if err != nil {
-		return fmt.Errorf("error creating privatekey %v", err)
-	}
-	_, err = caStore.IssueCert(cmd.Id, key, template)
-	if err != nil {
-		return fmt.Errorf("error creating certificate %v", err)
+		return fmt.Errorf("error creating keypair %v", err)
 	}
 	return nil
 }
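For illustration, a minimal sketch of the consolidated API, not part of this commit: one CreateKeypair call now replaces the old CreatePrivateKey plus IssueCert pair. The store location is hypothetical, and real callers fill in the certificate template.

    package main

    import (
    	"crypto/x509"
    	"log"

    	"k8s.io/kube-deploy/upup/pkg/fi"
    	"k8s.io/kube-deploy/upup/pkg/fi/vfs"
    )

    func main() {
    	// Hypothetical on-disk key store; the boolean mirrors the
    	// fi.NewVFSCAStore(p, false) call made elsewhere in this commit.
    	caStore, err := fi.NewVFSCAStore(vfs.NewFSPath("/tmp/pki"), false)
    	if err != nil {
    		log.Fatalf("error building CA store: %v", err)
    	}

    	template := &x509.Certificate{} // sketch only: real callers set subject, key usages, etc.
    	cert, key, err := caStore.CreateKeypair("kubelet", template)
    	if err != nil {
    		log.Fatalf("error creating keypair: %v", err)
    	}
    	_, _ = cert, key // certificate and private key, both persisted by the store
    }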
@@ -1,27 +0,0 @@
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": ["ec2:*"],
-      "Resource": ["*"]
-    },
-    {
-      "Effect": "Allow",
-      "Action": ["route53:*"],
-      "Resource": ["*"]
-    },
-    {
-      "Effect": "Allow",
-      "Action": ["elasticloadbalancing:*"],
-      "Resource": ["*"]
-    },
-    {
-      "Effect": "Allow",
-      "Action": "s3:*",
-      "Resource": [
-        "arn:aws:s3:::kubernetes-*"
-      ]
-    }
-  ]
-}
@@ -0,0 +1,43 @@
+{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": ["ec2:*"],
+      "Resource": ["*"]
+    },
+    {
+      "Effect": "Allow",
+      "Action": ["route53:*"],
+      "Resource": ["*"]
+    },
+    {
+      "Effect": "Allow",
+      "Action": ["elasticloadbalancing:*"],
+      "Resource": ["*"]
+    }
+    {{- if .MasterPermissions.S3Buckets -}}
+    ,
+    {
+      "Effect": "Allow",
+      "Action": "s3:*",
+      "Resource": [
+        {{ range $i, $b := .MasterPermissions.S3Buckets }}
+        {{if $i}},{{end}}
+        "arn:aws:s3:::{{ $b }}/*"
+        {{ end }}
+      ]
+    },
+    {
+      "Effect": "Allow",
+      "Action": [ "s3:GetBucketLocation", "s3:ListBucket" ],
+      "Resource": [
+        {{ range $i, $b := .MasterPermissions.S3Buckets }}
+        {{if $i}},{{end}}
+        "arn:aws:s3:::{{ $b }}"
+        {{ end }}
+      ]
+    }
+    {{ end }}
+  ]
+}
@@ -23,6 +23,11 @@
       "Action": "ec2:DetachVolume",
       "Resource": "*"
     },
+    {
+      "Effect": "Allow",
+      "Action": ["route53:*"],
+      "Resource": ["*"]
+    },
     {
       "Effect": "Allow",
       "Action": [

@@ -36,5 +41,28 @@
       ],
       "Resource": "*"
     }
+    {{- if .NodePermissions.S3Buckets -}}
+    ,
+    {
+      "Effect": "Allow",
+      "Action": "s3:*",
+      "Resource": [
+        {{ range $i, $b := .NodePermissions.S3Buckets }}
+        {{if $i}},{{end}}
+        "arn:aws:s3:::{{ $b }}/*"
+        {{ end }}
+      ]
+    },
+    {
+      "Effect": "Allow",
+      "Action": [ "s3:GetBucketLocation", "s3:ListBucket" ],
+      "Resource": [
+        {{ range $i, $b := .NodePermissions.S3Buckets }}
+        {{if $i}},{{end}}
+        "arn:aws:s3:::{{ $b }}"
+        {{ end }}
+      ]
+    }
+    {{ end }}
   ]
 }
@@ -1,15 +1,7 @@
-Kubelet:
-  Certificate: {{ Base64Encode (CA.Cert "kubelet").AsString }}
-  Key: {{ Base64Encode (CA.PrivateKey "kubelet").AsString }}
-
 NodeUp:
   Location: https://kubeupv2.s3.amazonaws.com/nodeup/nodeup.tar.gz
 
-CACertificate: {{ Base64Encode (CA.Cert "ca").AsString }}
-
 APIServer:
-  Certificate: {{ Base64Encode (CA.Cert "master").AsString }}
-  Key: {{ Base64Encode (CA.PrivateKey "master").AsString }}
   Image: gcr.io/google_containers/kube-apiserver:v{{ .KubernetesVersion }}
 
 KubeControllerManager:

@@ -26,20 +18,6 @@ MasterInternalName: {{ .MasterInternalName }}
 DNSZone: {{ .DNSZone }}
 
 KubeUser: {{ .KubeUser }}
-KubePassword: {{ GetOrCreateSecret "kube" }}
-
-Tokens:
-  admin: {{ GetOrCreateSecret "admin" }}
-  kubelet: {{ GetOrCreateSecret "kubelet" }}
-  kube-proxy: {{ GetOrCreateSecret "kube-proxy" }}
-  "system:scheduler": {{ GetOrCreateSecret "system:scheduler" }}
-  "system:controller_manager": {{ GetOrCreateSecret "system:controller_manager" }}
-  "system:logging": {{ GetOrCreateSecret "system:logging" }}
-  "system:monitoring": {{ GetOrCreateSecret "system:monitoring" }}
-  "system:dns": {{ GetOrCreateSecret "system:dns" }}
-{{ if HasTag "_kope_routing" }}
-  "kope-routing": {{ GetOrCreateSecret "kope-routing" }}
-{{ end }}
 
 Tags:
 {{ range $tag := Args }}

@@ -53,3 +31,7 @@ Assets:
 {{ range $asset := .Assets }}
   - {{ $asset }}
 {{ end }}
+
+KeyStore: {{ .KeyStore }}
+SecretStore: {{ .SecretStore }}
+ConfigStore: {{ .ConfigStore }}
@@ -0,0 +1,26 @@
+secret/kube: {}
+
+secret/admin: {}
+
+secret/kubelet: {}
+
+secret/kube-proxy: {}
+
+secret/system-scheduler:
+  name: "system:scheduler"
+
+secret/system-controller_manager:
+  name: "system:controller_manager"
+
+secret/system-logging:
+  name: "system:logging"
+
+secret/system-monitoring:
+  name: "system:monitoring"
+
+secret/system-dns:
+  name: "system:dns"
+
+{{ if HasTag "_kope_routing" }}
+secret/kope-routing: {}
+{{ end }}
@@ -3,11 +3,11 @@ kind: Config
 users:
 - name: kope-routing
   user:
-    token: {{ .GetToken "kope-routing" }}
+    token: {{ GetToken "kope-routing" }}
 clusters:
 - name: local
   cluster:
-    certificate-authority-data: {{ Base64Encode .CACertificate.AsString }}
+    certificate-authority-data: {{ Base64Encode CACertificate.AsString }}
     server: https://{{ .MasterInternalName }}
 contexts:
 - context:
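The kubeconfig templates above switch from reading fields on the config object (`.GetToken`, `.CACertificate`) to calling registered template functions (`GetToken`, `CACertificatePool`, and friends), which are backed by the new secret and key stores. A minimal standalone sketch of that wiring, not part of this commit; the GetToken body here is a hypothetical stand-in for the store-backed implementation:

    package main

    import (
    	"os"
    	"text/template"
    )

    func main() {
    	// Functions registered via template.FuncMap are invoked as {{ GetToken "x" }},
    	// with no leading dot, unlike fields or methods on the data object.
    	funcs := template.FuncMap{
    		"GetToken": func(key string) (string, error) {
    			return "token-for-" + key, nil // hypothetical stand-in
    		},
    	}
    	t := template.Must(template.New("kubecfg").Funcs(funcs).Parse(
    		`token: {{ GetToken "kope-routing" }}`))
    	_ = t.Execute(os.Stdout, nil) // prints: token: token-for-kope-routing
    }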
@@ -1 +1 @@
-{{ .CACertificate.AsString }}
+{{ CACertificatePool.AsString }}

@@ -1 +1 @@
-{{ .APIServer.Certificate.AsString }}
+{{ (Certificate "master").AsString }}

@@ -1 +1 @@
-{{ .APIServer.Key.AsString }}
+{{ (PrivateKey "master").AsString }}

@@ -1 +1 @@
-{{ .KubePassword }},{{ .KubeUser }},admin
+{{ GetToken "kube" }},{{ .KubeUser }},admin

@@ -1,3 +1,3 @@
-{{ range $id, $token := .Tokens }}
+{{ range $id, $token := AllTokens }}
 {{ $token }},{{ $id }},{{ $id }}
 {{ end }}

@@ -3,11 +3,11 @@ kind: Config
 users:
 - name: kube-proxy
   user:
-    token: {{ .GetToken "kube-proxy" }}
+    token: {{ GetToken "kube-proxy" }}
 clusters:
 - name: local
   cluster:
-    certificate-authority-data: {{ Base64Encode .CACertificate.AsString }}
+    certificate-authority-data: {{ Base64Encode CACertificate.AsString }}
 contexts:
 - context:
     cluster: local

@@ -3,12 +3,12 @@ kind: Config
 users:
 - name: kubelet
   user:
-    client-certificate-data: {{ Base64Encode .Kubelet.Certificate.AsString }}
-    client-key-data: {{ Base64Encode .Kubelet.Key.AsString }}
+    client-certificate-data: {{ Base64Encode (Certificate "kubelet").AsString }}
+    client-key-data: {{ Base64Encode (PrivateKey "kubelet").AsString }}
 clusters:
 - name: local
   cluster:
-    certificate-authority-data: {{ Base64Encode (or .Kubelet.CACertificate .CACertificate).AsString }}
+    certificate-authority-data: {{ Base64Encode CACertificate.AsString }}
 contexts:
 - context:
     cluster: local
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"github.com/golang/glog"
 	"io"
+	"k8s.io/kube-deploy/upup/pkg/fi/hashing"
 	"k8s.io/kube-deploy/upup/pkg/fi/utils"
 	"net/http"
 	"os"

@@ -23,7 +24,7 @@ type asset struct {
 type Source struct {
 	Parent             *Source
 	URL                string
-	Hash               string
+	Hash               *hashing.Hash
 	ExtractFromArchive string
 }
 

@@ -109,11 +110,11 @@ func (a *AssetStore) Find(key string, assetPath string) (Resource, error) {
 		return nil, fmt.Errorf("found multiple matching assets for key: %q", key)
 	}
 
-func hashFromHttpHeader(url string) (string, error) {
+func hashFromHttpHeader(url string) (*hashing.Hash, error) {
 	glog.Infof("Doing HTTP HEAD on %q", url)
 	response, err := http.Head(url)
 	if err != nil {
-		return "", fmt.Errorf("error doing HEAD on %q: %v", url, err)
+		return nil, fmt.Errorf("error doing HEAD on %q: %v", url, err)
 	}
 	defer response.Body.Close()
 

@@ -124,32 +125,32 @@ func hashFromHttpHeader(url string) (string, error) {
 	if etag != "" {
 		if len(etag) == 32 {
 			// Likely md5
-			return etag, nil
+			return hashing.HashAlgorithmMD5.FromString(etag)
 		}
 	}
 
-	return "", fmt.Errorf("unable to determine hash from HTTP HEAD: %q", url)
+	return nil, fmt.Errorf("unable to determine hash from HTTP HEAD: %q", url)
 }
 
 func (a *AssetStore) Add(id string) error {
 	if strings.HasPrefix(id, "http://") || strings.HasPrefix(id, "https://") {
-		return a.addURL(id, "")
+		return a.addURL(id, nil)
 	}
 	// TODO: local files!
 	return fmt.Errorf("unknown asset format: %q", id)
 }
 
-func (a *AssetStore) addURL(url string, hash string) error {
+func (a *AssetStore) addURL(url string, hash *hashing.Hash) error {
 	var err error
 
-	if hash == "" {
+	if hash == nil {
 		hash, err = hashFromHttpHeader(url)
 		if err != nil {
 			return err
 		}
 	}
 
-	localFile := path.Join(a.assetDir, hash+"_"+utils.SanitizeString(url))
+	localFile := path.Join(a.assetDir, hash.String()+"_"+utils.SanitizeString(url))
 	_, err = DownloadURL(url, localFile, hash)
 	if err != nil {
 		return err
@@ -13,6 +13,7 @@ import (
 	"fmt"
 	"github.com/golang/glog"
 	"io"
+	"k8s.io/kube-deploy/upup/pkg/fi/vfs"
 	"math/big"
 	"time"
 )

@@ -65,16 +66,27 @@ func (c *Certificate) MarshalJSON() ([]byte, error) {
 }
 
 type CAStore interface {
+	// Cert returns the primary specified certificate
 	Cert(id string) (*Certificate, error)
+	// CertificatePool returns all active certificates with the specified id
+	CertificatePool(id string) (*CertificatePool, error)
 	PrivateKey(id string) (*PrivateKey, error)
 
 	FindCert(id string) (*Certificate, error)
 	FindPrivateKey(id string) (*PrivateKey, error)
 
-	IssueCert(id string, privateKey *PrivateKey, template *x509.Certificate) (*Certificate, error)
-	CreatePrivateKey(id string) (*PrivateKey, error)
+	//IssueCert(id string, privateKey *PrivateKey, template *x509.Certificate) (*Certificate, error)
+	//CreatePrivateKey(id string) (*PrivateKey, error)
+
+	CreateKeypair(id string, template *x509.Certificate) (*Certificate, *PrivateKey, error)
 
 	List() ([]string, error)
 
+	// VFSPath returns the path where the CAStore is stored
+	VFSPath() vfs.Path
+
 	// AddCert adds an alternative certificate to the pool (primarily useful for CAs)
 	AddCert(id string, cert *Certificate) error
 }
 
 func (c *Certificate) AsString() (string, error) {

@@ -307,3 +319,30 @@ func parsePEMPrivateKey(pemData []byte) (crypto.PrivateKey, error) {
 		pemData = rest
 	}
 }
+
+type CertificatePool struct {
+	Secondary []*Certificate
+	Primary   *Certificate
+}
+
+func (c *CertificatePool) AsString() (string, error) {
+	// Nicer behaviour because this is called from templates
+	if c == nil {
+		return "", fmt.Errorf("AsString called on nil CertificatePool")
+	}
+
+	var data bytes.Buffer
+	if c.Primary != nil {
+		_, err := c.Primary.WriteTo(&data)
+		if err != nil {
+			return "", fmt.Errorf("error writing SSL certificate: %v", err)
+		}
+	}
+	for _, cert := range c.Secondary {
+		_, err := cert.WriteTo(&data)
+		if err != nil {
+			return "", fmt.Errorf("error writing SSL certificate: %v", err)
+		}
+	}
+	return data.String(), nil
+}
@@ -19,6 +19,8 @@ import (
 const MaxDescribeTagsAttempts = 60
 const MaxCreateTagsAttempts = 60
 
+const TagClusterName = "KubernetesCluster"
+
 type AWSCloud struct {
 	EC2 *ec2.EC2
 	IAM *iam.IAM

@@ -107,7 +109,7 @@ func (c *AWSCloud) GetTags(resourceId string) (map[string]string, error) {
 				return nil, fmt.Errorf("Got retryable error while getting tags on %q, but retried too many times without success: %v", resourceId, err)
 			}
 
-			glog.V(2).Infof("will retry after encountering error gettings tags on %q: %v", resourceId, err)
+			glog.V(2).Infof("will retry after encountering error getting tags on %q: %v", resourceId, err)
 			time.Sleep(2 * time.Second)
 			continue
 		}

@@ -154,7 +156,7 @@ func (c *AWSCloud) CreateTags(resourceId string, tags map[string]string) error {
 				return fmt.Errorf("Got retryable error while creating tags on %q, but retried too many times without success: %v", resourceId, err)
 			}
 
-			glog.V(2).Infof("will retry after encountering error creatings tags on %q: %v", resourceId, err)
+			glog.V(2).Infof("will retry after encountering error creating tags on %q: %v", resourceId, err)
 			time.Sleep(2 * time.Second)
 			continue
 		}
@@ -24,6 +24,10 @@ type CloudConfig struct {
 	Region  string `json:",omitempty"`
 	Project string `json:",omitempty"`
 
+	// Permissions to configure in IAM or GCE
+	MasterPermissions *CloudPermissions `json:",omitempty"`
+	NodePermissions   *CloudPermissions `json:",omitempty"`
+
 	// The internal and external names for the master nodes
 	MasterPublicName   string `json:",omitempty"`
 	MasterInternalName string `json:",omitempty"`

@@ -33,6 +37,10 @@ type CloudConfig struct {
 	NetworkCIDR string `json:",omitempty"`
 	NetworkID   string `json:",omitempty"`
 
+	SecretStore string `json:",omitempty"`
+	KeyStore    string `json:",omitempty"`
+	ConfigStore string `json:",omitempty"`
+
 	// The DNS zone we should use when configuring DNS
 	DNSZone string `json:",omitempty"`

@@ -280,3 +288,19 @@ func (z *ZoneConfig) assignCIDR(c *CloudConfig) (string, error) {
 func (c *CloudConfig) SharedVPC() bool {
 	return c.NetworkID != ""
 }
+
+// CloudPermissions holds IAM-style permissions
+type CloudPermissions struct {
+	S3Buckets []string `json:",omitempty"`
+}
+
+// AddS3Bucket adds a bucket if it does not already exist
+func (p *CloudPermissions) AddS3Bucket(bucket string) {
+	for _, b := range p.S3Buckets {
+		if b == bucket {
+			return
+		}
+	}
+
+	p.S3Buckets = append(p.S3Buckets, bucket)
+}
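AddS3Bucket is deliberately idempotent, so the same bucket can be registered once for the secret store and again for the key store without duplicating IAM statements in the rendered policy. A minimal sketch of that behaviour, not part of this commit; the cloudup import path and bucket name are assumptions:

    package main

    import (
    	"fmt"

    	"k8s.io/kube-deploy/upup/pkg/fi/cloudup" // assumed import path for the cloudup package
    )

    func main() {
    	perms := &cloudup.CloudPermissions{}
    	perms.AddS3Bucket("my-state-bucket") // hypothetical bucket name
    	perms.AddS3Bucket("my-state-bucket") // no-op: already registered
    	fmt.Println(perms.S3Buckets)         // [my-state-bucket]
    }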
@@ -15,15 +15,17 @@ type Context struct {
 	Target      Target
 	Cloud       Cloud
 	CAStore     CAStore
+	SecretStore SecretStore
 
 	CheckExisting bool
 }
 
-func NewContext(target Target, cloud Cloud, castore CAStore, checkExisting bool) (*Context, error) {
+func NewContext(target Target, cloud Cloud, castore CAStore, secretStore SecretStore, checkExisting bool) (*Context, error) {
 	c := &Context{
 		Cloud:         cloud,
 		Target:        target,
 		CAStore:       castore,
+		SecretStore:   secretStore,
 		CheckExisting: checkExisting,
 	}
 
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"github.com/golang/glog"
 	"io"
+	"k8s.io/kube-deploy/upup/pkg/fi/hashing"
 	"os"
 	"path"
 	"strconv"

@@ -101,13 +102,8 @@ func EnsureFileOwner(destPath string, owner string, groupName string) (bool, error) {
 	return changed, nil
 }
 
-func fileHasHash(f string, expected string) (bool, error) {
-	hashAlgorithm, err := determineHashAlgorithm(expected)
-	if err != nil {
-		return false, nil
-	}
-
-	actual, err := HashFile(f, hashAlgorithm)
+func fileHasHash(f string, expected *hashing.Hash) (bool, error) {
+	actual, err := expected.Algorithm.HashFile(f)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return false, nil

@@ -115,7 +111,7 @@ func fileHasHash(f string, expected string) (bool, error) {
 		return false, err
 	}
 
-	if actual == expected {
+	if actual.Equal(expected) {
 		glog.V(2).Infof("Hash matched for %q: %v", f, expected)
 		return true, nil
 	} else {

@@ -124,18 +120,6 @@ func fileHasHash(f string, expected string) (bool, error) {
 	}
 }
 
-func HashFile(f string, hashAlgorithm HashAlgorithm) (string, error) {
-	glog.V(2).Infof("hashing file %q", f)
-
-	fileAsset := NewFileResource(f)
-	hash, err := HashForResource(fileAsset, hashAlgorithm)
-	if err != nil {
-		return "", err
-	}
-
-	return hash, nil
-}
-
 func ParseFileMode(s string, defaultMode os.FileMode) (os.FileMode, error) {
 	fileMode := defaultMode
 	if s != "" {
@@ -133,7 +133,7 @@ func (_ *Keypair) Render(c *fi.Context, a, e, changes *Keypair) error {
 
 	castore := c.CAStore
 
-	template, err := buildCertificateTempate(e.Type)
+	template, err := buildCertificateTemplate(e.Type)
 	if err != nil {
 		return err
 	}

@@ -179,15 +179,11 @@ func (_ *Keypair) Render(c *fi.Context, a, e, changes *Keypair) error {
 		glog.V(2).Infof("Creating PKI keypair %q", name)
 
 		// TODO: Reuse private key if already exists?
-		privateKey, err := castore.CreatePrivateKey(name)
+		cert, _, err := castore.CreateKeypair(name, template)
 		if err != nil {
 			return err
 		}
 
-		cert, err := castore.IssueCert(name, privateKey, template)
-		if err != nil {
-			return err
-		}
 		glog.V(8).Infof("created certificate %v", cert)
 	}
 

@@ -196,7 +192,7 @@ func (_ *Keypair) Render(c *fi.Context, a, e, changes *Keypair) error {
 	return nil
 }
 
-func buildCertificateTempate(certificateType string) (*x509.Certificate, error) {
+func buildCertificateTemplate(certificateType string) (*x509.Certificate, error) {
 	if expanded, found := wellKnownCertificateTypes[certificateType]; found {
 		certificateType = expanded
 	}
@@ -0,0 +1,70 @@
+package fitasks
+
+import (
+	"fmt"
+	"k8s.io/kube-deploy/upup/pkg/fi"
+)
+
+//go:generate fitask -type=Secret
+type Secret struct {
+	Name *string
+}
+
+var _ fi.HasCheckExisting = &Secret{}
+
+// It's important always to check for the existing Secret, so we don't regenerate tokens e.g. on terraform
+func (e *Secret) CheckExisting(c *fi.Context) bool {
+	return true
+}
+
+func (e *Secret) Find(c *fi.Context) (*Secret, error) {
+	secrets := c.SecretStore
+
+	name := fi.StringValue(e.Name)
+	if name == "" {
+		return nil, nil
+	}
+
+	secret, err := secrets.FindSecret(name)
+	if err != nil {
+		return nil, err
+	}
+	if secret == nil {
+		return nil, nil
+	}
+
+	actual := &Secret{
+		Name: &name,
+	}
+
+	return actual, nil
+}
+
+func (e *Secret) Run(c *fi.Context) error {
+	return fi.DefaultDeltaRunMethod(e, c)
+}
+
+func (s *Secret) CheckChanges(a, e, changes *Secret) error {
+	if a != nil {
+		if changes.Name != nil {
+			return fi.CannotChangeField("Name")
+		}
+	}
+	return nil
+}
+
+func (_ *Secret) Render(c *fi.Context, a, e, changes *Secret) error {
+	name := fi.StringValue(e.Name)
+	if name == "" {
+		return fi.RequiredField("Name")
+	}
+
+	secrets := c.SecretStore
+
+	_, _, err := secrets.GetOrCreateSecret(name)
+	if err != nil {
+		return fmt.Errorf("error creating secret %q: %v", name, err)
+	}
+
+	return nil
+}
@@ -0,0 +1,43 @@
+// Code generated by ""fitask" -type=Secret"; DO NOT EDIT
+
+package fitasks
+
+import (
+	"encoding/json"
+
+	"k8s.io/kube-deploy/upup/pkg/fi"
+)
+
+// Secret
+
+// JSON marshalling boilerplate
+type realSecret Secret
+
+func (o *Secret) UnmarshalJSON(data []byte) error {
+	var jsonName string
+	if err := json.Unmarshal(data, &jsonName); err == nil {
+		o.Name = &jsonName
+		return nil
+	}
+
+	var r realSecret
+	if err := json.Unmarshal(data, &r); err != nil {
+		return err
+	}
+	*o = Secret(r)
+	return nil
+}
+
+var _ fi.HasName = &Secret{}
+
+func (e *Secret) GetName() *string {
+	return e.Name
+}
+
+func (e *Secret) SetName(name string) {
+	e.Name = &name
+}
+
+func (e *Secret) String() string {
+	return fi.TaskAsString(e)
+}
@@ -1,46 +0,0 @@
-package fi
-
-import (
-	"crypto/md5"
-	"crypto/sha1"
-	"crypto/sha256"
-	"fmt"
-	"github.com/golang/glog"
-	"hash"
-)
-
-type HashAlgorithm string
-
-const (
-	HashAlgorithmSHA256 = "sha256"
-	HashAlgorithmSHA1   = "sha1"
-	HashAlgorithmMD5    = "md5"
-)
-
-func NewHasher(hashAlgorithm HashAlgorithm) hash.Hash {
-	switch hashAlgorithm {
-	case HashAlgorithmMD5:
-		return md5.New()
-
-	case HashAlgorithmSHA1:
-		return sha1.New()
-
-	case HashAlgorithmSHA256:
-		return sha256.New()
-	}
-
-	glog.Exitf("Unknown hash algorithm: %v", hashAlgorithm)
-	return nil
-}
-
-func determineHashAlgorithm(hash string) (HashAlgorithm, error) {
-	if len(hash) == 32 {
-		return HashAlgorithmMD5, nil
-	} else if len(hash) == 40 {
-		return HashAlgorithmSHA1, nil
-	} else if len(hash) == 64 {
-		return HashAlgorithmSHA256, nil
-	} else {
-		return "", fmt.Errorf("Unrecognized hash format: %q", hash)
-	}
-}
@@ -0,0 +1,144 @@
+package hashing
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/sha1"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"github.com/golang/glog"
+	"hash"
+	"io"
+	"os"
+)
+
+type HashAlgorithm string
+
+const (
+	HashAlgorithmSHA256 HashAlgorithm = "sha256"
+	HashAlgorithmSHA1   HashAlgorithm = "sha1"
+	HashAlgorithmMD5    HashAlgorithm = "md5"
+)
+
+type Hash struct {
+	Algorithm HashAlgorithm
+	HashValue []byte
+}
+
+func (h *Hash) String() string {
+	return fmt.Sprintf("%s:%s", h.Algorithm, hex.EncodeToString(h.HashValue))
+}
+
+func (ha HashAlgorithm) NewHasher() hash.Hash {
+	switch ha {
+	case HashAlgorithmMD5:
+		return md5.New()
+
+	case HashAlgorithmSHA1:
+		return sha1.New()
+
+	case HashAlgorithmSHA256:
+		return sha256.New()
+	}
+
+	glog.Exitf("Unknown hash algorithm: %v", ha)
+	return nil
+}
+
+func (ha HashAlgorithm) FromString(s string) (*Hash, error) {
+	l := -1
+	switch ha {
+	case HashAlgorithmMD5:
+		l = 32
+	case HashAlgorithmSHA1:
+		l = 40
+	case HashAlgorithmSHA256:
+		l = 64
+	default:
+		return nil, fmt.Errorf("unknown hash algorithm: %q", ha)
+	}
+
+	if len(s) != l {
+		return nil, fmt.Errorf("invalid %q hash - unexpected length %d", ha, len(s))
+	}
+
+	hashValue, err := hex.DecodeString(s)
+	if err != nil {
+		return nil, fmt.Errorf("invalid hash %q - not hex", s)
+	}
+	return &Hash{Algorithm: ha, HashValue: hashValue}, nil
+}
+
+func FromString(s string) (*Hash, error) {
+	var ha HashAlgorithm
+	switch len(s) {
+	case 32:
+		ha = HashAlgorithmMD5
+	case 40:
+		ha = HashAlgorithmSHA1
+	case 64:
+		ha = HashAlgorithmSHA256
+	default:
+		return nil, fmt.Errorf("cannot determine algorithm for hash length: %d", len(s))
+	}
+
+	return ha.FromString(s)
+}
+
+func (ha HashAlgorithm) Hash(r io.Reader) (*Hash, error) {
+	hasher := ha.NewHasher()
+	_, err := copyToHasher(hasher, r)
+	if err != nil {
+		return nil, fmt.Errorf("error while hashing resource: %v", err)
+	}
+	return &Hash{Algorithm: ha, HashValue: hasher.Sum(nil)}, nil
+}
+
+func (ha HashAlgorithm) HashFile(p string) (*Hash, error) {
+	f, err := os.OpenFile(p, os.O_RDONLY, 0)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, err
+		}
+		return nil, fmt.Errorf("error opening file %q: %v", p, err)
+	}
+	defer f.Close()
+	return ha.Hash(f)
+}
+
+func HashesForResource(r io.Reader, hashAlgorithms []HashAlgorithm) ([]*Hash, error) {
+	var hashers []hash.Hash
+	var writers []io.Writer
+	for _, hashAlgorithm := range hashAlgorithms {
+		hasher := hashAlgorithm.NewHasher()
+		hashers = append(hashers, hasher)
+		writers = append(writers, hasher)
+	}
+
+	w := io.MultiWriter(writers...)
+
+	_, err := copyToHasher(w, r)
+	if err != nil {
+		return nil, fmt.Errorf("error while hashing resource: %v", err)
+	}
+
+	var hashes []*Hash
+	for i, hasher := range hashers {
+		hashes = append(hashes, &Hash{Algorithm: hashAlgorithms[i], HashValue: hasher.Sum(nil)})
+	}
+
+	return hashes, nil
+}
+
+func copyToHasher(dest io.Writer, src io.Reader) (int64, error) {
+	n, err := io.Copy(dest, src)
+	if err != nil {
+		return n, fmt.Errorf("error hashing data: %v", err)
+	}
+	return n, nil
+}
+
+func (l *Hash) Equal(r *Hash) bool {
+	return (l.Algorithm == r.Algorithm) && bytes.Equal(l.HashValue, r.HashValue)
+}
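A quick illustration of the new package's contract: FromString infers the algorithm from the hex string's length (32 characters for MD5, 40 for SHA-1, 64 for SHA-256). A minimal sketch, not part of this commit, using the import path introduced above:

    package main

    import (
    	"fmt"
    	"log"

    	"k8s.io/kube-deploy/upup/pkg/fi/hashing"
    )

    func main() {
    	// 32 hex characters (the MD5 of the empty string), so MD5 is inferred.
    	h, err := hashing.FromString("d41d8cd98f00b204e9800998ecf8427e")
    	if err != nil {
    		log.Fatalf("error parsing hash: %v", err)
    	}
    	fmt.Println(h.Algorithm) // md5
    	fmt.Println(h)           // md5:d41d8cd98f00b204e9800998ecf8427e
    }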
@@ -4,16 +4,17 @@ import (
 	"fmt"
 	"github.com/golang/glog"
 	"io"
+	"k8s.io/kube-deploy/upup/pkg/fi/hashing"
 	"net/http"
 	"os"
 	"path"
 )
 
-func DownloadURL(url string, dest string, hash string) (string, error) {
-	if hash != "" {
+func DownloadURL(url string, dest string, hash *hashing.Hash) (*hashing.Hash, error) {
+	if hash != nil {
 		match, err := fileHasHash(dest, hash)
 		if err != nil {
-			return "", err
+			return nil, err
 		}
 		if match {
 			return hash, nil

@@ -23,21 +24,21 @@ func DownloadURL(url string, dest string, hash string) (string, error) {
 	dirMode := os.FileMode(0755)
 	err := downloadURLAlways(url, dest, dirMode)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 
-	if hash != "" {
+	if hash != nil {
 		match, err := fileHasHash(dest, hash)
 		if err != nil {
-			return "", err
+			return nil, err
 		}
 		if !match {
-			return "", fmt.Errorf("downloaded from %q but hash did not match expected %q", url, hash)
+			return nil, fmt.Errorf("downloaded from %q but hash did not match expected %q", url, hash)
 		}
 	} else {
-		hash, err = HashFile(dest, HashAlgorithmSHA256)
+		hash, err = hashing.HashAlgorithmSHA256.HashFile(dest)
 		if err != nil {
-			return "", err
+			return nil, err
 		}
 	}
 
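A hedged sketch of the reworked contract, not part of this commit: passing nil skips verification and returns the SHA-256 computed for the downloaded file, while passing a *hashing.Hash makes the download fail if the file does not match. The URL and destination here are placeholders:

    package main

    import (
    	"fmt"
    	"log"

    	"k8s.io/kube-deploy/upup/pkg/fi"
    )

    func main() {
    	// nil hash: download unconditionally, get back the computed SHA-256.
    	h, err := fi.DownloadURL("https://example.com/nodeup.tar.gz", "/tmp/nodeup.tar.gz", nil)
    	if err != nil {
    		log.Fatalf("download failed: %v", err)
    	}
    	fmt.Println(h) // e.g. sha256:...
    }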
@@ -42,15 +42,41 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 		}
 	}
 
+	//if c.Config.ConfigurationStore != "" {
+	//	// TODO: If we ever delete local files, we need to filter so we only copy
+	//	// certain directories (i.e. not secrets / keys), because dest is a parent dir!
+	//	p, err := c.buildPath(c.Config.ConfigurationStore)
+	//	if err != nil {
+	//		return fmt.Errorf("error building config store: %v", err)
+	//	}
+	//
+	//	dest := vfs.NewFSPath("/etc/kubernetes")
+	//	scanner := vfs.NewVFSScan(p)
+	//	err = vfs.SyncDir(scanner, dest)
+	//	if err != nil {
+	//		return fmt.Errorf("error copying config store: %v", err)
+	//	}
+	//
+	//	c.Config.Tags = append(c.Config.Tags, "_config_store")
+	//} else {
+	//	c.Config.Tags = append(c.Config.Tags, "_not_config_store")
+	//}
+
 	loader := NewLoader(c.Config, assets)
+
+	err := buildTemplateFunctions(c.Config, loader.TemplateFunctions)
+	if err != nil {
+		return fmt.Errorf("error initializing: %v", err)
+	}
 
 	taskMap, err := loader.Build(c.ModelDir)
 	if err != nil {
-		glog.Exitf("error building: %v", err)
+		return fmt.Errorf("error building loader: %v", err)
 	}
 
 	var cloud fi.Cloud
 	var caStore fi.CAStore
+	var secretStore fi.SecretStore
 	var target fi.Target
 	checkExisting := true
 

@@ -66,7 +92,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 		return fmt.Errorf("unsupported target type %q", c.Target)
 	}
 
-	context, err := fi.NewContext(target, cloud, caStore, checkExisting)
+	context, err := fi.NewContext(target, cloud, caStore, secretStore, checkExisting)
 	if err != nil {
 		glog.Exitf("error building context: %v", err)
 	}
@@ -1,6 +1,8 @@
 package nodeup
 
-import "k8s.io/kube-deploy/upup/pkg/fi"
+import (
+	"k8s.io/kube-deploy/upup/pkg/fi"
+)
 
 // Our client configuration structure
 // Wherever possible, we try to use the types & names in https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go

@@ -12,13 +14,17 @@ type NodeConfig struct {
 	KubeScheduler *KubeSchedulerConfig `json:",omitempty"`
 	Docker        *DockerConfig        `json:",omitempty"`
 	APIServer     *APIServerConfig     `json:",omitempty"`
-	CACertificate *fi.Certificate      `json:",omitempty"`
 
 	DNS *DNSConfig `json:",omitempty"`
-	KubeUser     string `json:",omitempty"`
-	KubePassword string `json:",omitempty"`
-
-	Tokens map[string]string `json:",omitempty"`
+	// NodeConfig can directly access a store of secrets, keys or configuration
+	// (for example on S3) and then configure based on that
+	// This supports (limited) dynamic reconfiguration also
+	SecretStore string `json:",omitempty"`
+	KeyStore    string `json:",omitempty"`
+	ConfigStore string `json:",omitempty"`
+
+	KubeUser string `json:",omitempty"`
 
 	Tags   []string `json:",omitempty"`
 	Assets []string `json:",omitempty"`

@@ -27,11 +33,11 @@ type NodeConfig struct {
 
 	// The DNS zone to use if configuring a cloud provided DNS zone
 	DNSZone string `json:",omitempty"`
-}
-
-// A helper so that templates can get tokens which are not valid identifiers
-func (n *NodeConfig) GetToken(key string) string {
-	return n.Tokens[key]
+
+	// Deprecated in favor of KeyStore / SecretStore
+	Tokens       map[string]string          `json:",omitempty"`
+	Certificates map[string]*fi.Certificate `json:",omitempty"`
+	PrivateKeys  map[string]*fi.PrivateKey  `json:",omitempty"`
 }
 
 type DNSConfig struct {

@@ -45,11 +51,6 @@ type KubeletConfig struct {
 
 	LogLevel *int `json:",omitempty" flag:"v"`
 
-	Certificate *fi.Certificate `json:",omitempty" flag:"-"`
-	Key         *fi.PrivateKey  `json:",omitempty" flag:"-"`
-	// Allow override of CA Certificate
-	CACertificate *fi.Certificate `json:",omitempty" flag:"-"`
-
 	// Configuration flags - a subset of https://github.com/kubernetes/kubernetes/blob/master/pkg/apis/componentconfig/types.go
 
 	// config is the path to the config file or directory of files

@@ -389,9 +390,6 @@ type APIServerConfig struct {
 	PathSrvSshproxy string `json:",omitempty"`
 	Image           string `json:",omitempty"`
 
-	Certificate *fi.Certificate `json:",omitempty" flag:"-"`
-	Key         *fi.PrivateKey  `json:",omitempty" flag:"-"`
-
 	LogLevel int `json:",omitempty" flag:"v"`
 
 	CloudProvider string `json:",omitempty" flag:"cloud-provider"`
@@ -23,6 +23,8 @@ type Loader struct {
 	tasks map[string]fi.Task
 
 	tags map[string]struct{}
+
+	TemplateFunctions template.FuncMap
 }
 
 func NewLoader(config *NodeConfig, assets *fi.AssetStore) *Loader {

@@ -31,6 +33,7 @@ func NewLoader(config *NodeConfig, assets *fi.AssetStore) *Loader {
 	l.tasks = make(map[string]fi.Task)
 	l.optionsLoader = loader.NewOptionsLoader(config)
 	l.config = config
+	l.TemplateFunctions = make(template.FuncMap)
 
 	return l
 }

@@ -47,6 +50,9 @@ func (l *Loader) executeTemplate(key string, d string) (string, error) {
 		_, found := l.tags[tag]
 		return found
 	}
+	for k, fn := range l.TemplateFunctions {
+		funcMap[k] = fn
+	}
 	t.Funcs(funcMap)
 
 	context := l.config
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"github.com/golang/glog"
 	"k8s.io/kube-deploy/upup/pkg/fi"
+	"k8s.io/kube-deploy/upup/pkg/fi/hashing"
 	"k8s.io/kube-deploy/upup/pkg/fi/nodeup/cloudinit"
 	"k8s.io/kube-deploy/upup/pkg/fi/nodeup/local"
 	"os"

@@ -124,7 +125,15 @@ func (_ *Package) RenderLocal(t *local.LocalTarget, a, e, changes *Package) error {
 		return fmt.Errorf("error creating directories %q: %v", path.Dir(local), err)
 	}
 
-	_, err = fi.DownloadURL(fi.StringValue(e.Source), local, fi.StringValue(e.Hash))
+	var hash *hashing.Hash
+	if fi.StringValue(e.Hash) != "" {
+		parsed, err := hashing.FromString(fi.StringValue(e.Hash))
+		if err != nil {
+			return fmt.Errorf("error parsing hash: %v", err)
+		}
+		hash = parsed
+	}
+	_, err = fi.DownloadURL(fi.StringValue(e.Source), local, hash)
 	if err != nil {
 		return err
 	}
@@ -0,0 +1,165 @@
+package nodeup
+
+import (
+	"fmt"
+	"github.com/golang/glog"
+	"k8s.io/kube-deploy/upup/pkg/fi"
+	"text/template"
+)
+
+type templateFunctions struct {
+	config *NodeConfig
+	// keyStore is populated with a KeyStore, if KeyStore is set
+	keyStore fi.CAStore
+	// secretStore is populated with a SecretStore, if SecretStore is set
+	secretStore fi.SecretStore
+}
+
+func buildTemplateFunctions(config *NodeConfig, dest template.FuncMap) error {
+	t := &templateFunctions{
+		config: config,
+	}
+
+	if config.SecretStore != "" {
+		glog.Infof("Building SecretStore at %q", config.SecretStore)
+		p, err := fi.BuildVfsPath(config.SecretStore)
+		if err != nil {
+			return fmt.Errorf("error building secret store path: %v", err)
+		}
+
+		secretStore, err := fi.NewVFSSecretStore(p)
+		if err != nil {
+			return fmt.Errorf("error building secret store: %v", err)
+		}
+
+		t.secretStore = secretStore
+	}
+
+	if config.KeyStore != "" {
+		glog.Infof("Building KeyStore at %q", config.KeyStore)
+		p, err := fi.BuildVfsPath(config.KeyStore)
+		if err != nil {
+			return fmt.Errorf("error building key store path: %v", err)
+		}
+
+		keyStore, err := fi.NewVFSCAStore(p, false)
+		if err != nil {
+			return fmt.Errorf("error building key store: %v", err)
+		}
+		t.keyStore = keyStore
+	}
+
+	dest["CACertificatePool"] = t.CACertificatePool
+	dest["CACertificate"] = t.CACertificate
+	dest["PrivateKey"] = t.PrivateKey
+	dest["Certificate"] = t.Certificate
+	dest["AllTokens"] = t.AllTokens
+	dest["GetToken"] = t.GetToken
+
+	return nil
+}
+
+// CACertificatePool returns the set of valid CA certificates for the cluster
+func (c *templateFunctions) CACertificatePool() (*fi.CertificatePool, error) {
+	if c.keyStore != nil {
+		return c.keyStore.CertificatePool(fi.CertificateId_CA)
+	}
+
+	// Fallback to direct properties
+	glog.Infof("Falling back to direct configuration for keystore")
+	cert, err := c.CACertificate()
+	if err != nil {
+		return nil, err
+	}
+	if cert == nil {
+		return nil, fmt.Errorf("CA certificate not found (with fallback)")
+	}
+	pool := &fi.CertificatePool{}
+	pool.Primary = cert
+	return pool, nil
+}
+
+// CACertificate returns the primary CA certificate for the cluster
+func (c *templateFunctions) CACertificate() (*fi.Certificate, error) {
+	if c.keyStore != nil {
+		return c.keyStore.Cert(fi.CertificateId_CA)
+	}
+
+	// Fallback to direct properties
+	return c.Certificate(fi.CertificateId_CA)
+}
+
+// PrivateKey returns the specified private key
+func (c *templateFunctions) PrivateKey(id string) (*fi.PrivateKey, error) {
+	if c.keyStore != nil {
+		return c.keyStore.PrivateKey(id)
+	}
+
+	// Fallback to direct properties
+	glog.Infof("Falling back to direct configuration for keystore")
+	k := c.config.PrivateKeys[id]
+	if k == nil {
+		return nil, fmt.Errorf("private key not found: %q (with fallback)", id)
+	}
+	return k, nil
+}
+
+// Certificate returns the specified certificate
+func (c *templateFunctions) Certificate(id string) (*fi.Certificate, error) {
+	if c.keyStore != nil {
+		return c.keyStore.Cert(id)
+	}
+
+	// Fallback to direct properties
+	glog.Infof("Falling back to direct configuration for keystore")
+	cert := c.config.Certificates[id]
+	if cert == nil {
+		return nil, fmt.Errorf("certificate not found: %q (with fallback)", id)
+	}
+	return cert, nil
+}
+
+// AllTokens returns a map of all tokens
+func (n *templateFunctions) AllTokens() (map[string]string, error) {
+	if n.secretStore != nil {
+		tokens := make(map[string]string)
+		ids, err := n.secretStore.ListSecrets()
+		if err != nil {
+			return nil, err
+		}
+		for _, id := range ids {
+			token, err := n.secretStore.FindSecret(id)
+			if err != nil {
+				return nil, err
+			}
+			tokens[id] = string(token.Data)
+		}
+		return tokens, nil
+	}
+
+	// Fallback to direct configuration
+	glog.Infof("Falling back to direct configuration for secrets")
+	return n.config.Tokens, nil
+}
+
+// GetToken returns the specified token
+func (n *templateFunctions) GetToken(key string) (string, error) {
+	if n.secretStore != nil {
+		token, err := n.secretStore.FindSecret(key)
+		if err != nil {
+			return "", err
+		}
+		if token == nil {
+			return "", fmt.Errorf("token not found: %q", key)
+		}
+		return string(token.Data), nil
+	}
+
+	// Fallback to direct configuration
+	glog.Infof("Falling back to direct configuration for secrets")
+	token := n.config.Tokens[key]
+	if token == "" {
+		return "", fmt.Errorf("token not found: %q", key)
+	}
+	return token, nil
+}
@@ -2,10 +2,8 @@ package fi
 
 import (
 	"bytes"
-	"encoding/hex"
 	"encoding/json"
 	"fmt"
-	"hash"
 	"io"
 	"os"
 )

@@ -19,45 +17,6 @@ type TemplateResource interface {
 	Curry(args []string) TemplateResource
 }
 
-func HashForResource(r Resource, hashAlgorithm HashAlgorithm) (string, error) {
-	hasher := NewHasher(hashAlgorithm)
-	_, err := CopyResource(hasher, r)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return "", err
-		}
-		return "", fmt.Errorf("error while hashing resource: %v", err)
-	}
-	return hex.EncodeToString(hasher.Sum(nil)), nil
-}
-
-func HashesForResource(r Resource, hashAlgorithms []HashAlgorithm) (map[HashAlgorithm]string, error) {
-	hashers := make(map[HashAlgorithm]hash.Hash)
-	var writers []io.Writer
-	for _, hashAlgorithm := range hashAlgorithms {
-		if hashers[hashAlgorithm] != nil {
-			continue
-		}
-		hasher := NewHasher(hashAlgorithm)
-		hashers[hashAlgorithm] = hasher
-		writers = append(writers, hasher)
-	}
-
-	w := io.MultiWriter(writers...)
-
-	_, err := CopyResource(w, r)
-	if err != nil {
-		return nil, fmt.Errorf("error while hashing resource: %v", err)
-	}
-
-	hashes := make(map[HashAlgorithm]string)
-	for k, hasher := range hashers {
-		hashes[k] = hex.EncodeToString(hasher.Sum(nil))
-	}
-
-	return hashes, nil
-}
-
 func ResourcesMatch(a, b Resource) (bool, error) {
 	aReader, err := a.Open()
 	if err != nil {
@@ -4,6 +4,7 @@ import (
 	crypto_rand "crypto/rand"
 	"encoding/base64"
 	"fmt"
+	"k8s.io/kube-deploy/upup/pkg/fi/vfs"
 	"strings"
 )
 

@@ -16,6 +17,9 @@ type SecretStore interface {
 	GetOrCreateSecret(id string) (secret *Secret, created bool, err error)
 	// Lists the ids of all known secrets
 	ListSecrets() ([]string, error)
+
+	// VFSPath returns the path where the SecretStore is stored
+	VFSPath() vfs.Path
 }
 
 type Secret struct {
@@ -2,13 +2,21 @@ package fi
 
 import (
 	"fmt"
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/golang/glog"
 	"k8s.io/kube-deploy/upup/pkg/fi/utils"
 	"k8s.io/kube-deploy/upup/pkg/fi/vfs"
+	"net/url"
 	"os"
 	"strings"
 )
 
 type StateStore interface {
+	// VFSPath returns the path where the StateStore is stored
+	VFSPath() vfs.Path
+
 	CA() CAStore
 	Secrets() SecretStore
 

@@ -45,6 +53,10 @@ func (s *VFSStateStore) CA() CAStore {
 	return s.ca
 }
 
+func (s *VFSStateStore) VFSPath() vfs.Path {
+	return s.location
+}
+
 func (s *VFSStateStore) Secrets() SecretStore {
 	return s.secrets
 }

@@ -87,3 +99,51 @@ func (s *VFSStateStore) WriteConfig(config interface{}) error {
 	}
 	return nil
 }
+
+func BuildVfsPath(p string) (vfs.Path, error) {
+	if strings.HasPrefix(p, "s3://") {
+		u, err := url.Parse(p)
+		if err != nil {
+			return nil, fmt.Errorf("invalid s3 path: %q", err)
+		}
+
+		var region string
+		{
+			config := aws.NewConfig().WithRegion("us-east-1")
+			session := session.New()
+			s3Client := s3.New(session, config)
+
+			bucket := strings.TrimSuffix(u.Host, "/")
+			request := &s3.GetBucketLocationInput{}
+			request.Bucket = aws.String(bucket)
+
+			response, err := s3Client.GetBucketLocation(request)
+			if err != nil {
+				// TODO: Auto-create bucket?
+				return nil, fmt.Errorf("error getting location for S3 bucket %q: %v", bucket, err)
+			}
+			if response.LocationConstraint == nil {
+				// US Classic does not return a region
+				region = "us-east-1"
+			} else {
+				region = *response.LocationConstraint
+				// Another special case: "EU" can mean eu-west-1
+				if region == "EU" {
+					region = "eu-west-1"
+				}
+			}
+			glog.V(2).Infof("Found bucket %q in region %q", bucket, region)
+		}
+
+		{
+			config := aws.NewConfig().WithRegion(region)
+			session := session.New()
+			s3Client := s3.New(session, config)
+
+			s3path := vfs.NewS3Path(s3Client, u.Host, u.Path)
+			return s3path, nil
+		}
+	}
+
+	return nil, fmt.Errorf("unknown / unhandled path type: %q", p)
+}
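A minimal sketch of how the new helper is meant to be called, not part of this commit; the bucket name is hypothetical. BuildVfsPath first resolves the bucket's region via GetBucketLocation and then builds an S3Path against a client for that region:

    package main

    import (
    	"fmt"
    	"log"

    	"k8s.io/kube-deploy/upup/pkg/fi"
    )

    func main() {
    	// Hypothetical state location; per the function body above, only
    	// s3:// URLs are handled so far, anything else returns an error.
    	p, err := fi.BuildVfsPath("s3://my-state-bucket/cluster1")
    	if err != nil {
    		log.Fatalf("error building state path: %v", err)
    	}
    	fmt.Println(p.Path()) // s3://my-state-bucket/cluster1
    }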
@@ -5,6 +5,7 @@ import (
 	"github.com/golang/glog"
 	"io"
 	"io/ioutil"
+	"k8s.io/kube-deploy/upup/pkg/fi/hashing"
 	"os"
 	"path"
 	"sync"

@@ -15,6 +16,7 @@ type FSPath struct {
 }
 
 var _ Path = &FSPath{}
+var _ HasHash = &FSPath{}
 
 func NewFSPath(location string) *FSPath {
 	return &FSPath{location: location}

@@ -100,6 +102,9 @@ func (p *FSPath) ReadFile() ([]byte, error) {
 func (p *FSPath) ReadDir() ([]Path, error) {
 	files, err := ioutil.ReadDir(p.location)
 	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, err
+		}
 		return nil, err
 	}
 	var paths []Path

@@ -109,6 +114,55 @@ func (p *FSPath) ReadDir() ([]Path, error) {
 	return paths, nil
 }
 
+func (p *FSPath) ReadTree() ([]Path, error) {
+	var paths []Path
+	err := readTree(p.location, &paths)
+	if err != nil {
+		return nil, err
+	}
+	return paths, nil
+}
+
+func readTree(base string, dest *[]Path) error {
+	files, err := ioutil.ReadDir(base)
+	if err != nil {
+		return err
+	}
+	for _, f := range files {
+		p := path.Join(base, f.Name())
+		*dest = append(*dest, NewFSPath(p))
+		if f.IsDir() {
+			err = readTree(p, dest)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
 func (p *FSPath) Base() string {
 	return path.Base(p.location)
 }
+
+func (p *FSPath) Path() string {
+	return p.location
+}
+
+func (p *FSPath) String() string {
+	return p.Path()
+}
+
+func (p *FSPath) Remove() error {
+	return os.Remove(p.location)
+}
+
+func (p *FSPath) PreferredHash() (*hashing.Hash, error) {
+	return p.Hash(hashing.HashAlgorithmSHA256)
+}
+
+func (p *FSPath) Hash(a hashing.HashAlgorithm) (*hashing.Hash, error) {
+	glog.V(2).Infof("hashing file %q", p.location)
+
+	return a.HashFile(p.location)
+}
@ -2,12 +2,17 @@ package vfs

import (
    "bytes"
    "encoding/hex"
    "fmt"
    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/golang/glog"
    "io/ioutil"
    "k8s.io/kube-deploy/upup/pkg/fi/hashing"
    "os"
    "path"
    "strings"
    "sync"
)

@ -15,11 +20,16 @@ type S3Path struct {
    client *s3.S3
    bucket string
    key    string
    etag   *string
}

var _ Path = &S3Path{}
var _ HasHash = &S3Path{}

func NewS3Path(client *s3.S3, bucket string, key string) *S3Path {
    bucket = strings.TrimSuffix(bucket, "/")
    key = strings.TrimPrefix(key, "/")

    return &S3Path{
        client: client,
        bucket: bucket,

@ -27,10 +37,33 @@ func NewS3Path(client *s3.S3, bucket string, key string) *S3Path {
    }
}

func (p *S3Path) String() string {
func (p *S3Path) Path() string {
    return "s3://" + p.bucket + "/" + p.key
}

func (p *S3Path) Bucket() string {
    return p.bucket
}

func (p *S3Path) String() string {
    return p.Path()
}

func (p *S3Path) Remove() error {
    request := &s3.DeleteObjectInput{}
    request.Bucket = aws.String(p.bucket)
    request.Key = aws.String(p.key)

    _, err := p.client.DeleteObject(request)
    if err != nil {
        // TODO: Check for not-exists, return os.ErrNotExist
        return fmt.Errorf("error deleting %s: %v", p, err)
    }

    return nil
}

func (p *S3Path) Join(relativePath ...string) Path {
    args := []string{p.key}
    args = append(args, relativePath...)

@ -43,6 +76,8 @@ func (p *S3Path) Join(relativePath ...string) Path {
}

func (p *S3Path) WriteFile(data []byte) error {
    glog.V(4).Infof("Writing file %q", p)

    request := &s3.PutObjectInput{}
    request.Body = bytes.NewReader(data)
    request.Bucket = aws.String(p.bucket)

@ -55,7 +90,7 @@ func (p *S3Path) WriteFile(data []byte) error {
        return fmt.Errorf("error writing %s: %v", p, err)
    }

    return err
    return nil
}

// To prevent concurrent creates on the same file while maintaining atomicity of writes,

@ -82,13 +117,17 @@ func (p *S3Path) CreateFile(data []byte) error {
}

func (p *S3Path) ReadFile() ([]byte, error) {
    glog.V(4).Infof("Reading file %q", p)

    request := &s3.GetObjectInput{}
    request.Bucket = aws.String(p.bucket)
    request.Key = aws.String(p.key)

    response, err := p.client.GetObject(request)
    if err != nil {
        // TODO: If not found, return os.ErrNotExist
        if AWSErrorCode(err) == "NoSuchKey" {
            return nil, os.ErrNotExist
        }
        return nil, fmt.Errorf("error fetching %s: %v", p, err)
    }
    defer response.Body.Close()

@ -101,9 +140,13 @@ func (p *S3Path) ReadFile() ([]byte, error) {
}

func (p *S3Path) ReadDir() ([]Path, error) {
    prefix := p.key
    if !strings.HasSuffix(prefix, "/") {
        prefix += "/"
    }
    request := &s3.ListObjectsInput{}
    request.Bucket = aws.String(p.bucket)
    request.Prefix = aws.String(p.key)
    request.Prefix = aws.String(prefix)
    request.Delimiter = aws.String("/")

    var paths []Path

@ -114,6 +157,34 @@ func (p *S3Path) ReadDir() ([]Path, error) {
            client: p.client,
            bucket: p.bucket,
            key:    key,
            etag:   o.ETag,
        }
        paths = append(paths, child)
    }
        return true
    })
    if err != nil {
        return nil, fmt.Errorf("error listing %s: %v", p, err)
    }
    glog.V(8).Infof("Listed files in %v: %v", p, paths)
    return paths, nil
}
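
A note, not part of the commit: the trailing "/" added to the prefix, combined with Delimiter "/", makes S3 return only the immediate children of the key, so ReadDir behaves like a single directory level; ReadTree below omits the Delimiter so the same prefix matches the whole subtree. An illustrative contrast, with hypothetical object keys:

// Bucket holds: pki/ca.crt, pki/issued/ca/1.crt, pki/issued/ca/2.crt
//
// ReadDir  (Prefix "pki/", Delimiter "/") -> pki/ca.crt
//          (pki/issued/ appears only as a common prefix, not as an object)
// ReadTree (Prefix "pki", no Delimiter)   -> pki/ca.crt,
//          pki/issued/ca/1.crt, pki/issued/ca/2.crt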

func (p *S3Path) ReadTree() ([]Path, error) {
    request := &s3.ListObjectsInput{}
    request.Bucket = aws.String(p.bucket)
    request.Prefix = aws.String(p.key)
    // No delimiter for recursive search

    var paths []Path
    err := p.client.ListObjectsPages(request, func(page *s3.ListObjectsOutput, lastPage bool) bool {
        for _, o := range page.Contents {
            key := aws.StringValue(o.Key)
            child := &S3Path{
                client: p.client,
                bucket: p.bucket,
                key:    key,
                etag:   o.ETag,
            }
            paths = append(paths, child)
        }

@ -128,3 +199,34 @@ func (p *S3Path) ReadDir() ([]Path, error) {
func (p *S3Path) Base() string {
    return path.Base(p.key)
}

func (p *S3Path) PreferredHash() (*hashing.Hash, error) {
    return p.Hash(hashing.HashAlgorithmMD5)
}

func (p *S3Path) Hash(a hashing.HashAlgorithm) (*hashing.Hash, error) {
    if a != hashing.HashAlgorithmMD5 {
        return nil, nil
    }

    if p.etag == nil {
        return nil, nil
    }

    md5 := strings.Trim(*p.etag, "\"")

    md5Bytes, err := hex.DecodeString(md5)
    if err != nil {
        return nil, fmt.Errorf("Etag was not a valid MD5 sum: %q", *p.etag)
    }

    return &hashing.Hash{Algorithm: hashing.HashAlgorithmMD5, HashValue: md5Bytes}, nil
}

// AWSErrorCode returns the aws error code, if it is an awserr.Error, otherwise ""
func AWSErrorCode(err error) string {
    if awsError, ok := err.(awserr.Error); ok {
        return awsError.Code()
    }
    return ""
}
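
AWSErrorCode is what lets ReadFile map the SDK's "NoSuchKey" failure onto os.ErrNotExist. A sketch of the same pattern for other S3 calls (the helper name is hypothetical; "NoSuchBucket" is a real S3 error code):

// isMissingBucket reports whether an S3 error means the bucket is absent.
func isMissingBucket(err error) bool {
    return AWSErrorCode(err) == "NoSuchBucket"
}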

@ -66,10 +66,32 @@ func (p *SSHPath) newClient() (*sftp.Client, error) {
    }
}

func (p *SSHPath) String() string {
func (p *SSHPath) Path() string {
    return "ssh://" + p.server + p.path
}

func (p *SSHPath) String() string {
    return p.Path()
}

func (p *SSHPath) Remove() error {
    sftpClient, err := p.newClient()
    if err != nil {
        return err
    }
    defer sftpClient.Close()

    err = sftpClient.Remove(p.path)
    if err != nil {
        if os.IsNotExist(err) {
            return err
        }
        return fmt.Errorf("error deleting %s: %v", p, err)
    }

    return nil
}

func (p *SSHPath) Join(relativePath ...string) Path {
    args := []string{p.path}
    args = append(args, relativePath...)

@ -216,7 +238,41 @@ func (p *SSHPath) ReadDir() ([]Path, error) {
        children = append(children, child)
    }
    return children, nil
}

func (p *SSHPath) ReadTree() ([]Path, error) {
    sftpClient, err := p.newClient()
    if err != nil {
        return nil, err
    }
    defer sftpClient.Close()

    var paths []Path
    err = readSFTPTree(sftpClient, p, &paths)
    if err != nil {
        return nil, err
    }
    return paths, nil
}

func readSFTPTree(sftpClient *sftp.Client, p *SSHPath, dest *[]Path) error {
    files, err := sftpClient.ReadDir(p.path)
    if err != nil {
        return err
    }
    for _, f := range files {
        child := NewSSHPath(p.client, p.server, path.Join(p.path, f.Name()), p.sudo)

        *dest = append(*dest, child)

        if f.IsDir() {
            err = readSFTPTree(sftpClient, child, dest)
            if err != nil {
                return err
            }
        }
    }
    return nil
}

func (p *SSHPath) Base() string {

@ -1,5 +1,12 @@
package vfs

import (
    "fmt"
    "github.com/golang/glog"
    "k8s.io/kube-deploy/upup/pkg/fi/hashing"
    "strings"
)

// Yet another VFS package
// If there's a "winning" VFS implementation in go, we should switch to it!

@ -14,8 +21,58 @@ type Path interface {
    // CreateFile writes the file contents, but only if the file does not already exist
    CreateFile(data []byte) error

    // Remove deletes the file
    Remove() error

    // Base returns the base name (last element)
    Base() string

    // Path returns a string representing the full path
    Path() string

    // ReadDir lists the files in a particular Path
    ReadDir() ([]Path, error)

    // ReadTree lists all files in the subtree rooted at the current Path
    ReadTree() ([]Path, error)
}

type HasHash interface {
    // PreferredHash returns the hash of the file contents, with the preferred hash algorithm
    PreferredHash() (*hashing.Hash, error)

    // Hash gets the hash, or nil if the hash cannot be (easily) computed
    Hash(algorithm hashing.HashAlgorithm) (*hashing.Hash, error)
}

func RelativePath(base Path, child Path) (string, error) {
    basePath := base.Path()
    childPath := child.Path()
    if !strings.HasSuffix(basePath, "/") {
        basePath += "/"
    }

    if !strings.HasPrefix(childPath, basePath) {
        return "", fmt.Errorf("Path %q is not a child of %q", child, base)
    }

    relativePath := childPath[len(basePath):]
    return relativePath, nil
}
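
A worked example of RelativePath, with hypothetical paths: the base gains a trailing "/" before the prefix check, so the remainder comes back without a leading slash.

base := vfs.NewFSPath("/state/cluster")
child := vfs.NewFSPath("/state/cluster/secrets/admin")
rel, err := vfs.RelativePath(base, child)
// rel == "secrets/admin", err == nil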

func IsClusterReadable(p Path) bool {
    switch p.(type) {
    case *S3Path:
        return true

    case *SSHPath:
        return false

    case *FSPath:
        return false

    default:
        glog.Fatalf("IsClusterReadable not implemented for type %T", p)
        return false
    }
}
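
The policy the switch encodes, summarized with illustrative paths: only S3 locations are treated as directly fetchable by cluster nodes.

// s3://bucket/cluster/...        -> true  (nodes can fetch from S3 themselves)
// ssh://host/state/...           -> false (requires an SSH/SFTP session)
// /local/state/... (an *FSPath)  -> false (only visible on this machine)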

@ -0,0 +1,199 @@
package vfs

import (
    "bytes"
    "fmt"
    "github.com/golang/glog"
    "k8s.io/kube-deploy/upup/pkg/fi/hashing"
    "os"
)

// VFSScan scans a source Path for changed files
type VFSScan struct {
    Base   Path
    hashes map[string]*hashing.Hash
}

func NewVFSScan(base Path) *VFSScan {
    return &VFSScan{Base: base}
}

type ChangeType string

const ChangeType_Added ChangeType = "ADDED"
const ChangeType_Removed ChangeType = "REMOVED"
const ChangeType_Modified ChangeType = "MODIFIED"

type Change struct {
    ChangeType ChangeType
    Path       Path
    Hash       *hashing.Hash
}

// Scan scans for changed files. On the first call it will return all files as ChangeType_Added.
// On subsequent calls it will return any changed files (using their hashes)
func (v *VFSScan) Scan() ([]Change, error) {
    allFiles, err := v.Base.ReadTree()
    if err != nil {
        return nil, fmt.Errorf("Error reading dir %q: %v", v.Base, err)
    }

    files := make(map[string]Path)
    hashes := make(map[string]*hashing.Hash)
    for _, f := range allFiles {
        key := f.Path()
        files[key] = f
        hasHash, ok := f.(HasHash)
        if !ok {
            return nil, fmt.Errorf("Source must support hashing: %T", f)
        }
        hash, err := hasHash.PreferredHash()
        if err != nil {
            return nil, fmt.Errorf("Error hashing %q: %v", key, err)
        }

        hashes[key] = hash
    }

    if v.hashes == nil {
        v.hashes = hashes
        var changes []Change
        for k, f := range files {
            hash := hashes[k]
            changes = append(changes, Change{ChangeType: ChangeType_Added, Path: f, Hash: hash})
        }
        return changes, nil
    }

    var changes []Change
    for k, f := range files {
        oldHash := v.hashes[k]
        newHash := hashes[k]

        if oldHash == nil {
            changes = append(changes, Change{ChangeType: ChangeType_Added, Path: f, Hash: newHash})
        } else if !oldHash.Equal(newHash) {
            changes = append(changes, Change{ChangeType: ChangeType_Modified, Path: f, Hash: newHash})
        }
    }

    for k := range v.hashes {
        newHash := hashes[k]
        f := files[k]
        if newHash == nil {
            changes = append(changes, Change{ChangeType: ChangeType_Removed, Path: f, Hash: newHash})
        }
    }

    v.hashes = hashes
    return changes, nil
}
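
A minimal sketch of driving Scan in a polling loop, not part of the commit (the directory and interval are hypothetical); the first call reports every file as ADDED, later calls only the delta:

scan := vfs.NewVFSScan(vfs.NewFSPath("/tmp/watched")) // FSPath satisfies HasHash (SHA-256)
for {
    changes, err := scan.Scan()
    if err != nil {
        log.Fatalf("scan failed: %v", err)
    }
    for _, c := range changes {
        if c.Path != nil { // Path can be nil for REMOVED entries (files[k] above)
            fmt.Println(c.ChangeType, c.Path.Path())
        }
    }
    time.Sleep(10 * time.Second)
}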

func SyncDir(src *VFSScan, destBase Path) error {
    changes, err := src.Scan()
    if err != nil {
        return fmt.Errorf("Error scanning source dir %q: %v", src, err)
    }

    for _, change := range changes {
        f := change.Path

        relativePath, err := RelativePath(src.Base, f)
        if err != nil {
            return err
        }

        destFile := destBase.Join(relativePath)

        switch change.ChangeType {
        case ChangeType_Removed:
            err := destFile.Remove()
            if err != nil {
                if !os.IsNotExist(err) {
                    return fmt.Errorf("error removing file %q: %v", destFile, err)
                }
            }
            continue

        case ChangeType_Modified, ChangeType_Added:
            break

        default:
            return fmt.Errorf("unknown change type: %q", change.ChangeType)
        }

        hashMatch, err := hashesMatch(f, destFile)
        if err != nil {
            return err
        }
        if hashMatch {
            glog.V(2).Infof("File hashes match: %s and %s", f, destFile)
            continue
        }

        srcData, err := f.ReadFile()
        if err != nil {
            return fmt.Errorf("error reading source file %q: %v", f, err)
        }

        destData, err := destFile.ReadFile()
        if err != nil {
            if !os.IsNotExist(err) {
                return fmt.Errorf("error reading dest file %q: %v", destFile, err)
            }
        }

        if destData == nil || !bytes.Equal(srcData, destData) {
            glog.V(2).Infof("Copying data from %s to %s", f, destFile)
            err = destFile.WriteFile(srcData)
            if err != nil {
                return fmt.Errorf("error writing dest file %q: %v", destFile, err)
            }
        }
    }

    return nil
}
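
A sketch of one-way synchronization with SyncDir, assuming an *s3.S3 client from the AWS SDK (bucket and directory are hypothetical); because SyncDir consumes a VFSScan, repeated calls copy only files whose hashes changed since the previous scan:

src := vfs.NewVFSScan(vfs.NewFSPath("/var/cache/models"))
dest := vfs.NewS3Path(s3Client, "my-state-bucket", "models")
if err := vfs.SyncDir(src, dest); err != nil {
    log.Fatalf("sync failed: %v", err)
}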

func hashesMatch(src, dest Path) (bool, error) {
    sh, ok := src.(HasHash)
    if !ok {
        return false, nil
    }

    dh, ok := dest.(HasHash)
    if !ok {
        return false, nil
    }

    {
        srcHash, err := sh.PreferredHash()
        if err != nil {
            glog.Warningf("error getting hash of source file %s: %v", src, err)
        } else if srcHash != nil {
            destHash, err := dh.Hash(srcHash.Algorithm)
            if err != nil {
                glog.Warningf("error comparing hash of dest file %s: %v", dest, err)
            } else if destHash != nil {
                return destHash.Equal(srcHash), nil
            }
        }
    }

    {
        destHash, err := dh.PreferredHash()
        if err != nil {
            glog.Warningf("error getting hash of dest file %s: %v", dest, err)
        } else if destHash != nil {
            srcHash, err := sh.Hash(destHash.Algorithm)
            if err != nil {
                glog.Warningf("error comparing hash of src file %s: %v", src, err)
            } else if srcHash != nil {
                return srcHash.Equal(destHash), nil
            }
        }
    }

    glog.Infof("No compatible hash: %s and %s", src, dest)
    return false, nil
}

@ -9,15 +9,17 @@ import (
    "fmt"
    "github.com/golang/glog"
    "k8s.io/kube-deploy/upup/pkg/fi/vfs"
    "math/big"
    "os"
    "strings"
    "time"
)

type VFSCAStore struct {
    dryrun        bool
    basedir       vfs.Path
    caCertificate *Certificate
    caPrivateKey  *PrivateKey
    caCertificates *certificates
    caPrivateKeys  *privateKeys
}

var _ CAStore = &VFSCAStore{}

@ -35,23 +37,22 @@ func NewVFSCAStore(basedir vfs.Path, dryrun bool) (CAStore, error) {
    //if err != nil {
    //	return nil, fmt.Errorf("error creating directory: %v", err)
    //}
    caCertificate, err := c.loadCertificate(basedir.Join("ca.crt"))
    caCertificates, err := c.loadCertificates(c.buildCertificatePoolPath(CertificateId_CA))
    if err != nil {
        return nil, err
    }

    if caCertificate != nil {
        privateKeyPath := basedir.Join("private", "ca.key")
        caPrivateKey, err := c.loadPrivateKey(privateKeyPath)
    if caCertificates != nil {
        caPrivateKeys, err := c.loadPrivateKeys(c.buildPrivateKeyPoolPath(CertificateId_CA))
        if err != nil {
            return nil, err
        }
        if caPrivateKey == nil {
            glog.Warningf("CA private key was not found %q", privateKeyPath)
        if caPrivateKeys == nil {
            glog.Warningf("CA private key was not found")
            //return nil, fmt.Errorf("error loading CA private key - key not found")
        }
        c.caCertificate = caCertificate
        c.caPrivateKey = caPrivateKey
        c.caCertificates = caCertificates
        c.caPrivateKeys = caPrivateKeys
    } else {
        err := c.generateCACertificate()
        if err != nil {

@ -61,10 +62,15 @@ func NewVFSCAStore(basedir vfs.Path, dryrun bool) (CAStore, error) {
    return c, nil
}

func (s *VFSCAStore) VFSPath() vfs.Path {
    return s.basedir
}

func (c *VFSCAStore) generateCACertificate() error {
    subject := &pkix.Name{
        CommonName: "kubernetes",
    }
    serial := c.buildSerial()
    template := &x509.Certificate{
        Subject:  *subject,
        KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign,

@ -85,43 +91,121 @@ func (c *VFSCAStore) generateCACertificate() error {
        return err
    }

    keyPath := c.basedir.Join("private", "ca.key")
    keyPath := c.buildPrivateKeyPath(CertificateId_CA, serial)
    err = c.storePrivateKey(caPrivateKey, keyPath)
    if err != nil {
        return err
    }

    certPath := c.basedir.Join("ca.crt")
    // Make double-sure it round-trips
    privateKeys, err := c.loadPrivateKeys(c.buildPrivateKeyPoolPath(CertificateId_CA))
    if err != nil {
        return err
    }
    if privateKeys == nil || privateKeys.primary != serial.Text(10) {
        return fmt.Errorf("failed to round-trip CA private key")
    }

    certPath := c.buildCertificatePath(CertificateId_CA, serial)
    err = c.storeCertificate(caCertificate, certPath)
    if err != nil {
        return err
    }

    // Make double-sure it round-trips
    caCertificate, err = c.loadCertificate(certPath)
    certificates, err := c.loadCertificates(c.buildCertificatePoolPath(CertificateId_CA))
    if err != nil {
        return err
    }

    c.caPrivateKey = caPrivateKey
    c.caCertificate = caCertificate
    if certificates == nil || certificates.primary != serial.Text(10) {
        return fmt.Errorf("failed to round-trip CA certificate")
    }

    c.caPrivateKeys = privateKeys
    c.caCertificates = certificates
    return nil
}

func (c *VFSCAStore) buildCertificatePath(id string) vfs.Path {
    return c.basedir.Join("issued", id+".crt")
func (c *VFSCAStore) buildCertificatePoolPath(id string) vfs.Path {
    return c.basedir.Join("issued", id)
}

func (c *VFSCAStore) buildPrivateKeyPath(id string) vfs.Path {
    return c.basedir.Join("private", id+".key")
func (c *VFSCAStore) buildCertificatePath(id string, serial *big.Int) vfs.Path {
    return c.basedir.Join("issued", id, serial.Text(10)+".crt")
}

func (c *VFSCAStore) loadCertificate(p vfs.Path) (*Certificate, error) {
func (c *VFSCAStore) buildPrivateKeyPoolPath(id string) vfs.Path {
    return c.basedir.Join("private", id)
}

func (c *VFSCAStore) buildPrivateKeyPath(id string, serial *big.Int) vfs.Path {
    return c.basedir.Join("private", id, serial.Text(10)+".key")
}

type certificates struct {
    certificates map[string]*Certificate
    primary      string
}

func (p *certificates) Primary() *Certificate {
    if p.primary == "" {
        return nil
    }
    return p.certificates[p.primary]
}

func (c *VFSCAStore) loadCertificates(p vfs.Path) (*certificates, error) {
    files, err := p.ReadDir()
    if err != nil {
        if os.IsNotExist(err) {
            return nil, nil
        }
        return nil, err
    }

    certs := &certificates{
        certificates: make(map[string]*Certificate),
    }

    for _, f := range files {
        cert, err := c.loadOneCertificate(f)
        if err != nil {
            return nil, fmt.Errorf("error loading certificate %q: %v", f, err)
        }
        name := f.Base()
        name = strings.TrimSuffix(name, ".crt")
        certs.certificates[name] = cert
    }

    if len(certs.certificates) == 0 {
        return nil, nil
    }

    var primaryVersion *big.Int
    for k := range certs.certificates {
        version, ok := big.NewInt(0).SetString(k, 10)
        if !ok {
            glog.Warningf("Ignoring certificate with non-integer version: %q", k)
            continue
        }

        if primaryVersion == nil || version.Cmp(primaryVersion) > 0 {
            certs.primary = k
            primaryVersion = version
        }
    }

    return certs, nil
}
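
The on-disk layout this introduces, shown with hypothetical serials: each id becomes a directory holding one file per serial, and loadCertificates picks the numerically largest name as the primary.

// issued/ca/1465000000000000000.crt    an older CA certificate
// issued/ca/1465100000000000000.crt    largest serial: becomes certs.primary
// private/ca/1465100000000000000.key   matching key, named by the same serial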

func (c *VFSCAStore) loadOneCertificate(p vfs.Path) (*Certificate, error) {
    data, err := p.ReadFile()
    if err != nil {
        if os.IsNotExist(err) {
            return nil, nil
        }
        return nil, err
    }
    cert, err := LoadPEMCertificate(data)
    if err != nil {

@ -140,32 +224,78 @@ func (c *VFSCAStore) Cert(id string) (*Certificate, error) {
        glog.Warningf("using empty certificate, because --dryrun specified")
        return &Certificate{}, err
    }
    return nil, fmt.Errorf("cannot find cert %q", id)
    return nil, fmt.Errorf("cannot find certificate %q", id)
    }
    return cert, err

}

func (c *VFSCAStore) CertificatePool(id string) (*CertificatePool, error) {
    cert, err := c.FindCertificatePool(id)
    if err == nil && cert == nil {
        if c.dryrun {
            glog.Warningf("using empty certificate, because --dryrun specified")
            return &CertificatePool{}, err
        }
        return nil, fmt.Errorf("cannot find certificate pool %q", id)
    }
    return cert, err

}

func (c *VFSCAStore) FindCert(id string) (*Certificate, error) {
    var cert *Certificate
    var certs *certificates

    if id == CertificateId_CA {
        cert = c.caCertificate
        certs = c.caCertificates
    } else {
        var err error
        p := c.buildCertificatePath(id)
        cert, err = c.loadCertificate(p)
        p := c.buildCertificatePoolPath(id)
        certs, err = c.loadCertificates(p)
        if err != nil {
            return nil, err
        }
    }

    var cert *Certificate
    if certs != nil && certs.primary != "" {
        cert = certs.certificates[certs.primary]
    }

    return cert, nil
}

func (c *VFSCAStore) FindCertificatePool(id string) (*CertificatePool, error) {
    var certs *certificates

    if id == CertificateId_CA {
        certs = c.caCertificates
    } else {
        var err error
        p := c.buildCertificatePoolPath(id)
        certs, err = c.loadCertificates(p)
        if err != nil {
            return nil, err
        }
    }

    pool := &CertificatePool{}

    if certs != nil {
        pool.Primary = certs.Primary()

        for k, cert := range certs.certificates {
            if k == certs.primary {
                continue
            }
            pool.Secondary = append(pool.Secondary, cert)
        }
    }
    return pool, nil
}

func (c *VFSCAStore) List() ([]string, error) {
    var ids []string
    if c.caCertificate != nil {
        ids = append(ids, "ca")
    }

    issuedDir := c.basedir.Join("issued")
    files, err := issuedDir.ReadDir()

@ -175,21 +305,22 @@ func (c *VFSCAStore) List() ([]string, error) {
    for _, f := range files {
        name := f.Base()
        name = strings.TrimSuffix(name, ".crt")
        ids = append(ids, name)
    }
    return ids, nil
}

func (c *VFSCAStore) IssueCert(id string, privateKey *PrivateKey, template *x509.Certificate) (*Certificate, error) {
func (c *VFSCAStore) IssueCert(id string, serial *big.Int, privateKey *PrivateKey, template *x509.Certificate) (*Certificate, error) {
    glog.Infof("Issuing new certificate: %q", id)

    p := c.buildCertificatePath(id)
    template.SerialNumber = serial

    if c.caPrivateKey == nil {
    p := c.buildCertificatePath(id, serial)

    if c.caPrivateKeys == nil || c.caPrivateKeys.Primary() == nil {
        return nil, fmt.Errorf("ca.key was not found; cannot issue certificates")
    }
    cert, err := SignNewCertificate(privateKey, template, c.caCertificate.Certificate, c.caPrivateKey)
    cert, err := SignNewCertificate(privateKey, template, c.caCertificates.Primary().Certificate, c.caPrivateKeys.Primary())
    if err != nil {
        return nil, err
    }

@ -200,20 +331,103 @@ func (c *VFSCAStore) IssueCert(id string, privateKey *PrivateKey, template *x509
    }

    // Make double-sure it round-trips
    return c.loadCertificate(p)
    return c.loadOneCertificate(p)
}

func (c *VFSCAStore) loadPrivateKey(p vfs.Path) (*PrivateKey, error) {
func (c *VFSCAStore) AddCert(id string, cert *Certificate) error {
    glog.Infof("Adding certificate: %q", id)

    // We add with a timestamp of zero so this will never be the newest cert
    serial := buildSerial(0)

    p := c.buildCertificatePath(id, serial)

    err := c.storeCertificate(cert, p)
    if err != nil {
        return err
    }

    // Make double-sure it round-trips
    _, err = c.loadOneCertificate(p)
    return err
}

type privateKeys struct {
    keys    map[string]*PrivateKey
    primary string
}

func (p *privateKeys) Primary() *PrivateKey {
    if p.primary == "" {
        return nil
    }
    return p.keys[p.primary]
}

func (c *VFSCAStore) loadPrivateKeys(p vfs.Path) (*privateKeys, error) {
    files, err := p.ReadDir()
    if err != nil {
        if os.IsNotExist(err) {
            return nil, nil
        }
        return nil, err
    }

    keys := &privateKeys{
        keys: make(map[string]*PrivateKey),
    }

    for _, f := range files {
        key, err := c.loadOnePrivateKey(f)
        if err != nil {
            return nil, fmt.Errorf("error loading private key %q: %v", f, err)
        }
        name := f.Base()
        name = strings.TrimSuffix(name, ".key")
        keys.keys[name] = key
    }

    if len(keys.keys) == 0 {
        return nil, nil
    }

    var primaryVersion *big.Int
    for k := range keys.keys {
        version, ok := big.NewInt(0).SetString(k, 10)
        if !ok {
            glog.Warningf("Ignoring private key with non-integer version: %q", k)
            continue
        }

        if primaryVersion == nil || version.Cmp(primaryVersion) > 0 {
            keys.primary = k
            primaryVersion = version
        }
    }

    return keys, nil
}

func (c *VFSCAStore) loadOnePrivateKey(p vfs.Path) (*PrivateKey, error) {
    data, err := p.ReadFile()
    if err != nil {
        if os.IsNotExist(err) {
            return nil, nil
        }
        return nil, err
    }
    k, err := parsePEMPrivateKey(data)
    k, err := ParsePEMPrivateKey(data)
    if err != nil {
        return nil, fmt.Errorf("error parsing private key from %q: %v", p, err)
    }
    return k, err
}

func ParsePEMPrivateKey(data []byte) (*PrivateKey, error) {
    k, err := parsePEMPrivateKey(data)
    if err != nil {
        return nil, err
    }
    if k == nil {
        return nil, nil
    }

@ -221,16 +435,22 @@ func (c *VFSCAStore) loadPrivateKey(p vfs.Path) (*PrivateKey, error) {
}

func (c *VFSCAStore) FindPrivateKey(id string) (*PrivateKey, error) {
    var key *PrivateKey
    var keys *privateKeys
    if id == CertificateId_CA {
        key = c.caPrivateKey
        keys = c.caPrivateKeys
    } else {
        var err error
        p := c.buildPrivateKeyPath(id)
        key, err = c.loadPrivateKey(p)
        p := c.buildPrivateKeyPoolPath(id)
        keys, err = c.loadPrivateKeys(p)
        if err != nil {
            return nil, err
        }
    }

    var key *PrivateKey
    if keys != nil && keys.primary != "" {
        key = keys.keys[keys.primary]
    }
    return key, nil
}

@ -248,8 +468,25 @@ func (c *VFSCAStore) PrivateKey(id string) (*PrivateKey, error) {
}

func (c *VFSCAStore) CreatePrivateKey(id string) (*PrivateKey, error) {
    p := c.buildPrivateKeyPath(id)
func (c *VFSCAStore) CreateKeypair(id string, template *x509.Certificate) (*Certificate, *PrivateKey, error) {
    serial := c.buildSerial()

    privateKey, err := c.CreatePrivateKey(id, serial)
    if err != nil {
        return nil, nil, err
    }

    cert, err := c.IssueCert(id, serial, privateKey, template)
    if err != nil {
        // TODO: Delete cert?
        return nil, nil, err
    }

    return cert, privateKey, nil
}

func (c *VFSCAStore) CreatePrivateKey(id string, serial *big.Int) (*PrivateKey, error) {
    p := c.buildPrivateKeyPath(id, serial)

    rsaKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048)
    if err != nil {

@ -285,3 +522,22 @@ func (c *VFSCAStore) storeCertificate(cert *Certificate, p vfs.Path) error {
    return p.WriteFile(data.Bytes())
}

func (c *VFSCAStore) buildSerial() *big.Int {
    t := time.Now().UnixNano()
    return buildSerial(t)
}

func buildSerial(timestamp int64) *big.Int {
    randomLimit := new(big.Int).Lsh(big.NewInt(1), 32)
    randomComponent, err := crypto_rand.Int(crypto_rand.Reader, randomLimit)
    if err != nil {
        glog.Fatalf("error generating random number: %v", err)
    }

    serial := big.NewInt(timestamp)
    serial.Lsh(serial, 32)
    serial.Or(serial, randomComponent)

    return serial
}
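
buildSerial packs the nanosecond timestamp into the high bits and 32 random bits into the low bits, so serials sort by creation time; AddCert passes a zero timestamp precisely so an imported certificate can never become the primary. A worked example with small illustrative numbers:

// timestamp = 2, random = 5:
//   serial = (2 << 32) | 5 = 8589934597
// Any serial built from a later timestamp has a larger high part, so the
// newest certificate always carries the numerically largest serial.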

@ -25,6 +25,10 @@ func NewVFSSecretStore(basedir vfs.Path) (SecretStore, error) {
    return c, nil
}

func (s *VFSSecretStore) VFSPath() vfs.Path {
    return s.basedir
}

func (c *VFSSecretStore) buildSecretPath(id string) vfs.Path {
    return c.basedir.Join(id)
}