mirror of https://github.com/kubernetes/kops.git

commit 498e3b29d7
Merge remote-tracking branch 'upstream/master' into update_aws-sdk

 OWNERS | 2 ++
@@ -5,3 +5,5 @@ approvers:
   - zmerlynn
   - andrewsykim
   - geojaz
+  - kashifsaadat
+  - gambol99
@@ -32,7 +32,7 @@ import (
 var (
 	create_secret_dockerconfig_long = templates.LongDesc(i18n.T(`
-	Create a new docker config, and store it in the state store. 
+	Create a new docker config, and store it in the state store.
 	Used to configure docker on each master or node (ie. for auth)
 	Use update to modify it, this command will only create a new entry.`))

@@ -40,6 +40,9 @@ var (
 	# Create an new docker config.
 	kops create secret dockerconfig -f /path/to/docker/config.json \
 		--name k8s-cluster.example.com --state s3://example.com
+
+	# Replace an existing docker config secret.
+	kops create secret dockerconfig -f /path/to/docker/config.json --force \
+		--name k8s-cluster.example.com --state s3://example.com
 	`))

 	create_secret_dockerconfig_short = i18n.T(`Create a docker config.`)

@@ -48,6 +51,7 @@ var (
 type CreateSecretDockerConfigOptions struct {
 	ClusterName      string
 	DockerConfigPath string
+	Force            bool
 }

 func NewCmdCreateSecretDockerConfig(f *util.Factory, out io.Writer) *cobra.Command {

@@ -78,6 +82,7 @@ func NewCmdCreateSecretDockerConfig(f *util.Factory, out io.Writer) *cobra.Comma
 	}

 	cmd.Flags().StringVarP(&options.DockerConfigPath, "", "f", "", "Path to docker config JSON file")
+	cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Force replace the kops secret if it already exists")

 	return cmd
 }

@@ -119,9 +124,19 @@ func RunCreateSecretDockerConfig(f *util.Factory, out io.Writer, options *Create
 	secret.Data = data

-	_, _, err = secretStore.GetOrCreateSecret("dockerconfig", secret)
-	if err != nil {
-		return fmt.Errorf("error adding docker config secret: %v", err)
+	if !options.Force {
+		_, created, err := secretStore.GetOrCreateSecret("dockerconfig", secret)
+		if err != nil {
+			return fmt.Errorf("error adding dockerconfig secret: %v", err)
+		}
+		if !created {
+			return fmt.Errorf("failed to create the dockerconfig secret as it already exists. The `--force` flag can be passed to replace an existing secret.")
+		}
+	} else {
+		_, err := secretStore.ReplaceSecret("dockerconfig", secret)
+		if err != nil {
+			return fmt.Errorf("error updating dockerconfig secret: %v", err)
+		}
 	}

 	return nil
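The `--force` branch above hinges on the boolean returned by `GetOrCreateSecret`: without the flag, an existing secret becomes an error that points the user at `--force`; with it, the secret is overwritten via `ReplaceSecret`. Below is a minimal, self-contained sketch of that decision flow, using a hypothetical in-memory store in place of the real kops secret store:

```go
package main

import (
	"errors"
	"fmt"
)

// memoryStore is a stand-in for the kops secret store; only the two
// operations the command relies on are modeled here.
type memoryStore struct {
	secrets map[string]string
}

// getOrCreate mirrors GetOrCreateSecret: it reports created=false
// (without overwriting) when the name is already taken.
func (s *memoryStore) getOrCreate(name, data string) (created bool, err error) {
	if _, ok := s.secrets[name]; ok {
		return false, nil
	}
	s.secrets[name] = data
	return true, nil
}

// replace mirrors ReplaceSecret: it overwrites unconditionally.
func (s *memoryStore) replace(name, data string) error {
	s.secrets[name] = data
	return nil
}

// createSecret follows the same branching as the patched RunCreateSecretDockerConfig.
func createSecret(s *memoryStore, name, data string, force bool) error {
	if !force {
		created, err := s.getOrCreate(name, data)
		if err != nil {
			return fmt.Errorf("error adding %s secret: %v", name, err)
		}
		if !created {
			return errors.New("secret already exists; pass --force to replace it")
		}
		return nil
	}
	return s.replace(name, data)
}

func main() {
	store := &memoryStore{secrets: map[string]string{}}
	fmt.Println(createSecret(store, "dockerconfig", "v1", false)) // <nil>: created
	fmt.Println(createSecret(store, "dockerconfig", "v2", false)) // error: already exists
	fmt.Println(createSecret(store, "dockerconfig", "v2", true))  // <nil>: replaced
}
```

The same pattern is applied verbatim to the encryptionconfig command in the next file.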
@@ -40,6 +40,9 @@ var (
 	# Create a new encryption config.
 	kops create secret encryptionconfig -f config.yaml \
 		--name k8s-cluster.example.com --state s3://example.com
+
+	# Replace an existing encryption config secret.
+	kops create secret encryptionconfig -f config.yaml --force \
+		--name k8s-cluster.example.com --state s3://example.com
 	`))

 	create_secret_encryptionconfig_short = i18n.T(`Create an encryption config.`)

@@ -48,6 +51,7 @@ var (
 type CreateSecretEncryptionConfigOptions struct {
 	ClusterName          string
 	EncryptionConfigPath string
+	Force                bool
 }

 func NewCmdCreateSecretEncryptionConfig(f *util.Factory, out io.Writer) *cobra.Command {

@@ -78,6 +82,7 @@ func NewCmdCreateSecretEncryptionConfig(f *util.Factory, out io.Writer) *cobra.C
 	}

 	cmd.Flags().StringVarP(&options.EncryptionConfigPath, "", "f", "", "Path to encryption config yaml file")
+	cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Force replace the kops secret if it already exists")

 	return cmd
 }

@@ -120,9 +125,19 @@ func RunCreateSecretEncryptionConfig(f *util.Factory, out io.Writer, options *Cr
 	secret.Data = data

-	_, _, err = secretStore.GetOrCreateSecret("encryptionconfig", secret)
-	if err != nil {
-		return fmt.Errorf("error adding encryption config secret: %v", err)
+	if !options.Force {
+		_, created, err := secretStore.GetOrCreateSecret("encryptionconfig", secret)
+		if err != nil {
+			return fmt.Errorf("error adding encryptionconfig secret: %v", err)
+		}
+		if !created {
+			return fmt.Errorf("failed to create the encryptionconfig secret as it already exists. The `--force` flag can be passed to replace an existing secret.")
+		}
+	} else {
+		_, err := secretStore.ReplaceSecret("encryptionconfig", secret)
+		if err != nil {
+			return fmt.Errorf("error updating encryptionconfig secret: %v", err)
+		}
 	}

 	return nil
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"runtime"
 	"strings"

 	"github.com/golang/glog"

@@ -31,10 +32,18 @@ import (
 	"k8s.io/kops/cmd/kops/util"
 	api "k8s.io/kops/pkg/apis/kops"
 	apiutil "k8s.io/kops/pkg/apis/kops/util"
+	"k8s.io/kops/pkg/dns"
 	"k8s.io/kops/pkg/validation"
 	"k8s.io/kops/util/pkg/tables"
 )

+func init() {
+	if runtime.GOOS == "darwin" {
+		// In order for net.LookupHost(apiAddr.Host) to look up our placeholder
+		// address on darwin, we have to use the pure-Go DNS resolver.
+		os.Setenv("GODEBUG", "netdns=go")
+	}
+}
+
 type ValidateClusterOptions struct {
 	// No options yet
 }

@@ -105,6 +114,25 @@ func RunValidateCluster(f *util.Factory, cmd *cobra.Command, args []string, out
 		return fmt.Errorf("Cannot build kube api client for %q: %v\n", contextName, err)
 	}

+	// Do not use if we are running gossip
+	if !dns.IsGossipHostname(cluster.ObjectMeta.Name) {
+		hasPlaceHolderIPAddress, err := validation.HasPlaceHolderIP(contextName)
+		if err != nil {
+			return err
+		}
+
+		if hasPlaceHolderIPAddress {
+			fmt.Println(
+				"Validation Failed\n\n" +
+					"The dns-controller Kubernetes deployment has not updated the Kubernetes cluster's API DNS entry to the correct IP address." +
+					"  The API DNS IP address is the placeholder address that kops creates: 203.0.113.123." +
+					"  Please wait about 5-10 minutes for a master to start, dns-controller to launch, and DNS to propagate." +
+					"  The protokube container and dns-controller deployment logs may contain more diagnostic information." +
+					"  Etcd and the API DNS entries must be updated for a kops Kubernetes cluster to start.")
+			return fmt.Errorf("\nCannot reach cluster's API server: unable to Validate Cluster: %s", cluster.ObjectMeta.Name)
+		}
+	}
+
 	validationCluster, validationFailed := validation.ValidateCluster(cluster.ObjectMeta.Name, list, k8sClient)

 	if validationCluster == nil || validationCluster.NodeList == nil || validationCluster.NodeList.Items == nil {
@@ -20,12 +20,16 @@ kops create secret dockerconfig
   # Create an new docker config.
   kops create secret dockerconfig -f /path/to/docker/config.json \
   --name k8s-cluster.example.com --state s3://example.com
+
+  # Replace an existing docker config secret.
+  kops create secret dockerconfig -f /path/to/docker/config.json --force \
+  --name k8s-cluster.example.com --state s3://example.com
 ```

 ### Options

 ```
   -f, -- string   Path to docker config JSON file
+      --force     Force replace the kops secret if it already exists
 ```

 ### Options inherited from parent commands
@@ -20,12 +20,16 @@ kops create secret encryptionconfig
   # Create a new encryption config.
   kops create secret encryptionconfig -f config.yaml \
   --name k8s-cluster.example.com --state s3://example.com
+
+  # Replace an existing encryption config secret.
+  kops create secret encryptionconfig -f config.yaml --force \
+  --name k8s-cluster.example.com --state s3://example.com
 ```

 ### Options

 ```
   -f, -- string   Path to encryption config yaml file
+      --force     Force replace the kops secret if it already exists
 ```

 ### Options inherited from parent commands
@@ -1,6 +1,6 @@
 # Cluster Templating

-The command `kops replace` can replace a cluster desired configuration from the config in a yaml file (see [/cli/kops_replace.md](/cli/kops_replace.md)).
+The command `kops replace` can replace a cluster desired configuration from the config in a yaml file (see [cli/kops_replace.md](cli/kops_replace.md)).

 It is possible to generate that yaml file from a template, using the command `kops toolbox template` (see [cli/kops_toolbox_template.md](cli/kops_toolbox_template.md)).

@@ -45,7 +45,7 @@ Running `kops toolbox template` replaces the placeholders in the template by val
 Note: when creating a cluster desired configuration template, you can

-- use `kops get k8s-cluster.example.com -o yaml > cluster-desired-config.yaml` to create the cluster desired configuration file (see [cli/kops_get.md](cli/kops_get.md)). The values in this file are defined in [cli/cluster_spec.md](cli/cluster_spec.md).
+- use `kops get k8s-cluster.example.com -o yaml > cluster-desired-config.yaml` to create the cluster desired configuration file (see [cli/kops_get.md](cli/kops_get.md)). The values in this file are defined in [cluster_spec.md](cluster_spec.md).
 - replace values by placeholders in that file to create the template.

 ### Templates
@@ -1,3 +1,26 @@
+# Installing Kops via Homebrew
+
+Homebrew makes installing kops [very simple for MacOS.](../install.md)
+```bash
+brew update && brew install kops
+```
+
+Development Releases and master can also be installed via Homebrew very easily:
+```bash
+# Development Release
+brew update && brew install kops --devel
+# HEAD of master
+brew update && brew install kops --HEAD
+```
+
+Note: if you already have kops installed, you need to substitute `upgrade` for `install`.
+
+You can switch between development and stable releases with:
+```bash
+brew switch kops 1.7.1
+brew switch kops 1.8.0-beta.1
+```
+
 # Releasing kops to Brew

 Submitting a new release of kops to Homebrew is very simple.

@@ -8,11 +31,20 @@ Submitting a new release of kops to Homebrew is very simple.
 This will automatically update the provided fields and open a PR for you.
 More details on this script are located [here.](https://github.com/Homebrew/brew/blob/master/Library/Homebrew/dev-cmd/bump-formula-pr.rb)

+We now include both major and development releases in homebrew. A development version can be updated by adding the `--devel` flag.
+
 Example usage:
-```
+```bash
+# Major Version
 brew bump-formula-pr \
        --url=https://github.com/kubernetes/kops/archive/1.7.1.tar.gz \
        --sha256=044c5c7a737ed3acf53517e64bb27d3da8f7517d2914df89efeeaf84bc8a722a
+
+# Development Version
+brew bump-formula-pr \
+       --devel \
+       --url=https://github.com/kubernetes/kops/archive/1.8.0-beta.1.tar.gz \
+       --sha256=81026d6c1cd7b3898a88275538a7842b4bd8387775937e0528ccb7b83948abf1
 ```

 * Update the URL variable to the tar.gz of the new release source code
@@ -8,6 +8,8 @@ From Homebrew:
 brew update && brew install kops
 ```

+Developers can also easily install [development releases](development/homebrew.md).
+
 From Github:

 ```bash
@@ -14,6 +14,13 @@ or `--networking flannel-udp` can be specified to explicitly choose a backend mo
   See the *Changes to k8s-policy* section in the
   [Calico release notes](https://github.com/projectcalico/calico/releases/tag/v2.4.0)
   for help.
+* Due to `ThirdPartyResources` becoming fully deprecated in Kubernetes v1.8 (replaced by `CustomResourceDefinitions`), existing Canal users upgrading their Clusters to Kubernetes v1.8 must follow the below TPR->CRD migration steps:
+  1. Run: `kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v2.6.2/upgrade/v2.5/manifests/upgrade-job.yaml`
+  2. Retrieve the pod name from describing the job: `kubectl describe job/calico-upgrade-v2.5`
+  3. Validate that the last log line from the pod reports it completed successfully: `kubectl logs calico-upgrade-v2.5-<random-id>`
+  4. Update the `KubernetesVersion` within your ClusterSpec to v1.8 (or above), performing an update & rolling-update to all nodes (will involve downtime)
+  5. Confirm the cluster is back up and all canal pods are running successfully: `kops validate cluster` (this may take a few minutes for the cluster to fully validate)
+  6. Delete the upgrade job as it is no longer required: `kubectl delete job calico-upgrade-v2.5` (you can also safely delete the `clusterrole`, `clusterrolebinding` and `serviceaccount` resources that were created by the above manifest file)
+
 # Full changelist
@@ -1,13 +1,33 @@
-## How to update Kops - Kubernetes Ops
-
-Update the latest source code from kubernetes/kops
-
-```
-cd ${GOPATH}/src/k8s.io/kops/
-git pull && make
-```
-
-Alternatively, if you installed from Homebrew
-```
-brew update && brew upgrade kops
-```
+# Updating kops (Binaries)
+
+## MacOS
+
+From Homebrew:
+
+```bash
+brew update && brew upgrade kops
+```
+
+From Github:
+
+```bash
+rm -rf /usr/local/bin/kops
+wget -O kops https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-darwin-amd64
+chmod +x ./kops
+sudo mv ./kops /usr/local/bin/
+```
+
+You can also rerun [these steps](development/building.md) if previously built from source.
+
+## Linux
+
+From Github:
+
+```bash
+rm -rf /usr/local/bin/kops
+wget -O kops https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64
+chmod +x ./kops
+sudo mv ./kops /usr/local/bin/
+```
+
+You can also rerun [these steps](development/building.md) if previously built from source.
@@ -123,6 +123,7 @@ k8s.io/kops/upup/pkg/fi/cloudup/dotasks
 k8s.io/kops/upup/pkg/fi/cloudup/gce
 k8s.io/kops/upup/pkg/fi/cloudup/gcetasks
 k8s.io/kops/upup/pkg/fi/cloudup/openstack
+k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks
 k8s.io/kops/upup/pkg/fi/cloudup/terraform
 k8s.io/kops/upup/pkg/fi/cloudup/vsphere
 k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks
@@ -431,6 +431,57 @@ var dockerVersions = []dockerVersion{
 		Hash:          "4659c937b66519c88ef2a82a906bb156db29d191",
 		Dependencies:  []string{"policycoreutils-python"},
 	},
+
+	// 17.09.0 - k8s 1.8
+
+	// 17.09.0 - Jessie
+	{
+		DockerVersion: "17.09.0",
+		Name:          "docker-ce",
+		Distros:       []distros.Distribution{distros.DistributionJessie},
+		Architectures: []Architecture{ArchitectureAmd64},
+		Version:       "17.09.0~ce-0~debian",
+		Source:        "http://download.docker.com/linux/debian/dists/jessie/pool/stable/amd64/docker-ce_17.09.0~ce-0~debian_amd64.deb",
+		Hash:          "430ba87f8aa36fedcac1a48e909cbe1830b53845",
+		Dependencies:  []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
+	},
+
+	// 17.09.0 - Jessie on ARM
+	{
+		DockerVersion: "17.09.0",
+		Name:          "docker-ce",
+		Distros:       []distros.Distribution{distros.DistributionJessie},
+		Architectures: []Architecture{ArchitectureArm},
+		Version:       "17.09.0~ce-0~debian",
+		Source:        "http://download.docker.com/linux/debian/dists/jessie/pool/stable/armhf/docker-ce_17.09.0~ce-0~debian_armhf.deb",
+		Hash:          "5001a1defec7c33aa58ddebbd3eae6ebb5f36479",
+		Dependencies:  []string{"bridge-utils", "libapparmor1", "libltdl7", "perl"},
+	},
+
+	// 17.09.0 - Xenial
+	{
+		DockerVersion: "17.09.0",
+		Name:          "docker-ce",
+		Distros:       []distros.Distribution{distros.DistributionXenial},
+		Architectures: []Architecture{ArchitectureAmd64},
+		Version:       "17.09.0~ce-0~ubuntu",
+		Source:        "http://download.docker.com/linux/ubuntu/dists/xenial/pool/stable/amd64/docker-ce_17.09.0~ce-0~ubuntu_amd64.deb",
+		Hash:          "94f6e89be6d45d9988269a237eb27c7d6a844d7f",
+		Dependencies:  []string{"bridge-utils", "iptables", "libapparmor1", "libltdl7", "perl"},
+		//Depends: iptables, init-system-helpers, lsb-base, libapparmor1, libc6, libdevmapper1.02.1, libltdl7, libeseccomp2, libsystemd0
+		//Recommends: aufs-tools, ca-certificates, cgroupfs-mount | cgroup-lite, git, xz-utils, apparmor
+	},
+
+	// 17.09.0 - Centos / Rhel7
+	{
+		DockerVersion: "17.09.0",
+		Name:          "docker-ce",
+		Distros:       []distros.Distribution{distros.DistributionRhel7, distros.DistributionCentos7},
+		Architectures: []Architecture{ArchitectureAmd64},
+		Version:       "17.09.0.ce",
+		Source:        "https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-17.09.0.ce-1.el7.centos.x86_64.rpm",
+		Hash:          "b4ce72e80ff02926de943082821bbbe73958f87a",
+		Dependencies:  []string{"libtool-ltdl", "libseccomp", "libgcroup"},
+	},
 }

 func (d *dockerVersion) matches(arch Architecture, dockerVersion string, distro distros.Distribution) bool {
@@ -322,7 +322,7 @@ type KubeControllerManagerConfig struct {
 	// HorizontalPodAutoscalerUpscaleDelay is a duration that specifies how
 	// long the autoscaler has to wait before another upscale operation can
 	// be performed after the current one has completed.
-	HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-downscale-delay"`
+	HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"`
 	// FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
 	FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
 }
@@ -322,7 +322,7 @@ type KubeControllerManagerConfig struct {
 	// HorizontalPodAutoscalerUpscaleDelay is a duration that specifies how
 	// long the autoscaler has to wait before another upscale operation can
 	// be performed after the current one has completed.
-	HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-downscale-delay"`
+	HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"`
 	// FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
 	FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
 }
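Both copies of this hunk (the internal API and the v1alpha2 API) fix the same copy-paste slip: the upscale-delay field carried the downscale flag name in its `flag` struct tag, so the value was rendered as `--horizontal-pod-autoscaler-downscale-delay`. A small sketch of how such a tag drives flag rendering (a reflection-based toy, not the actual kops flag builder):

```go
package main

import (
	"fmt"
	"reflect"
)

// config mimics the relevant slice of KubeControllerManagerConfig.
type config struct {
	HorizontalPodAutoscalerUpscaleDelay string `flag:"horizontal-pod-autoscaler-upscale-delay"`
}

func main() {
	c := config{HorizontalPodAutoscalerUpscaleDelay: "3m0s"}
	t := reflect.TypeOf(c)
	v := reflect.ValueOf(c)
	for i := 0; i < t.NumField(); i++ {
		// Whatever string sits in the `flag` tag becomes the CLI flag name,
		// which is why the wrong tag silently produced the wrong flag.
		fmt.Printf("--%s=%v\n", t.Field(i).Tag.Get("flag"), v.Field(i).Interface())
	}
}
```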
@@ -469,9 +469,9 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
 		}
 	}

-	if kubernetesRelease.LT(semver.MustParse("1.6.0")) {
+	if kubernetesRelease.LT(semver.MustParse("1.7.0")) {
 		if c.Spec.Networking != nil && c.Spec.Networking.Romana != nil {
-			return field.Invalid(fieldSpec.Child("Networking"), "romana", "romana networking is not supported with kubernetes versions 1.5 or lower")
+			return field.Invalid(fieldSpec.Child("Networking"), "romana", "romana networking is not supported with kubernetes versions 1.6 or lower")
 		}
 	}
@@ -36,6 +36,8 @@ go_library(
         "//upup/pkg/fi/cloudup/dotasks:go_default_library",
         "//upup/pkg/fi/cloudup/gce:go_default_library",
         "//upup/pkg/fi/cloudup/gcetasks:go_default_library",
+        "//upup/pkg/fi/cloudup/openstack:go_default_library",
+        "//upup/pkg/fi/cloudup/openstacktasks:go_default_library",
         "//upup/pkg/fi/fitasks:go_default_library",
         "//util/pkg/vfs:go_default_library",
         "//vendor/github.com/blang/semver:go_default_library",
@@ -661,7 +661,7 @@ func addMasterASPolicies(p *Policy, resource stringorslice.StringOrSlice, legacy
 				Resource: resource,
 				Condition: Condition{
 					"StringEquals": map[string]string{
-						"ec2:ResourceTag/KubernetesCluster": clusterName,
+						"autoscaling:ResourceTag/KubernetesCluster": clusterName,
 					},
 				},
 			},
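An IAM condition key only matches when the evaluating service puts it in the request context; Auto Scaling calls carry `autoscaling:ResourceTag/...` rather than `ec2:ResourceTag/...`, so the old condition could never be satisfied for these ASG statements. A short sketch of the policy fragment the builder now produces (hand-rolled marshaling, assuming only the `Condition` shape shown in the diff):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Condition matches the map-of-maps shape used by the kops IAM builder.
type Condition map[string]map[string]string

func main() {
	clusterName := "iam-builder-test.k8s.local"
	cond := Condition{
		"StringEquals": {
			// Auto Scaling evaluates its own tag namespace, not ec2's.
			"autoscaling:ResourceTag/KubernetesCluster": clusterName,
		},
	}
	out, _ := json.MarshalIndent(cond, "", "  ")
	fmt.Println(string(out)) // the fragment asserted by the two test fixtures below
}
```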
@@ -75,7 +75,7 @@
       ],
       "Condition": {
         "StringEquals": {
-          "ec2:ResourceTag/KubernetesCluster": "iam-builder-test.k8s.local"
+          "autoscaling:ResourceTag/KubernetesCluster": "iam-builder-test.k8s.local"
         }
       }
     },
@@ -75,7 +75,7 @@
       ],
       "Condition": {
         "StringEquals": {
-          "ec2:ResourceTag/KubernetesCluster": "iam-builder-test.k8s.local"
+          "autoscaling:ResourceTag/KubernetesCluster": "iam-builder-test.k8s.local"
         }
       }
     },
@@ -30,6 +30,8 @@ import (
 	"k8s.io/kops/upup/pkg/fi/cloudup/dotasks"
 	"k8s.io/kops/upup/pkg/fi/cloudup/gce"
 	"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
+	"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
+	"k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks"
 )

 const (

@@ -95,6 +97,11 @@ func (b *MasterVolumeBuilder) Build(c *fi.ModelBuilderContext) error {
 				b.addVSphereVolume(c, name, volumeSize, zone, etcd, m, allMembers)
 			case kops.CloudProviderBareMetal:
 				glog.Fatalf("BareMetal not implemented")
+			case kops.CloudProviderOpenstack:
+				err = b.addOpenstackVolume(c, name, volumeSize, zone, etcd, m, allMembers)
+				if err != nil {
+					return err
+				}
 			default:
 				return fmt.Errorf("unknown cloudprovider %q", b.Cluster.Spec.CloudProvider)
 			}

@@ -205,3 +212,33 @@ func (b *MasterVolumeBuilder) addGCEVolume(c *fi.ModelBuilderContext, name strin
 func (b *MasterVolumeBuilder) addVSphereVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd *kops.EtcdClusterSpec, m *kops.EtcdMemberSpec, allMembers []string) {
 	fmt.Print("addVSphereVolume to be implemented")
 }
+
+func (b *MasterVolumeBuilder) addOpenstackVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd *kops.EtcdClusterSpec, m *kops.EtcdMemberSpec, allMembers []string) error {
+	volumeType := fi.StringValue(m.VolumeType)
+	if volumeType == "" {
+		return fmt.Errorf("must set ETCDMemberSpec.VolumeType on Openstack platform")
+	}
+
+	// The tags are how protokube knows to mount the volume and use it for etcd
+	tags := make(map[string]string)
+	// Apply all user defined labels on the volumes
+	for k, v := range b.Cluster.Spec.CloudLabels {
+		tags[k] = v
+	}
+	// This is the configuration of the etcd cluster
+	tags[openstack.TagNameEtcdClusterPrefix+etcd.Name] = m.Name + "/" + strings.Join(allMembers, ",")
+	// This says "only mount on a master"
+	tags[openstack.TagNameRolePrefix+"master"] = "1"
+
+	t := &openstacktasks.Volume{
+		Name:             s(name),
+		AvailabilityZone: s(zone),
+		VolumeType:       s(volumeType),
+		SizeGB:           fi.Int64(int64(volumeSize)),
+		Tags:             tags,
+		Lifecycle:        b.Lifecycle,
+	}
+	c.AddTask(t)
+
+	return nil
+}
@@ -220,7 +220,7 @@ func addUntaggedRouteTables(cloud awsup.AWSCloud, clusterName string, resources
 			continue
 		}

-		t := buildTrackerForRouteTable(rt)
+		t := buildTrackerForRouteTable(rt, clusterName)
 		if resources[t.Type+":"+t.ID] == nil {
 			resources[t.Type+":"+t.ID] = t
 		}

@@ -973,19 +973,20 @@ func ListRouteTables(cloud fi.Cloud, clusterName string) ([]*Resource, error) {
 	var resourceTrackers []*Resource

 	for _, rt := range routeTables {
-		resourceTracker := buildTrackerForRouteTable(rt)
+		resourceTracker := buildTrackerForRouteTable(rt, clusterName)
 		resourceTrackers = append(resourceTrackers, resourceTracker)
 	}

 	return resourceTrackers, nil
 }

-func buildTrackerForRouteTable(rt *ec2.RouteTable) *Resource {
+func buildTrackerForRouteTable(rt *ec2.RouteTable, clusterName string) *Resource {
 	resourceTracker := &Resource{
 		Name:    FindName(rt.Tags),
 		ID:      aws.StringValue(rt.RouteTableId),
 		Type:    ec2.ResourceTypeRouteTable,
 		Deleter: DeleteRouteTable,
+		Shared:  HasSharedTag(ec2.ResourceTypeRouteTable+":"+*rt.RouteTableId, rt.Tags, clusterName),
 	}

 	var blocks []string
 | 
				
			||||||
		t.Fatalf("expected=%q, actual=%q", expected, keys)
 | 
							t.Fatalf("expected=%q, actual=%q", expected, keys)
 | 
				
			||||||
	}
 | 
						}
 | 
				
			||||||
}
 | 
					}
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
					func TestListRouteTables(t *testing.T) {
 | 
				
			||||||
 | 
						cloud := awsup.BuildMockAWSCloud("us-east-1", "abc")
 | 
				
			||||||
 | 
						//resources := make(map[string]*Resource)
 | 
				
			||||||
 | 
						clusterName := "me.example.com"
 | 
				
			||||||
 | 
						ownershipTagKey := "kubernetes.io/cluster/" + clusterName
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						c := &mockec2.MockEC2{}
 | 
				
			||||||
 | 
						cloud.MockEC2 = c
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						c.RouteTables = append(c.RouteTables, &ec2.RouteTable{
 | 
				
			||||||
 | 
							VpcId:        aws.String("vpc-1234"),
 | 
				
			||||||
 | 
							RouteTableId: aws.String("rt-shared"),
 | 
				
			||||||
 | 
							Tags: []*ec2.Tag{
 | 
				
			||||||
 | 
								{
 | 
				
			||||||
 | 
									Key:   aws.String("KubernetesCluster"),
 | 
				
			||||||
 | 
									Value: aws.String(clusterName),
 | 
				
			||||||
 | 
								},
 | 
				
			||||||
 | 
								{
 | 
				
			||||||
 | 
									Key:   aws.String(ownershipTagKey),
 | 
				
			||||||
 | 
									Value: aws.String("shared"),
 | 
				
			||||||
 | 
								},
 | 
				
			||||||
 | 
							},
 | 
				
			||||||
 | 
						})
 | 
				
			||||||
 | 
						c.RouteTables = append(c.RouteTables, &ec2.RouteTable{
 | 
				
			||||||
 | 
							VpcId:        aws.String("vpc-1234"),
 | 
				
			||||||
 | 
							RouteTableId: aws.String("rt-owned"),
 | 
				
			||||||
 | 
							Tags: []*ec2.Tag{
 | 
				
			||||||
 | 
								{
 | 
				
			||||||
 | 
									Key:   aws.String("KubernetesCluster"),
 | 
				
			||||||
 | 
									Value: aws.String(clusterName),
 | 
				
			||||||
 | 
								},
 | 
				
			||||||
 | 
								{
 | 
				
			||||||
 | 
									Key:   aws.String(ownershipTagKey),
 | 
				
			||||||
 | 
									Value: aws.String("owned"),
 | 
				
			||||||
 | 
								},
 | 
				
			||||||
 | 
							},
 | 
				
			||||||
 | 
						})
 | 
				
			||||||
 | 
					
 | 
				
			||||||
 | 
						resources, err := ListRouteTables(cloud, clusterName)
 | 
				
			||||||
 | 
						if err != nil {
 | 
				
			||||||
 | 
							t.Fatalf("error listing route tables: %v", err)
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
						for _, rt := range resources {
 | 
				
			||||||
 | 
							if rt.ID == "rt-shared" && !rt.Shared {
 | 
				
			||||||
 | 
								t.Fatalf("expected Shared: true, got: %v", rt.Shared)
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
							if rt.ID == "rt-owned" && rt.Shared {
 | 
				
			||||||
 | 
								t.Fatalf("expected Shared: false, got: %v", rt.Shared)
 | 
				
			||||||
 | 
							}
 | 
				
			||||||
 | 
						}
 | 
				
			||||||
 | 
					}
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
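The two fixtures above exercise the cluster ownership tag, `kubernetes.io/cluster/<name>`, whose value separates resources kops created (`owned`) from pre-existing resources it merely uses (`shared`); the delete path must leave the latter in place. A simplified sketch of the check (hypothetical: the real `HasSharedTag` lives in the kops resources package and operates on ec2 tag structs):

```go
package main

import "fmt"

// hasSharedTag reports whether the ownership tag marks the resource as
// shared, i.e. something `kops delete cluster` must not remove.
func hasSharedTag(tags map[string]string, clusterName string) bool {
	return tags["kubernetes.io/cluster/"+clusterName] == "shared"
}

func main() {
	cluster := "me.example.com"
	shared := map[string]string{"kubernetes.io/cluster/" + cluster: "shared"}
	owned := map[string]string{"kubernetes.io/cluster/" + cluster: "owned"}
	fmt.Println(hasSharedTag(shared, cluster)) // true: skip deletion
	fmt.Println(hasSharedTag(owned, cluster))  // false: safe to delete
}
```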
@@ -17,6 +17,7 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
+        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
     ],
 )
@@ -18,11 +18,15 @@ package validation

 import (
 	"fmt"
+	"net/url"
 	"time"

+	"net"
+
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/kops/pkg/apis/kops"
 	"k8s.io/kops/pkg/apis/kops/util"
 	"k8s.io/kops/upup/pkg/fi"

@@ -54,6 +58,34 @@ type ValidationNode struct {
 	Status   v1.ConditionStatus `json:"status,omitempty"`
 }

+// HasPlaceHolderIP checks if the API DNS has been updated
+func HasPlaceHolderIP(clusterName string) (bool, error) {
+
+	config, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+		clientcmd.NewDefaultClientConfigLoadingRules(),
+		&clientcmd.ConfigOverrides{CurrentContext: clusterName}).ClientConfig()
+	if err != nil {
+		return true, fmt.Errorf("unable to load kubeconfig settings: %v", err)
+	}
+
+	apiAddr, err := url.Parse(config.Host)
+	if err != nil {
+		return true, fmt.Errorf("unable to parse Kubernetes cluster API URL: %v", err)
+	}
+
+	hostAddrs, err := net.LookupHost(apiAddr.Host)
+	if err != nil {
+		return true, fmt.Errorf("unable to resolve Kubernetes cluster API URL dns: %v", err)
+	}
+
+	for _, h := range hostAddrs {
+		if h == "203.0.113.123" {
+			return true, nil
+		}
+	}
+
+	return false, nil
+}
+
 // ValidateCluster validate a k8s cluster with a provided instance group list
 func ValidateCluster(clusterName string, instanceGroupList *kops.InstanceGroupList, clusterKubernetesClient kubernetes.Interface) (*ValidationCluster, error) {
 	var instanceGroups []*kops.InstanceGroup
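For reference, the core of `HasPlaceHolderIP` is just a forward DNS lookup compared against the fixed placeholder that kops seeds into the API record, 203.0.113.123 (a TEST-NET-3 address). A standalone sketch of the same check (the hostname is hypothetical; on darwin the lookup depends on the `GODEBUG=netdns=go` setting added in the validate command's init):

```go
package main

import (
	"fmt"
	"net"
)

const placeholderIP = "203.0.113.123" // the address kops seeds the API record with

// hasPlaceholder reports whether host still resolves to the placeholder,
// meaning dns-controller has not yet written the real API address.
func hasPlaceholder(host string) (bool, error) {
	addrs, err := net.LookupHost(host)
	if err != nil {
		return false, fmt.Errorf("unable to resolve %s: %v", host, err)
	}
	for _, a := range addrs {
		if a == placeholderIP {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	// api.k8s-cluster.example.com is a hypothetical API hostname; for a
	// name that does not exist this simply returns a resolution error.
	up, err := hasPlaceholder("api.k8s-cluster.example.com")
	fmt.Println(up, err)
}
```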
					@ -99,7 +99,7 @@ spec:
 | 
				
			||||||
      - operator: Exists
 | 
					      - operator: Exists
 | 
				
			||||||
      initContainers:
 | 
					      initContainers:
 | 
				
			||||||
      - name: install-cni
 | 
					      - name: install-cni
 | 
				
			||||||
        image: quay.io/coreos/flannel:v0.9.0-amd64
 | 
					        image: quay.io/coreos/flannel:v0.9.1-amd64
 | 
				
			||||||
        command:
 | 
					        command:
 | 
				
			||||||
        - cp
 | 
					        - cp
 | 
				
			||||||
        args:
 | 
					        args:
 | 
				
			||||||
| 
						 | 
					@ -113,7 +113,7 @@ spec:
 | 
				
			||||||
          mountPath: /etc/kube-flannel/
 | 
					          mountPath: /etc/kube-flannel/
 | 
				
			||||||
      containers:
 | 
					      containers:
 | 
				
			||||||
      - name: kube-flannel
 | 
					      - name: kube-flannel
 | 
				
			||||||
        image: quay.io/coreos/flannel:v0.9.0-amd64
 | 
					        image: quay.io/coreos/flannel:v0.9.1-amd64
 | 
				
			||||||
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
 | 
					        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
 | 
				
			||||||
        securityContext:
 | 
					        securityContext:
 | 
				
			||||||
          privileged: true
 | 
					          privileged: true
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
					@ -54,7 +54,7 @@ spec:
 | 
				
			||||||
      serviceAccountName: flannel
 | 
					      serviceAccountName: flannel
 | 
				
			||||||
      containers:
 | 
					      containers:
 | 
				
			||||||
      - name: kube-flannel
 | 
					      - name: kube-flannel
 | 
				
			||||||
        image: quay.io/coreos/flannel:v0.7.1
 | 
					        image: quay.io/coreos/flannel:v0.9.1
 | 
				
			||||||
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
 | 
					        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
 | 
				
			||||||
        securityContext:
 | 
					        securityContext:
 | 
				
			||||||
          privileged: true
 | 
					          privileged: true
 | 
				
			||||||
| 
						 | 
					@ -79,7 +79,7 @@ spec:
 | 
				
			||||||
        - name: flannel-cfg
 | 
					        - name: flannel-cfg
 | 
				
			||||||
          mountPath: /etc/kube-flannel/
 | 
					          mountPath: /etc/kube-flannel/
 | 
				
			||||||
      - name: install-cni
 | 
					      - name: install-cni
 | 
				
			||||||
        image: quay.io/coreos/flannel:v0.7.1
 | 
					        image: quay.io/coreos/flannel:v0.9.1
 | 
				
			||||||
        command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
 | 
					        command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
 | 
				
			||||||
        resources:
 | 
					        resources:
 | 
				
			||||||
          limits:
 | 
					          limits:
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
| 
						 | 
@@ -171,7 +171,7 @@ spec:
         # This container runs flannel using the kube-subnet-mgr backend
         # for allocating subnets.
         - name: kube-flannel
-          image: quay.io/coreos/flannel:v0.8.0
+          image: quay.io/coreos/flannel:v0.9.1
           command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
           securityContext:
             privileged: true
@@ -3,7 +3,7 @@
 # This manifest includes the following component versions:
 #   calico/node:v2.6.2
 #   calico/cni:v1.11.0
-#   coreos/flannel:v0.9.0
+#   coreos/flannel:v0.9.1
 
 # This ConfigMap can be used to configure a self-hosted Canal installation.
 kind: ConfigMap
@@ -194,7 +194,7 @@ spec:
         # This container runs flannel using the kube-subnet-mgr backend
         # for allocating subnets.
         - name: kube-flannel
-          image: quay.io/coreos/flannel:v0.9.0
+          image: quay.io/coreos/flannel:v0.9.1
           command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
           securityContext:
             privileged: true
@@ -163,7 +163,7 @@ spec:
         # This container runs flannel using the kube-subnet-mgr backend
         # for allocating subnets.
         - name: kube-flannel
-          image: quay.io/coreos/flannel:v0.8.0
+          image: quay.io/coreos/flannel:v0.9.1
           command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
           securityContext:
             privileged: true
@@ -5,7 +5,7 @@ metadata:
   name: calico-config
   namespace: kube-system
 data:
-  # The calico-etcd PetSet service IP:port
+  # etcd servers
   etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
                       {{- range $j, $member := $cluster.Members -}}
                           {{- if $j }},{{ end -}}
@@ -18,33 +18,22 @@ data:
   # The CNI network configuration to install on each node.
   cni_network_config: |-
     {
-      "name": "k8s-pod-network",
-      "cniVersion": "0.3.0",
-      "plugins": [
-        {
-          "type": "calico",
-          "etcd_endpoints": "__ETCD_ENDPOINTS__",
-          "log_level": "info",
-          "ipam": {
+        "name": "k8s-pod-network",
+        "type": "calico",
+        "etcd_endpoints": "__ETCD_ENDPOINTS__",
+        "log_level": "info",
+        "ipam": {
             "type": "calico-ipam"
-          },
-          "policy": {
-            "type": "k8s",
-            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
-            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
-          },
-          "kubernetes": {
-            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
-          }
         },
-        {
-          "type": "portmap",
-          "snat": true,
-          "capabilities": {"portMappings": true}
+        "policy": {
+            "type": "k8s",
+             "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
+             "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
+        },
+        "kubernetes": {
+            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
         }
-      ]
     }
 
 ---
 
 kind: ClusterRole
@@ -133,12 +122,15 @@ spec:
         operator: Exists
       - effect: NoSchedule
         operator: Exists
+      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
+      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
+      terminationGracePeriodSeconds: 0
       containers:
         # Runs calico/node container on each Kubernetes node.  This
         # container programs network policy and routes on each
         # host.
         - name: calico-node
-          image: quay.io/calico/node:v2.4.1
+          image: quay.io/calico/node:v2.6.2
           resources:
             requests:
               cpu: 10m
@@ -169,6 +161,14 @@ spec:
             # Auto-detect the BGP IP address.
             - name: IP
               value: ""
+            # Disable IPv6 on Kubernetes.
+            - name: FELIX_IPV6SUPPORT
+              value: "false"
+            # Set Felix logging to "info"
+            - name: FELIX_LOGSEVERITYSCREEN
+              value: "info"
+            - name: FELIX_HEALTHENABLED
+              value: "true"
           securityContext:
             privileged: true
           volumeMounts:
@@ -185,7 +185,7 @@ spec:
         # This container installs the Calico CNI binaries
         # and CNI network config file on each node.
         - name: install-cni
-          image: quay.io/calico/cni:v1.10.0
+          image: quay.io/calico/cni:v1.11.0
           resources:
             requests:
               cpu: 10m
@@ -194,7 +194,7 @@ spec:
           env:
             # The name of calico config file
             - name: CNI_CONF_NAME
-              value: 10-calico.conflist
+              value: 10-calico.conf
             # The location of the Calico etcd cluster.
             - name: ETCD_ENDPOINTS
               valueFrom:
@@ -237,8 +237,8 @@ spec:
 
 ---
 
-# This manifest deploys the Calico policy controller on Kubernetes.
-# See https://github.com/projectcalico/k8s-policy
+# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then
+# be removed entirely once the new kube-controllers deployment has been deployed above.
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
@@ -246,35 +246,23 @@ metadata:
   namespace: kube-system
   labels:
     k8s-app: calico-policy
-    role.kubernetes.io/networking: "1"
 spec:
-  # The policy controller can only have a single active instance.
-  replicas: 1
+  # Turn this deployment off in favor of the kube-controllers deployment above.
+  replicas: 0
+  strategy:
+    type: Recreate
   template:
     metadata:
       name: calico-policy-controller
       namespace: kube-system
       labels:
-        k8s-app: calico-policy-controller
-        role.kubernetes.io/networking: "1"
-      annotations:
-        scheduler.alpha.kubernetes.io/critical-pod: ''
+        k8s-app: calico-policy
     spec:
-      # The policy controller must run in the host network namespace so that
-      # it isn't governed by policy that would prevent it from working.
       hostNetwork: true
       serviceAccountName: calico
-      tolerations:
-      - key: node-role.kubernetes.io/master
-        effect: NoSchedule
-      - key: CriticalAddonsOnly
-        operator: Exists
       containers:
         - name: calico-policy-controller
-          image: quay.io/calico/kube-policy-controller:v0.7.0
-          resources:
-            requests:
-              cpu: 10m
+          image: quay.io/calico/kube-controllers:v1.0.0
           env:
             # The location of the Calico etcd cluster.
             - name: ETCD_ENDPOINTS
@@ -282,15 +270,6 @@ spec:
                 configMapKeyRef:
                   name: calico-config
                   key: etcd_endpoints
-            # The location of the Kubernetes API.  Use the default Kubernetes
-            # service for API access.
-            - name: K8S_API
-              value: "https://kubernetes.default:443"
-            # Since we're running in the host namespace and might not have KubeDNS
-            # access, configure the container's /etc/hosts to resolve
-            # kubernetes.default to the correct service clusterIP.
-            - name: CONFIGURE_ETC_HOSTS
-              value: "true"
 
           volumeMounts:
             # Necessary for gossip based DNS
@@ -301,6 +280,55 @@ spec:
         - name: etc-hosts
           hostPath:
             path: /etc/hosts
+---
+
+# This manifest deploys the Calico Kubernetes controllers.
+# See https://github.com/projectcalico/kube-controllers
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: calico-kube-controllers
+  namespace: kube-system
+  labels:
+    k8s-app: calico-kube-controllers
+    role.kubernetes.io/networking: "1"
+spec:
+  # The controllers can only have a single active instance.
+  replicas: 1
+  template:
+    metadata:
+      name: calico-kube-controllers
+      namespace: kube-system
+      labels:
+        k8s-app: calico-kube-controllers
+        role.kubernetes.io/networking: "1"
+      annotations:
+        scheduler.alpha.kubernetes.io/critical-pod: ''
+    spec:
+      # The controllers must run in the host network namespace so that
+      # it isn't governed by policy that would prevent it from working.
+      hostNetwork: true
+      serviceAccountName: calico
+      tolerations:
+      - key: node-role.kubernetes.io/master
+        effect: NoSchedule
+      - key: CriticalAddonsOnly
+        operator: Exists
+      containers:
+        - name: calico-kube-controllers
+          image: quay.io/calico/kube-controllers:v1.0.0
+          resources:
+            requests:
+              cpu: 10m
+          env:
+            # The location of the Calico etcd cluster.
+            - name: ETCD_ENDPOINTS
+              valueFrom:
+                configMapKeyRef:
+                  name: calico-config
+                  key: etcd_endpoints
+
+
 {{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
 # This manifest installs the k8s-ec2-srcdst container, which disables
@@ -16,11 +16,10 @@ data:
   calico_backend: "bird"
 
   # The CNI network configuration to install on each node.
-  # cniVersion should be 0.1.0 on k8s: https://github.com/projectcalico/calico/issues/742
   cni_network_config: |-
     {
       "name": "k8s-pod-network",
-      "cniVersion": "0.1.0",
+      "cniVersion": "0.3.0",
       "plugins": [
         {
           "type": "calico",
@@ -137,7 +137,7 @@ spec:
         effect: NoSchedule
       containers:
       - name: romana-daemon
-        image: quay.io/romana/daemon:v2.0-preview.2
+        image: quay.io/romana/daemon:v2.0.0
        imagePullPolicy: Always
        resources:
          requests:
@@ -170,7 +170,7 @@ spec:
         effect: NoSchedule
       containers:
       - name: romana-listener
-        image: quay.io/romana/listener:v2.0-preview.2
+        image: quay.io/romana/listener:v2.0.0
         imagePullPolicy: Always
         resources:
           requests:
@@ -185,6 +185,8 @@ metadata:
   name: romana-agent
   namespace: kube-system
 spec:
+  updateStrategy:
+    type: RollingUpdate
   template:
     metadata:
       labels:
@@ -200,7 +202,7 @@ spec:
         effect: NoSchedule
       containers:
       - name: romana-agent
-        image: quay.io/romana/agent:v2.0-preview.2
+        image: quay.io/romana/agent:v2.0.0
         imagePullPolicy: Always
         resources:
           requests:
@@ -213,6 +215,10 @@ spec:
           valueFrom:
             fieldRef:
               fieldPath: spec.nodeName
+        - name: NODEIP
+          valueFrom:
+            fieldRef:
+              fieldPath: status.hostIP
         args:
         - --service-cluster-ip-range={{ .ServiceClusterIPRange }}
         securityContext:
@@ -299,7 +305,7 @@ spec:
         effect: NoSchedule
       containers:
       - name: romana-aws
-        image: quay.io/romana/aws:v2.0-preview.2
+        image: quay.io/romana/aws:v2.0.0
         imagePullPolicy: Always
         resources:
           requests:
@@ -328,7 +334,7 @@ spec:
         effect: NoSchedule
       containers:
       - name: romana-vpcrouter
-        image: quay.io/romana/vpcrouter-romana-plugin
+        image: quay.io/romana/vpcrouter-romana-plugin:1.1.12
         imagePullPolicy: Always
         resources:
           requests:
@@ -185,6 +185,14 @@ func NewAWSCloud(region string, tags map[string]string) (AWSCloud, error) {
 		config = config.WithCredentialsChainVerboseErrors(true)
 		config = request.WithRetryer(config, newLoggingRetryer(ClientMaxRetries))
 
+		// We have the updated aws sdk from 1.9, but don't have https://github.com/kubernetes/kubernetes/pull/55307
+		// Set the SleepDelay function to work around this
+		// TODO: Remove once we update to k8s >= 1.9 (or a version of the retry delayer that includes this)
+		config.SleepDelay = func(d time.Duration) {
+			glog.V(6).Infof("aws request sleeping for %v", d)
+			time.Sleep(d)
+		}
+
 		requestLogger := newRequestLogger(2)
 
 		sess, err := session.NewSession(config)
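The hunk above overrides the AWS SDK's retry sleep so that each backoff pause is logged instead of happening silently. A minimal standalone sketch of the same override, assuming aws-sdk-go v1 and using the standard log package in place of the kops glog/requestLogger plumbing:

package main

import (
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	config := aws.NewConfig().WithRegion("us-east-1")

	// Log every retry sleep before pausing, mirroring the SleepDelay
	// workaround in NewAWSCloud above.
	config.SleepDelay = func(d time.Duration) {
		log.Printf("aws request sleeping for %v", d)
		time.Sleep(d)
	}

	sess, err := session.NewSession(config)
	if err != nil {
		log.Fatal(err)
	}
	_ = sess // the session is now ready for building service clients
}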
@@ -133,8 +133,11 @@ func (c *MockAWSCloud) BuildTags(name *string) map[string]string {
 }
 
 func (c *MockAWSCloud) Tags() map[string]string {
-	glog.Fatalf("MockAWSCloud Tags not implemented")
-	return nil
+	tags := make(map[string]string)
+	for k, v := range c.tags {
+		tags[k] = v
+	}
+	return tags
 }
 
 func (c *MockAWSCloud) CreateTags(resourceId string, tags map[string]string) error {
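The new MockAWSCloud.Tags deliberately returns a copy of the internal map rather than the map itself, so test code cannot mutate the mock's state through the return value. The idiom in isolation (the store type here is hypothetical):

// store is a hypothetical holder of tags, standing in for MockAWSCloud.
type store struct {
	tags map[string]string
}

// Tags returns a defensive copy so callers cannot mutate internal state.
func (s *store) Tags() map[string]string {
	out := make(map[string]string, len(s.tags))
	for k, v := range s.tags {
		out[k] = v
	}
	return out
}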
@@ -468,11 +468,10 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 
 	if b.cluster.Spec.Networking.Calico != nil {
 		key := "networking.projectcalico.org"
-		// 2.6.3-kops.1 = 2.6.2 with kops manifest tweaks.  This should go away with the next version bump.
 		versions := map[string]string{
 			"pre-k8s-1.6": "2.4.1",
-			"k8s-1.6":     "2.4.2-kops.1",
-			"k8s-1.8":     "2.6.3-kops.1",
+			"k8s-1.6":     "2.6.2",
+			"k8s-1.7":     "2.6.2",
 		}
 
 		{

@@ -499,14 +498,14 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 				Version:           fi.String(versions[id]),
 				Selector:          networkingSelector,
 				Manifest:          fi.String(location),
-				KubernetesVersion: ">=1.6.0 <1.8.0",
+				KubernetesVersion: ">=1.6.0 <1.7.0",
 				Id:                id,
 			})
 			manifests[key+"-"+id] = "addons/" + location
 		}
 
 		{
-			id := "k8s-1.8"
+			id := "k8s-1.7"
 			location := key + "/" + id + ".yaml"
 
 			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{

@@ -514,7 +513,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 				Version:           fi.String(versions[id]),
 				Selector:          networkingSelector,
 				Manifest:          fi.String(location),
-				KubernetesVersion: ">=1.8.0",
+				KubernetesVersion: ">=1.7.0",
 				Id:                id,
 			})
 			manifests[key+"-"+id] = "addons/" + location

@@ -598,18 +597,18 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
 
 	if b.cluster.Spec.Networking.Romana != nil {
 		key := "networking.romana"
-		version := "v2.0-preview.3"
+		version := "v2.0.0"
 
 		{
-			location := key + "/k8s-1.6.yaml"
-			id := "k8s-1.6"
+			location := key + "/k8s-1.7.yaml"
+			id := "k8s-1.7"
 
 			addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
 				Name:              fi.String(key),
 				Version:           fi.String(version),
 				Selector:          networkingSelector,
 				Manifest:          fi.String(location),
-				KubernetesVersion: ">=1.6.0",
+				KubernetesVersion: ">=1.7.0",
 				Id:                id,
 			})
 			manifests[key+"-"+id] = "addons/" + location
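Each AddonSpec pairs a manifest id with a KubernetesVersion semver range, and the channels tooling applies whichever manifest matches the cluster's version; with the hunks above, a 1.6.x cluster gets the k8s-1.6 Calico manifest and anything 1.7.0 or newer gets k8s-1.7. A sketch of that range matching, assuming the github.com/blang/semver package:

package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	// The two ranges introduced above for the Calico addon.
	k8s16 := semver.MustParseRange(">=1.6.0 <1.7.0")
	k8s17 := semver.MustParseRange(">=1.7.0")

	for _, raw := range []string{"1.6.4", "1.7.2", "1.8.0"} {
		v := semver.MustParse(raw)
		fmt.Printf("k8s %s -> k8s-1.6 manifest: %v, k8s-1.7 manifest: %v\n",
			raw, k8s16(v), k8s17(v))
	}
}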
@@ -12,7 +12,13 @@ go_library(
         "//pkg/apis/kops:go_default_library",
         "//pkg/cloudinstances:go_default_library",
         "//upup/pkg/fi:go_default_library",
+        "//util/pkg/vfs:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/github.com/gophercloud/gophercloud:go_default_library",
+        "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library",
+        "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/kubernetes/federation/pkg/dnsprovider:go_default_library",
     ],
 )
@@ -18,25 +18,90 @@ package openstack
 
 import (
 	"fmt"
+	"time"
+
+	"github.com/golang/glog"
+	"github.com/gophercloud/gophercloud"
+	os "github.com/gophercloud/gophercloud/openstack"
+	cinder "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
 	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kops/pkg/apis/kops"
 	"k8s.io/kops/pkg/cloudinstances"
 	"k8s.io/kops/upup/pkg/fi"
+	"k8s.io/kops/util/pkg/vfs"
 	"k8s.io/kubernetes/federation/pkg/dnsprovider"
 )
 
+const TagNameEtcdClusterPrefix = "k8s.io/etcd/"
+const TagNameRolePrefix = "k8s.io/role/"
+const TagClusterName = "KubernetesCluster"
+
+// readBackoff is the backoff strategy for openstack read retries.
+var readBackoff = wait.Backoff{
+	Duration: time.Second,
+	Factor:   1.5,
+	Jitter:   0.1,
+	Steps:    4,
+}
+
+// writeBackoff is the backoff strategy for openstack write retries.
+var writeBackoff = wait.Backoff{
+	Duration: time.Second,
+	Factor:   1.5,
+	Jitter:   0.1,
+	Steps:    5,
+}
+
 type OpenstackCloud interface {
 	fi.Cloud
+
+	// SetVolumeTags will set the tags for the Cinder volume
+	SetVolumeTags(id string, tags map[string]string) error
+
+	// GetCloudTags will return the tags attached on cloud
+	GetCloudTags() map[string]string
+
+	// ListVolumes will return the Cinder volumes which match the options
+	ListVolumes(opt cinder.ListOpts) ([]cinder.Volume, error)
+
+	// CreateVolume will create a new Cinder Volume
+	CreateVolume(opt cinder.CreateOpts) (*cinder.Volume, error)
 }
 
 type openstackCloud struct {
+	cinderClient *gophercloud.ServiceClient
+	tags         map[string]string
 }
 
 var _ fi.Cloud = &openstackCloud{}
 
-func NewOpenstackCloud() (OpenstackCloud, error) {
-	return &openstackCloud{}, nil
+func NewOpenstackCloud(tags map[string]string) (OpenstackCloud, error) {
+	config := vfs.OpenstackConfig{}
+
+	authOption, err := config.GetCredential()
+	if err != nil {
+		return nil, err
+	}
+	provider, err := os.AuthenticatedClient(authOption)
+	if err != nil {
+		return nil, fmt.Errorf("error building openstack authenticated client: %v", err)
+	}
+
+	endpointOpt, err := config.GetServiceConfig("Cinder")
+	if err != nil {
+		return nil, err
+	}
+	cinderClient, err := os.NewBlockStorageV2(provider, endpointOpt)
+	if err != nil {
+		return nil, fmt.Errorf("error building cinder client: %v", err)
+	}
+
+	c := &openstackCloud{
+		cinderClient: cinderClient,
+		tags:         tags,
+	}
+	return c, nil
 }
 
 func (c *openstackCloud) ProviderID() kops.CloudProviderID {
@@ -62,3 +127,78 @@ func (c *openstackCloud) DeleteGroup(g *cloudinstances.CloudInstanceGroup) error
 func (c *openstackCloud) GetCloudGroups(cluster *kops.Cluster, instancegroups []*kops.InstanceGroup, warnUnmatched bool, nodes []v1.Node) (map[string]*cloudinstances.CloudInstanceGroup, error) {
 	return nil, fmt.Errorf("openstackCloud::GetCloudGroups not implemented")
 }
+
+func (c *openstackCloud) SetVolumeTags(id string, tags map[string]string) error {
+	if len(tags) == 0 {
+		return nil
+	}
+	if id == "" {
+		return fmt.Errorf("error setting tags to unknown volume")
+	}
+	glog.V(4).Infof("setting tags to cinder volume %q: %v", id, tags)
+
+	opt := cinder.UpdateOpts{Metadata: tags}
+	done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
+		_, err := cinder.Update(c.cinderClient, id, opt).Extract()
+		if err != nil {
+			return false, fmt.Errorf("error setting tags to cinder volume %q: %v", id, err)
+		}
+		return true, nil
+	})
+	if err != nil {
+		return err
+	} else if done {
+		return nil
+	} else {
+		return wait.ErrWaitTimeout
+	}
+}
+
+func (c *openstackCloud) GetCloudTags() map[string]string {
+	return c.tags
+}
+
+func (c *openstackCloud) ListVolumes(opt cinder.ListOpts) ([]cinder.Volume, error) {
+	var volumes []cinder.Volume
+
+	done, err := vfs.RetryWithBackoff(readBackoff, func() (bool, error) {
+		allPages, err := cinder.List(c.cinderClient, opt).AllPages()
+		if err != nil {
+			return false, fmt.Errorf("error listing volumes %v: %v", opt, err)
+		}
+
+		vs, err := cinder.ExtractVolumes(allPages)
+		if err != nil {
+			return false, fmt.Errorf("error extracting volumes from pages: %v", err)
+		}
+		volumes = vs
+		return true, nil
+	})
+	if err != nil {
+		return volumes, err
+	} else if done {
+		return volumes, nil
+	} else {
+		return volumes, wait.ErrWaitTimeout
+	}
+}
+
+func (c *openstackCloud) CreateVolume(opt cinder.CreateOpts) (*cinder.Volume, error) {
+	var volume *cinder.Volume
+
+	done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
+		v, err := cinder.Create(c.cinderClient, opt).Extract()
+		if err != nil {
+			return false, fmt.Errorf("error creating volume %v: %v", opt, err)
+		}
+		volume = v
+		return true, nil
+	})
+	if err != nil {
+		return volume, err
+	} else if done {
+		return volume, nil
+	} else {
+		return volume, wait.ErrWaitTimeout
+	}
+}
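Every Cinder call above is wrapped in vfs.RetryWithBackoff with the readBackoff/writeBackoff policies declared earlier, so transient OpenStack errors are retried with exponentially growing, jittered sleeps. The same behaviour can be sketched directly on the apimachinery wait package that backs those policies:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Mirrors readBackoff above: 1s first, then 1.5x longer each step,
	// with up to 10% random jitter, giving up after 4 attempts.
	backoff := wait.Backoff{
		Duration: time.Second,
		Factor:   1.5,
		Jitter:   0.1,
		Steps:    4,
	}

	attempt := 0
	err := wait.ExponentialBackoff(backoff, func() (done bool, err error) {
		attempt++
		// Returning (false, nil) means "not done yet, sleep and retry";
		// a non-nil error aborts the retry loop immediately.
		return attempt >= 3, nil
	})
	fmt.Printf("succeeded on attempt %d, err=%v\n", attempt, err)
}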
@@ -0,0 +1,14 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["volume.go"],
+    importpath = "k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//upup/pkg/fi:go_default_library",
+        "//upup/pkg/fi/cloudup/openstack:go_default_library",
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes:go_default_library",
+    ],
+)
@@ -0,0 +1,145 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openstacktasks
+
+import (
+	"fmt"
+
+	"github.com/golang/glog"
+	cinder "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
+	"k8s.io/kops/upup/pkg/fi"
+	"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
+)
+
+type Volume struct {
+	ID               *string
+	Name             *string
+	AvailabilityZone *string
+	VolumeType       *string
+	SizeGB           *int64
+	Tags             map[string]string
+	Lifecycle        *fi.Lifecycle
+}
+
+var _ fi.CompareWithID = &Volume{}
+
+func (c *Volume) CompareWithID() *string {
+	return c.ID
+}
+
+func (c *Volume) Find(context *fi.Context) (*Volume, error) {
+	cloud := context.Cloud.(openstack.OpenstackCloud)
+	opt := cinder.ListOpts{
+		Name:     fi.StringValue(c.Name),
+		Metadata: cloud.GetCloudTags(),
+	}
+	volumes, err := cloud.ListVolumes(opt)
+	if err != nil {
+		return nil, err
+	}
+	n := len(volumes)
+	if n == 0 {
+		return nil, nil
+	} else if n != 1 {
+		return nil, fmt.Errorf("found multiple Volumes with name: %s", fi.StringValue(c.Name))
+	}
+	v := volumes[0]
+	actual := &Volume{
+		ID:               fi.String(v.ID),
+		Name:             fi.String(v.Name),
+		AvailabilityZone: fi.String(v.AvailabilityZone),
+		VolumeType:       fi.String(v.VolumeType),
+		SizeGB:           fi.Int64(int64(v.Size)),
+		Tags:             v.Metadata,
+		Lifecycle:        c.Lifecycle,
+	}
+	return actual, nil
+}
+
+func (c *Volume) Run(context *fi.Context) error {
+	cloud := context.Cloud.(openstack.OpenstackCloud)
+	for k, v := range cloud.GetCloudTags() {
+		c.Tags[k] = v
+	}
+
+	return fi.DefaultDeltaRunMethod(c, context)
+}
+
+func (_ *Volume) CheckChanges(a, e, changes *Volume) error {
+	if a == nil {
+		if e.Name == nil {
+			return fi.RequiredField("Name")
+		}
+		if e.AvailabilityZone == nil {
+			return fi.RequiredField("AvailabilityZone")
+		}
+		if e.VolumeType == nil {
+			return fi.RequiredField("VolumeType")
+		}
+		if e.SizeGB == nil {
+			return fi.RequiredField("SizeGB")
+		}
+	} else {
+		if changes.ID != nil {
+			return fi.CannotChangeField("ID")
+		}
+		if changes.AvailabilityZone != nil {
+			return fi.CannotChangeField("AvailabilityZone")
+		}
+		if changes.VolumeType != nil {
+			return fi.CannotChangeField("VolumeType")
+		}
+		if changes.SizeGB != nil {
+			return fi.CannotChangeField("SizeGB")
+		}
+	}
+	return nil
+}
+
+func (_ *Volume) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *Volume) error {
+	if a == nil {
+		glog.V(2).Infof("Creating PersistentVolume with Name:%q", fi.StringValue(e.Name))
+
+		opt := cinder.CreateOpts{
+			Size:             int(*e.SizeGB),
+			AvailabilityZone: fi.StringValue(e.AvailabilityZone),
+			Metadata:         e.Tags,
+			Name:             fi.StringValue(e.Name),
+			VolumeType:       fi.StringValue(e.VolumeType),
+		}
+
+		v, err := t.Cloud.CreateVolume(opt)
+		if err != nil {
+			return fmt.Errorf("error creating PersistentVolume: %v", err)
+		}
+
+		e.ID = fi.String(v.ID)
+		return nil
+	}
+
+	if changes != nil && changes.Tags != nil {
+		glog.V(2).Infof("Update the tags on volume %q: %v, the differences are %v", fi.StringValue(e.ID), e.Tags, changes.Tags)
+
+		err := t.Cloud.SetVolumeTags(fi.StringValue(e.ID), e.Tags)
+		if err != nil {
+			return fmt.Errorf("error updating the tags on volume %q: %v", fi.StringValue(e.ID), err)
+		}
+	}
+
+	glog.V(2).Infof("Openstack task Volume::RenderOpenstack did nothing")
+	return nil
+}
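A model builder would declare the new Volume task and let the fi delta-run machinery call Find, CheckChanges, and RenderOpenstack as needed. A hypothetical declaration; every literal below is illustrative rather than taken from a real kops model:

package model

import (
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks"
)

// etcdVolume shows the shape of a Volume task declaration; Run then merges
// in the cloud-wide tags (e.g. KubernetesCluster) before the delta run.
func etcdVolume(lifecycle *fi.Lifecycle) *openstacktasks.Volume {
	return &openstacktasks.Volume{
		Name:             fi.String("etcd-main.mycluster.example.com"),
		AvailabilityZone: fi.String("nova"),
		VolumeType:       fi.String("ssd"),
		SizeGB:           fi.Int64(20),
		Tags:             map[string]string{"k8s.io/role/master": "1"},
		Lifecycle:        lifecycle,
	}
}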
@@ -133,7 +133,8 @@ func BuildCloud(cluster *kops.Cluster) (fi.Cloud, error) {
 		}
 	case kops.CloudProviderOpenstack:
 		{
-			osc, err := openstack.NewOpenstackCloud()
+			cloudTags := map[string]string{openstack.TagClusterName: cluster.ObjectMeta.Name}
+			osc, err := openstack.NewOpenstackCloud(cloudTags)
 			if err != nil {
 				return nil, err
 			}
@@ -32,8 +32,10 @@ type SecretStore interface {
 	DeleteSecret(item *KeystoreItem) error
 	// FindSecret finds a secret, if exists.  Returns nil,nil if not found
 	FindSecret(id string) (*Secret, error)
-	// GetOrCreateSecret creates or replace a secret
+	// GetOrCreateSecret creates a secret
 	GetOrCreateSecret(id string, secret *Secret) (current *Secret, created bool, err error)
+	// ReplaceSecret will forcefully update an existing secret if it exists
+	ReplaceSecret(id string, secret *Secret) (current *Secret, err error)
 	// ListSecrets lists the ids of all known secrets
 	ListSecrets() ([]string, error)
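The split gives callers two distinct write paths: GetOrCreateSecret keeps whatever is already stored, while ReplaceSecret forcefully overwrites it. A hedged sketch of how a caller might choose between them, for example behind a --force style flag (writeSecret itself is hypothetical):

package example

import (
	"github.com/golang/glog"

	"k8s.io/kops/upup/pkg/fi"
)

func writeSecret(store fi.SecretStore, id string, secret *fi.Secret, force bool) error {
	if force {
		// ReplaceSecret overwrites whatever is stored under id.
		_, err := store.ReplaceSecret(id, secret)
		return err
	}
	// GetOrCreateSecret leaves an existing secret untouched.
	_, created, err := store.GetOrCreateSecret(id, secret)
	if err == nil && !created {
		glog.Infof("secret %q already exists; left unchanged", id)
	}
	return err
}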
@@ -157,7 +157,7 @@ func (c *ClientsetSecretStore) GetOrCreateSecret(name string, secret *fi.Secret)
 			return s, false, nil
 		}
 
-		_, err = c.createSecret(secret, name)
+		_, err = c.createSecret(secret, name, false)
 		if err != nil {
 			if errors.IsAlreadyExists(err) && i == 0 {
 				glog.Infof("Got already-exists error when writing secret; likely due to concurrent creation.  Will retry")
@@ -181,6 +181,21 @@ func (c *ClientsetSecretStore) GetOrCreateSecret(name string, secret *fi.Secret)
 	return s, true, nil
 }
 
+// ReplaceSecret implements fi.SecretStore::ReplaceSecret
+func (c *ClientsetSecretStore) ReplaceSecret(name string, secret *fi.Secret) (*fi.Secret, error) {
+	_, err := c.createSecret(secret, name, true)
+	if err != nil {
+		return nil, fmt.Errorf("unable to write secret: %v", err)
+	}
+
+	// Confirm the secret exists
+	s, err := c.loadSecret(name)
+	if err != nil {
+		return nil, fmt.Errorf("unable to load secret immediately after creation: %v", err)
+	}
+	return s, nil
+}
+
 // loadSecret returns the named secret, if it exists, otherwise returns nil
 func (c *ClientsetSecretStore) loadSecret(name string) (*fi.Secret, error) {
 	name = NamePrefix + name
@@ -207,8 +222,8 @@ func parseSecret(keyset *kops.Keyset) (*fi.Secret, error) {
 	return s, nil
 }
 
-// createSecret writes the secret, but only if it does not exist
-func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string) (*kops.Keyset, error) {
+// createSecret will create the Secret, overwriting an existing secret if replace is true
+func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string, replace bool) (*kops.Keyset, error) {
 	keyset := &kops.Keyset{}
 	keyset.Name = NamePrefix + name
 	keyset.Spec.Type = kops.SecretTypeSecret
@ -221,5 +236,8 @@ func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string) (*kops.Ke
 		PrivateMaterial: s.Data,
 	})
 
+	if replace {
+		return c.clientset.Keysets(c.namespace).Update(keyset)
+	}
 	return c.clientset.Keysets(c.namespace).Create(keyset)
 }
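Together, GetOrCreateSecret and ReplaceSecret give callers a clean create-vs-overwrite split. A sketch of the dispatch a command layer might perform on top of it; `options.Force`, `secretStore`, and `secret` are assumptions for illustration, not code from this diff.

	// With force semantics, overwrite unconditionally; otherwise keep the
	// create-only behaviour and surface its already-exists error.
	if options.Force {
		_, err = secretStore.ReplaceSecret("dockerconfig", secret)
	} else {
		_, _, err = secretStore.GetOrCreateSecret("dockerconfig", secret)
	}
	if err != nil {
		return fmt.Errorf("error storing secret: %v", err)
	}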
@ -127,7 +127,7 @@ func (c *VFSSecretStore) GetOrCreateSecret(id string, secret *fi.Secret) (*fi.Se
 			return nil, false, err
 		}
 
-		err = c.createSecret(secret, p, acl)
+		err = c.createSecret(secret, p, acl, false)
 		if err != nil {
 			if os.IsExist(err) && i == 0 {
 				glog.Infof("Got already-exists error when writing secret; likely due to concurrent creation.  Will retry")
@ -151,6 +151,27 @@ func (c *VFSSecretStore) GetOrCreateSecret(id string, secret *fi.Secret) (*fi.Se
 	return s, true, nil
 }
 
+func (c *VFSSecretStore) ReplaceSecret(id string, secret *fi.Secret) (*fi.Secret, error) {
+	p := c.buildSecretPath(id)
+
+	acl, err := acls.GetACL(p, c.cluster)
+	if err != nil {
+		return nil, err
+	}
+
+	err = c.createSecret(secret, p, acl, true)
+	if err != nil {
+		return nil, fmt.Errorf("unable to write secret: %v", err)
+	}
+
+	// Confirm the secret exists
+	s, err := c.loadSecret(p)
+	if err != nil {
+		return nil, fmt.Errorf("unable to load secret immediately after creation %v: %v", p, err)
+	}
+	return s, nil
+}
+
 func (c *VFSSecretStore) loadSecret(p vfs.Path) (*fi.Secret, error) {
 	data, err := p.ReadFile()
 	if err != nil {
@ -166,11 +187,15 @@ func (c *VFSSecretStore) loadSecret(p vfs.Path) (*fi.Secret, error)
 	return s, nil
 }
 
-// createSecret writes the secret, but only if it does not exists
-func (c *VFSSecretStore) createSecret(s *fi.Secret, p vfs.Path, acl vfs.ACL) error {
+// createSecret will create the Secret, overwriting an existing secret if replace is true
+func (c *VFSSecretStore) createSecret(s *fi.Secret, p vfs.Path, acl vfs.ACL, replace bool) error {
 	data, err := json.Marshal(s)
 	if err != nil {
 		return fmt.Errorf("error serializing secret: %v", err)
 	}
 
+	if replace {
+		return p.WriteFile(data, acl)
+	}
 	return p.CreateFile(data, acl)
 }
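The replace flag maps onto the two vfs.Path write primitives: CreateFile fails if the target already exists (the os.IsExist check in GetOrCreateSecret relies on this), while WriteFile overwrites. A sketch of the difference on a local path; vfs.NewFSPath and passing a nil ACL are assumptions for a plain filesystem.

	p := vfs.NewFSPath("/tmp/example-secret")
	data := []byte("{}")
	if err := p.CreateFile(data, nil); os.IsExist(err) {
		// CreateFile refuses to clobber an existing file;
		// WriteFile replaces the contents instead.
		err = p.WriteFile(data, nil)
	}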