Merge remote-tracking branch 'upstream/master' into additional_cidr

This commit is contained in:
Rodrigo Menezes 2017-11-27 23:30:42 -08:00
commit 2594d382ae
74 changed files with 1332 additions and 342 deletions

View File

@ -587,7 +587,7 @@ kops-server-push: kops-server-build
.PHONY: bazel-test .PHONY: bazel-test
bazel-test: bazel-test:
bazel ${BAZEL_OPTIONS} test //cmd/... //pkg/... //channels/... //nodeup/... //channels/... //protokube/... //dns-controller/... //upup/... //util/... //hack:verify-all --test_output=errors bazel ${BAZEL_OPTIONS} test //cmd/... //pkg/... //channels/... //nodeup/... //channels/... //protokube/... //dns-controller/... //tests/... //upup/... //util/... //hack:verify-all --test_output=errors
.PHONY: bazel-build .PHONY: bazel-build
bazel-build: bazel-build:

2
OWNERS
View File

@ -5,3 +5,5 @@ approvers:
- zmerlynn - zmerlynn
- andrewsykim - andrewsykim
- geojaz - geojaz
- kashifsaadat
- gambol99

View File

@ -13,6 +13,9 @@ spec:
- name: kope.io/k8s-1.7-debian-jessie-amd64-hvm-ebs-2017-07-28 - name: kope.io/k8s-1.7-debian-jessie-amd64-hvm-ebs-2017-07-28
providerID: aws providerID: aws
kubernetesVersion: ">=1.7.0" kubernetesVersion: ">=1.7.0"
- name: kope.io/k8s-1.8-debian-jessie-amd64-hvm-ebs-2017-11-27
providerID: aws
kubernetesVersion: ">=1.8.0"
- providerID: gce - providerID: gce
name: "cos-cloud/cos-stable-60-9592-90-0" name: "cos-cloud/cos-stable-60-9592-90-0"
cluster: cluster:
@ -21,13 +24,13 @@ spec:
kubenet: {} kubenet: {}
kubernetesVersions: kubernetesVersions:
- range: ">=1.8.0" - range: ">=1.8.0"
recommendedVersion: 1.8.3 recommendedVersion: 1.8.4
requiredVersion: 1.8.0 requiredVersion: 1.8.0
- range: ">=1.7.0" - range: ">=1.7.0"
recommendedVersion: 1.7.10 recommendedVersion: 1.7.11
requiredVersion: 1.7.0 requiredVersion: 1.7.0
- range: ">=1.6.0" - range: ">=1.6.0"
recommendedVersion: 1.6.11 recommendedVersion: 1.6.13
requiredVersion: 1.6.0 requiredVersion: 1.6.0
- range: ">=1.5.0" - range: ">=1.5.0"
recommendedVersion: 1.5.8 recommendedVersion: 1.5.8
@ -39,15 +42,15 @@ spec:
- range: ">=1.8.0-alpha.1" - range: ">=1.8.0-alpha.1"
recommendedVersion: 1.8.0-beta.1 recommendedVersion: 1.8.0-beta.1
#requiredVersion: 1.8.0 #requiredVersion: 1.8.0
kubernetesVersion: 1.8.3 kubernetesVersion: 1.8.4
- range: ">=1.7.0-alpha.1" - range: ">=1.7.0-alpha.1"
recommendedVersion: 1.7.1 recommendedVersion: 1.7.1
#requiredVersion: 1.7.0 #requiredVersion: 1.7.0
kubernetesVersion: 1.7.10 kubernetesVersion: 1.7.11
- range: ">=1.6.0-alpha.1" - range: ">=1.6.0-alpha.1"
#recommendedVersion: 1.6.0 #recommendedVersion: 1.6.0
#requiredVersion: 1.6.0 #requiredVersion: 1.6.0
kubernetesVersion: 1.6.11 kubernetesVersion: 1.6.13
- range: ">=1.5.0-alpha1" - range: ">=1.5.0-alpha1"
recommendedVersion: 1.5.1 recommendedVersion: 1.5.1
#requiredVersion: 1.5.1 #requiredVersion: 1.5.1

View File

@ -24,10 +24,10 @@ spec:
recommendedVersion: 1.7.10 recommendedVersion: 1.7.10
requiredVersion: 1.7.0 requiredVersion: 1.7.0
- range: ">=1.6.0" - range: ">=1.6.0"
recommendedVersion: 1.6.7 recommendedVersion: 1.6.11
requiredVersion: 1.6.0 requiredVersion: 1.6.0
- range: ">=1.5.0" - range: ">=1.5.0"
recommendedVersion: 1.5.7 recommendedVersion: 1.5.8
requiredVersion: 1.5.1 requiredVersion: 1.5.1
- range: "<1.5.0" - range: "<1.5.0"
recommendedVersion: 1.4.12 recommendedVersion: 1.4.12
@ -40,11 +40,11 @@ spec:
- range: ">=1.6.0-alpha.1" - range: ">=1.6.0-alpha.1"
#recommendedVersion: 1.6.0 #recommendedVersion: 1.6.0
#requiredVersion: 1.6.0 #requiredVersion: 1.6.0
kubernetesVersion: 1.6.7 kubernetesVersion: 1.6.11
- range: ">=1.5.0-alpha1" - range: ">=1.5.0-alpha1"
recommendedVersion: 1.5.1 recommendedVersion: 1.5.1
#requiredVersion: 1.5.1 #requiredVersion: 1.5.1
kubernetesVersion: 1.5.7 kubernetesVersion: 1.5.8
- range: "<1.5.0" - range: "<1.5.0"
recommendedVersion: 1.4.4 recommendedVersion: 1.4.4
#requiredVersion: 1.4.4 #requiredVersion: 1.4.4

View File

@ -40,6 +40,9 @@ var (
# Create a new docker config. # Create a new docker config.
kops create secret dockerconfig -f /path/to/docker/config.json \ kops create secret dockerconfig -f /path/to/docker/config.json \
--name k8s-cluster.example.com --state s3://example.com --name k8s-cluster.example.com --state s3://example.com
# Replace an existing docker config secret.
kops create secret dockerconfig -f /path/to/docker/config.json --force \
--name k8s-cluster.example.com --state s3://example.com
`)) `))
create_secret_dockerconfig_short = i18n.T(`Create a docker config.`) create_secret_dockerconfig_short = i18n.T(`Create a docker config.`)
@ -48,6 +51,7 @@ var (
type CreateSecretDockerConfigOptions struct { type CreateSecretDockerConfigOptions struct {
ClusterName string ClusterName string
DockerConfigPath string DockerConfigPath string
Force bool
} }
func NewCmdCreateSecretDockerConfig(f *util.Factory, out io.Writer) *cobra.Command { func NewCmdCreateSecretDockerConfig(f *util.Factory, out io.Writer) *cobra.Command {
@ -78,6 +82,7 @@ func NewCmdCreateSecretDockerConfig(f *util.Factory, out io.Writer) *cobra.Comma
} }
cmd.Flags().StringVarP(&options.DockerConfigPath, "", "f", "", "Path to docker config JSON file") cmd.Flags().StringVarP(&options.DockerConfigPath, "", "f", "", "Path to docker config JSON file")
cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Force replace the kops secret if it already exists")
return cmd return cmd
} }
@ -119,9 +124,19 @@ func RunCreateSecretDockerConfig(f *util.Factory, out io.Writer, options *Create
secret.Data = data secret.Data = data
_, _, err = secretStore.GetOrCreateSecret("dockerconfig", secret) if !options.Force {
_, created, err := secretStore.GetOrCreateSecret("dockerconfig", secret)
if err != nil { if err != nil {
return fmt.Errorf("error adding docker config secret: %v", err) return fmt.Errorf("error adding dockerconfig secret: %v", err)
}
if !created {
return fmt.Errorf("failed to create the dockerconfig secret as it already exists. The `--force` flag can be passed to replace an existing secret.")
}
} else {
_, err := secretStore.ReplaceSecret("dockerconfig", secret)
if err != nil {
return fmt.Errorf("error updating dockerconfig secret: %v", err)
}
} }
return nil return nil

View File

@ -40,6 +40,9 @@ var (
# Create a new encryption config. # Create a new encryption config.
kops create secret encryptionconfig -f config.yaml \ kops create secret encryptionconfig -f config.yaml \
--name k8s-cluster.example.com --state s3://example.com --name k8s-cluster.example.com --state s3://example.com
# Replace an existing encryption config secret.
kops create secret encryptionconfig -f config.yaml --force \
--name k8s-cluster.example.com --state s3://example.com
`)) `))
create_secret_encryptionconfig_short = i18n.T(`Create an encryption config.`) create_secret_encryptionconfig_short = i18n.T(`Create an encryption config.`)
@ -48,6 +51,7 @@ var (
type CreateSecretEncryptionConfigOptions struct { type CreateSecretEncryptionConfigOptions struct {
ClusterName string ClusterName string
EncryptionConfigPath string EncryptionConfigPath string
Force bool
} }
func NewCmdCreateSecretEncryptionConfig(f *util.Factory, out io.Writer) *cobra.Command { func NewCmdCreateSecretEncryptionConfig(f *util.Factory, out io.Writer) *cobra.Command {
@ -78,6 +82,7 @@ func NewCmdCreateSecretEncryptionConfig(f *util.Factory, out io.Writer) *cobra.C
} }
cmd.Flags().StringVarP(&options.EncryptionConfigPath, "", "f", "", "Path to encryption config yaml file") cmd.Flags().StringVarP(&options.EncryptionConfigPath, "", "f", "", "Path to encryption config yaml file")
cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Force replace the kops secret if it already exists")
return cmd return cmd
} }
@ -120,9 +125,19 @@ func RunCreateSecretEncryptionConfig(f *util.Factory, out io.Writer, options *Cr
secret.Data = data secret.Data = data
_, _, err = secretStore.GetOrCreateSecret("encryptionconfig", secret) if !options.Force {
_, created, err := secretStore.GetOrCreateSecret("encryptionconfig", secret)
if err != nil { if err != nil {
return fmt.Errorf("error adding encryption config secret: %v", err) return fmt.Errorf("error adding encryptionconfig secret: %v", err)
}
if !created {
return fmt.Errorf("failed to create the encryptionconfig secret as it already exists. The `--force` flag can be passed to replace an existing secret.")
}
} else {
_, err := secretStore.ReplaceSecret("encryptionconfig", secret)
if err != nil {
return fmt.Errorf("error updating encryptionconfig secret: %v", err)
}
} }
return nil return nil

34
docs/authentication.md Normal file
View File

@ -0,0 +1,34 @@
# Authentication
Kops has support for configuring authentication systems. This support is
currently highly experimental, and should not be used with kubernetes versions
before 1.8.5 because of a serious bug with apimachinery [#55022](https://github.com/kubernetes/kubernetes/issues/55022).
## kopeio authentication
If you want to experiment with kopeio authentication, you can use
`--authentication kopeio`. However please be aware that kopeio authentication
has not yet been formally released, and thus there is not a lot of upstream
documentation.
Alternatively, you can add this block to your cluster:
```
authentication:
kopeio: {}
```
For example:
```
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
name: cluster.example.com
spec:
authentication:
kopeio: {}
authorization:
rbac: {}
```

View File

@ -20,12 +20,16 @@ kops create secret dockerconfig
# Create a new docker config. # Create a new docker config.
kops create secret dockerconfig -f /path/to/docker/config.json \ kops create secret dockerconfig -f /path/to/docker/config.json \
--name k8s-cluster.example.com --state s3://example.com --name k8s-cluster.example.com --state s3://example.com
# Replace an existing docker config secret.
kops create secret dockerconfig -f /path/to/docker/config.json --force \
--name k8s-cluster.example.com --state s3://example.com
``` ```
### Options ### Options
``` ```
-f, -- string Path to docker config JSON file -f, -- string Path to docker config JSON file
--force Force replace the kops secret if it already exists
``` ```
### Options inherited from parent commands ### Options inherited from parent commands

View File

@ -20,12 +20,16 @@ kops create secret encryptionconfig
# Create a new encryption config. # Create a new encryption config.
kops create secret encryptionconfig -f config.yaml \ kops create secret encryptionconfig -f config.yaml \
--name k8s-cluster.example.com --state s3://example.com --name k8s-cluster.example.com --state s3://example.com
# Replace an existing encryption config secret.
kops create secret encryptionconfig -f config.yaml --force \
--name k8s-cluster.example.com --state s3://example.com
``` ```
### Options ### Options
``` ```
-f, -- string Path to encryption config yaml file -f, -- string Path to encryption config yaml file
--force Force replace the kops secret if it already exists
``` ```
### Options inherited from parent commands ### Options inherited from parent commands

View File

@ -1,6 +1,6 @@
# Cluster Templating # Cluster Templating
The command `kops replace` can replace a cluster desired configuration from the config in a yaml file (see [/cli/kops_replace.md](/cli/kops_replace.md)). The command `kops replace` can replace a cluster desired configuration from the config in a yaml file (see [cli/kops_replace.md](cli/kops_replace.md)).
It is possible to generate that yaml file from a template, using the command `kops toolbox template` (see [cli/kops_toolbox_template.md](cli/kops_toolbox_template.md)). It is possible to generate that yaml file from a template, using the command `kops toolbox template` (see [cli/kops_toolbox_template.md](cli/kops_toolbox_template.md)).
@ -45,7 +45,7 @@ Running `kops toolbox template` replaces the placeholders in the template by val
Note: when creating a cluster desired configuration template, you can Note: when creating a cluster desired configuration template, you can
- use `kops get k8s-cluster.example.com -o yaml > cluster-desired-config.yaml` to create the cluster desired configuration file (see [cli/kops_get.md](cli/kops_get.md)). The values in this file are defined in [cli/cluster_spec.md](cli/cluster_spec.md). - use `kops get k8s-cluster.example.com -o yaml > cluster-desired-config.yaml` to create the cluster desired configuration file (see [cli/kops_get.md](cli/kops_get.md)). The values in this file are defined in [cluster_spec.md](cluster_spec.md).
- replace values by placeholders in that file to create the template. - replace values by placeholders in that file to create the template.
### Templates ### Templates

View File

@ -1,3 +1,26 @@
# Installing Kops via Homebrew
Homebrew makes installing kops [very simple for MacOS.](../install.md)
```bash
brew update && brew install kops
```
Development Releases and master can also be installed via Homebrew very easily:
```bash
# Development Release
brew update && brew install kops --devel
# HEAD of master
brew update && brew install kops --HEAD
```
Note: if you already have kops installed, you need to substitute `upgrade` for `install`.
You can switch between development and stable releases with:
```bash
brew switch kops 1.7.1
brew switch kops 1.8.0-beta.1
```
# Releasing kops to Brew # Releasing kops to Brew
Submitting a new release of kops to Homebrew is very simple. Submitting a new release of kops to Homebrew is very simple.
@ -8,11 +31,20 @@ Submitting a new release of kops to Homebrew is very simple.
This will automatically update the provided fields and open a PR for you. This will automatically update the provided fields and open a PR for you.
More details on this script are located [here.](https://github.com/Homebrew/brew/blob/master/Library/Homebrew/dev-cmd/bump-formula-pr.rb) More details on this script are located [here.](https://github.com/Homebrew/brew/blob/master/Library/Homebrew/dev-cmd/bump-formula-pr.rb)
We now include both major and development releases in homebrew. A development version can be updated by adding the `--devel` flag.
Example usage: Example usage:
``` ```bash
# Major Version
brew bump-formula-pr \ brew bump-formula-pr \
--url=https://github.com/kubernetes/kops/archive/1.7.1.tar.gz \ --url=https://github.com/kubernetes/kops/archive/1.7.1.tar.gz \
--sha256=044c5c7a737ed3acf53517e64bb27d3da8f7517d2914df89efeeaf84bc8a722a --sha256=044c5c7a737ed3acf53517e64bb27d3da8f7517d2914df89efeeaf84bc8a722a
# Development Version
brew bump-formula-pr \
--devel \
--url=https://github.com/kubernetes/kops/archive/1.8.0-beta.1.tar.gz \
--sha256=81026d6c1cd7b3898a88275538a7842b4bd8387775937e0528ccb7b83948abf1
``` ```
* Update the URL variable to the tar.gz of the new release source code * Update the URL variable to the tar.gz of the new release source code

View File

@ -8,6 +8,8 @@ From Homebrew:
brew update && brew install kops brew update && brew install kops
``` ```
Developers can also easily install [development releases](development/homebrew.md).
From Github: From Github:
```bash ```bash

View File

@ -14,6 +14,13 @@ or `--networking flannel-udp` can be specified to explicitly choose a backend mo
See the *Changes to k8s-policy* section in the See the *Changes to k8s-policy* section in the
[Calico release notes](https://github.com/projectcalico/calico/releases/tag/v2.4.0) [Calico release notes](https://github.com/projectcalico/calico/releases/tag/v2.4.0)
for help. for help.
* Due to `ThirdPartyResources` becoming fully deprecated in Kubernetes v1.8 (replaced by `CustomResourceDefinitions`), existing Canal users upgrading their Clusters to Kubernetes v1.8 must follow the below TPR->CRD migration steps:
1. Run: `kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v2.6.2/upgrade/v2.5/manifests/upgrade-job.yaml`
2. Retrieve the pod name from describing the job: `kubectl describe job/calico-upgrade-v2.5`
3. Validate the last log line from the pod reports that it completed successfully: `kubectl logs calico-upgrade-v2.5-<random-id>`
4. Update the `KubernetesVersion` within your ClusterSpec to v1.8 (or above), performing an update & rolling-update to all nodes (will involve downtime)
5. Confirm cluster is back up and all canal pods are running successfully: `kops validate cluster` (this may take a few minutes for the cluster to fully validate)
6. Delete the upgrade job as it is no longer required: `kubectl delete job calico-upgrade-v2.5` (you can also safely delete the `clusterrole`, `clusterrolebinding` and `serviceaccount` resources that were created by the above manifest file)
# Full changelist # Full changelist

View File

@ -1,13 +1,33 @@
## How to update Kops - Kubernetes Ops # Updating kops (Binaries)
Update the latest source code from kubernetes/kops ## MacOS
``` From Homebrew:
cd ${GOPATH}/src/k8s.io/kops/
git pull && make
```
Alternatively, if you installed from Homebrew ```bash
```
brew update && brew upgrade kops brew update && brew upgrade kops
``` ```
From Github:
```bash
rm -rf /usr/local/bin/kops
wget -O kops https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-darwin-amd64
chmod +x ./kops
sudo mv ./kops /usr/local/bin/
```
You can also rerun [these steps](development/building.md) if previously built from source.
## Linux
From Github:
```bash
rm -rf /usr/local/bin/kops
wget -O kops https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64
chmod +x ./kops
sudo mv ./kops /usr/local/bin/
```
You can also rerun [these steps](development/building.md) if previously built from source.

View File

@ -123,6 +123,7 @@ k8s.io/kops/upup/pkg/fi/cloudup/dotasks
k8s.io/kops/upup/pkg/fi/cloudup/gce k8s.io/kops/upup/pkg/fi/cloudup/gce
k8s.io/kops/upup/pkg/fi/cloudup/gcetasks k8s.io/kops/upup/pkg/fi/cloudup/gcetasks
k8s.io/kops/upup/pkg/fi/cloudup/openstack k8s.io/kops/upup/pkg/fi/cloudup/openstack
k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks
k8s.io/kops/upup/pkg/fi/cloudup/terraform k8s.io/kops/upup/pkg/fi/cloudup/terraform
k8s.io/kops/upup/pkg/fi/cloudup/vsphere k8s.io/kops/upup/pkg/fi/cloudup/vsphere
k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks

View File

@ -40,7 +40,7 @@ func (f *FileAssetsBuilder) Build(c *fi.ModelBuilderContext) error {
// used to keep track of previous file, so a instanceGroup can override a cluster wide one // used to keep track of previous file, so a instanceGroup can override a cluster wide one
tracker := make(map[string]bool, 0) tracker := make(map[string]bool, 0)
// ensure the default path exists // ensure the default path exists
c.AddTask(&nodetasks.File{ c.EnsureTask(&nodetasks.File{
Path: f.FileAssetsDefaultPath(), Path: f.FileAssetsDefaultPath(),
Type: nodetasks.FileType_Directory, Type: nodetasks.FileType_Directory,
Mode: s("0755"), Mode: s("0755"),
@ -88,8 +88,8 @@ func (f *FileAssetsBuilder) buildFileAssets(c *fi.ModelBuilderContext, assets []
content = string(decoded) content = string(decoded)
} }
// @check if the directory structure exist or create it // We use EnsureTask so that we don't have to check if the asset directories have already been done
c.AddTask(&nodetasks.File{ c.EnsureTask(&nodetasks.File{
Path: filepath.Dir(assetPath), Path: filepath.Dir(assetPath),
Type: nodetasks.FileType_Directory, Type: nodetasks.FileType_Directory,
Mode: s("0755"), Mode: s("0755"),

View File

@ -55,7 +55,7 @@ func (h *HookBuilder) Build(c *fi.ModelBuilderContext) error {
case "": case "":
name = fmt.Sprintf("kops-hook-%d", j) name = fmt.Sprintf("kops-hook-%d", j)
if isInstanceGroup { if isInstanceGroup {
name = fmt.Sprintf("%s-ig", name) name += "-ig"
} }
default: default:
name = hook.Name name = hook.Name
@ -72,7 +72,7 @@ func (h *HookBuilder) Build(c *fi.ModelBuilderContext) error {
enabled := false enabled := false
managed := true managed := true
c.AddTask(&nodetasks.Service{ c.AddTask(&nodetasks.Service{
Name: hook.Name, Name: ensureSystemdSuffix(name),
ManageState: &managed, ManageState: &managed,
Enabled: &enabled, Enabled: &enabled,
Running: &enabled, Running: &enabled,
@ -94,6 +94,14 @@ func (h *HookBuilder) Build(c *fi.ModelBuilderContext) error {
return nil return nil
} }
// ensureSystemdSuffix makes sure that we have a .service suffix on the name, needed on newer versions of systemd
func ensureSystemdSuffix(name string) string {
if !strings.HasSuffix(name, ".service") && !strings.HasSuffix(name, ".timer") {
name += ".service"
}
return name
}
// buildSystemdService is responsible for generating the service // buildSystemdService is responsible for generating the service
func (h *HookBuilder) buildSystemdService(name string, hook *kops.HookSpec) (*nodetasks.Service, error) { func (h *HookBuilder) buildSystemdService(name string, hook *kops.HookSpec) (*nodetasks.Service, error) {
// perform some basic validation // perform some basic validation
@ -130,7 +138,7 @@ func (h *HookBuilder) buildSystemdService(name string, hook *kops.HookSpec) (*no
} }
service := &nodetasks.Service{ service := &nodetasks.Service{
Name: name, Name: ensureSystemdSuffix(name),
Definition: s(unit.Render()), Definition: s(unit.Render()),
} }

View File

@ -244,8 +244,11 @@ type RBACAuthorizationSpec struct {
type AlwaysAllowAuthorizationSpec struct { type AlwaysAllowAuthorizationSpec struct {
} }
// AccessSpec provides configuration details related to kubeapi dns and ELB access
type AccessSpec struct { type AccessSpec struct {
// DNS will be used to provide config on kube-apiserver elb dns
DNS *DNSAccessSpec `json:"dns,omitempty"` DNS *DNSAccessSpec `json:"dns,omitempty"`
// LoadBalancer is the configuration for the kube-apiserver ELB
LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"` LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"`
} }
@ -285,6 +288,8 @@ type KubeDNSConfig struct {
// ExternalDNSConfig are options of the dns-controller // ExternalDNSConfig are options of the dns-controller
type ExternalDNSConfig struct { type ExternalDNSConfig struct {
// Disable indicates we do not wish to run the dns-controller addon
Disable bool `json:"disable,omitempty"`
// WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources // WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources
WatchIngress *bool `json:"watchIngress,omitempty"` WatchIngress *bool `json:"watchIngress,omitempty"`
// WatchNamespace is namespace to watch, defaults to all (use to control who can create dns entries) // WatchNamespace is namespace to watch, defaults to all (use to control who can create dns entries)

View File

@ -323,6 +323,9 @@ type KubeControllerManagerConfig struct {
// long the autoscaler has to wait before another upscale operation can // long the autoscaler has to wait before another upscale operation can
// be performed after the current one has completed. // be performed after the current one has completed.
HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"` HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"`
// HorizontalPodAutoscalerUseRestClients determines if the new-style clients
// should be used if support for custom metrics is enabled.
HorizontalPodAutoscalerUseRestClients *bool `json:"horizontalPodAutoscalerUseRestClients,omitempty" flag:"horizontal-pod-autoscaler-use-rest-clients"`
// FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features. // FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"` FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
} }

View File

@ -243,8 +243,11 @@ type RBACAuthorizationSpec struct {
type AlwaysAllowAuthorizationSpec struct { type AlwaysAllowAuthorizationSpec struct {
} }
// AccessSpec provides configuration details related to kubeapi dns and ELB access
type AccessSpec struct { type AccessSpec struct {
// DNS will be used to provide config on kube-apiserver elb dns
DNS *DNSAccessSpec `json:"dns,omitempty"` DNS *DNSAccessSpec `json:"dns,omitempty"`
// LoadBalancer is the configuration for the kube-apiserver ELB
LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"` LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"`
} }
@ -284,6 +287,8 @@ type KubeDNSConfig struct {
// ExternalDNSConfig are options of the dns-controller // ExternalDNSConfig are options of the dns-controller
type ExternalDNSConfig struct { type ExternalDNSConfig struct {
// Disable indicates we do not wish to run the dns-controller addon
Disable bool `json:"disable,omitempty"`
// WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources // WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources
WatchIngress *bool `json:"watchIngress,omitempty"` WatchIngress *bool `json:"watchIngress,omitempty"`
// WatchNamespace is namespace to watch, defaults to all (use to control who can create dns entries) // WatchNamespace is namespace to watch, defaults to all (use to control who can create dns entries)

View File

@ -323,6 +323,9 @@ type KubeControllerManagerConfig struct {
// long the autoscaler has to wait before another upscale operation can // long the autoscaler has to wait before another upscale operation can
// be performed after the current one has completed. // be performed after the current one has completed.
HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"` HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"`
// HorizontalPodAutoscalerUseRestClients determines if the new-style clients
// should be used if support for custom metrics is enabled.
HorizontalPodAutoscalerUseRestClients *bool `json:"horizontalPodAutoscalerUseRestClients,omitempty" flag:"horizontal-pod-autoscaler-use-rest-clients"`
// FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features. // FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"` FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
} }

View File

@ -1266,6 +1266,7 @@ func Convert_kops_ExecContainerAction_To_v1alpha1_ExecContainerAction(in *kops.E
} }
func autoConvert_v1alpha1_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDNSConfig, out *kops.ExternalDNSConfig, s conversion.Scope) error { func autoConvert_v1alpha1_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDNSConfig, out *kops.ExternalDNSConfig, s conversion.Scope) error {
out.Disable = in.Disable
out.WatchIngress = in.WatchIngress out.WatchIngress = in.WatchIngress
out.WatchNamespace = in.WatchNamespace out.WatchNamespace = in.WatchNamespace
return nil return nil
@ -1277,6 +1278,7 @@ func Convert_v1alpha1_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDN
} }
func autoConvert_kops_ExternalDNSConfig_To_v1alpha1_ExternalDNSConfig(in *kops.ExternalDNSConfig, out *ExternalDNSConfig, s conversion.Scope) error { func autoConvert_kops_ExternalDNSConfig_To_v1alpha1_ExternalDNSConfig(in *kops.ExternalDNSConfig, out *ExternalDNSConfig, s conversion.Scope) error {
out.Disable = in.Disable
out.WatchIngress = in.WatchIngress out.WatchIngress = in.WatchIngress
out.WatchNamespace = in.WatchNamespace out.WatchNamespace = in.WatchNamespace
return nil return nil
@ -1944,6 +1946,7 @@ func autoConvert_v1alpha1_KubeControllerManagerConfig_To_kops_KubeControllerMana
out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
out.FeatureGates = in.FeatureGates out.FeatureGates = in.FeatureGates
return nil return nil
} }
@ -1979,6 +1982,7 @@ func autoConvert_kops_KubeControllerManagerConfig_To_v1alpha1_KubeControllerMana
out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
out.FeatureGates = in.FeatureGates out.FeatureGates = in.FeatureGates
return nil return nil
} }

View File

@ -21,11 +21,10 @@ limitations under the License.
package v1alpha1 package v1alpha1
import ( import (
reflect "reflect"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion" conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
reflect "reflect"
) )
func init() { func init() {
@ -2179,6 +2178,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
**out = **in **out = **in
} }
} }
if in.HorizontalPodAutoscalerUseRestClients != nil {
in, out := &in.HorizontalPodAutoscalerUseRestClients, &out.HorizontalPodAutoscalerUseRestClients
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
if in.FeatureGates != nil { if in.FeatureGates != nil {
in, out := &in.FeatureGates, &out.FeatureGates in, out := &in.FeatureGates, &out.FeatureGates
*out = make(map[string]string, len(*in)) *out = make(map[string]string, len(*in))

View File

@ -244,8 +244,11 @@ type RBACAuthorizationSpec struct {
type AlwaysAllowAuthorizationSpec struct { type AlwaysAllowAuthorizationSpec struct {
} }
// AccessSpec provides configuration details related to kubeapi dns and ELB access
type AccessSpec struct { type AccessSpec struct {
// DNS will be used to provide config on kube-apiserver elb dns
DNS *DNSAccessSpec `json:"dns,omitempty"` DNS *DNSAccessSpec `json:"dns,omitempty"`
// LoadBalancer is the configuration for the kube-apiserver ELB
LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"` LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"`
} }
@ -282,6 +285,8 @@ type KubeDNSConfig struct {
// ExternalDNSConfig are options of the dns-controller // ExternalDNSConfig are options of the dns-controller
type ExternalDNSConfig struct { type ExternalDNSConfig struct {
// Disable indicates we do not wish to run the dns-controller addon
Disable bool `json:"disable,omitempty"`
// WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources // WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources
WatchIngress *bool `json:"watchIngress,omitempty"` WatchIngress *bool `json:"watchIngress,omitempty"`
// WatchNamespace is namespace to watch, detaults to all (use to control whom can creates dns entries) // WatchNamespace is namespace to watch, detaults to all (use to control whom can creates dns entries)

View File

@ -323,6 +323,9 @@ type KubeControllerManagerConfig struct {
// long the autoscaler has to wait before another upscale operation can // long the autoscaler has to wait before another upscale operation can
// be performed after the current one has completed. // be performed after the current one has completed.
HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"` HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"`
// HorizontalPodAutoscalerUseRestClients determines if the new-style clients
// should be used if support for custom metrics is enabled.
HorizontalPodAutoscalerUseRestClients *bool `json:"horizontalPodAutoscalerUseRestClients,omitempty" flag:"horizontal-pod-autoscaler-use-rest-clients"`
// FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features. // FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"` FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
} }

View File

@ -1375,6 +1375,7 @@ func Convert_kops_ExecContainerAction_To_v1alpha2_ExecContainerAction(in *kops.E
} }
func autoConvert_v1alpha2_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDNSConfig, out *kops.ExternalDNSConfig, s conversion.Scope) error { func autoConvert_v1alpha2_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDNSConfig, out *kops.ExternalDNSConfig, s conversion.Scope) error {
out.Disable = in.Disable
out.WatchIngress = in.WatchIngress out.WatchIngress = in.WatchIngress
out.WatchNamespace = in.WatchNamespace out.WatchNamespace = in.WatchNamespace
return nil return nil
@ -1386,6 +1387,7 @@ func Convert_v1alpha2_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDN
} }
func autoConvert_kops_ExternalDNSConfig_To_v1alpha2_ExternalDNSConfig(in *kops.ExternalDNSConfig, out *ExternalDNSConfig, s conversion.Scope) error { func autoConvert_kops_ExternalDNSConfig_To_v1alpha2_ExternalDNSConfig(in *kops.ExternalDNSConfig, out *ExternalDNSConfig, s conversion.Scope) error {
out.Disable = in.Disable
out.WatchIngress = in.WatchIngress out.WatchIngress = in.WatchIngress
out.WatchNamespace = in.WatchNamespace out.WatchNamespace = in.WatchNamespace
return nil return nil
@ -2206,6 +2208,7 @@ func autoConvert_v1alpha2_KubeControllerManagerConfig_To_kops_KubeControllerMana
out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
out.FeatureGates = in.FeatureGates out.FeatureGates = in.FeatureGates
return nil return nil
} }
@ -2241,6 +2244,7 @@ func autoConvert_kops_KubeControllerManagerConfig_To_v1alpha2_KubeControllerMana
out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
out.FeatureGates = in.FeatureGates out.FeatureGates = in.FeatureGates
return nil return nil
} }

View File

@ -21,11 +21,10 @@ limitations under the License.
package v1alpha2 package v1alpha2
import ( import (
reflect "reflect"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion" conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
reflect "reflect"
) )
func init() { func init() {
@ -2305,6 +2304,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
**out = **in **out = **in
} }
} }
if in.HorizontalPodAutoscalerUseRestClients != nil {
in, out := &in.HorizontalPodAutoscalerUseRestClients, &out.HorizontalPodAutoscalerUseRestClients
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
if in.FeatureGates != nil { if in.FeatureGates != nil {
in, out := &in.FeatureGates, &out.FeatureGates in, out := &in.FeatureGates, &out.FeatureGates
*out = make(map[string]string, len(*in)) *out = make(map[string]string, len(*in))

View File

@ -483,9 +483,9 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
} }
} }
if kubernetesRelease.LT(semver.MustParse("1.6.0")) { if kubernetesRelease.LT(semver.MustParse("1.7.0")) {
if c.Spec.Networking != nil && c.Spec.Networking.Romana != nil { if c.Spec.Networking != nil && c.Spec.Networking.Romana != nil {
return field.Invalid(fieldSpec.Child("Networking"), "romana", "romana networking is not supported with kubernetes versions 1.5 or lower") return field.Invalid(fieldSpec.Child("Networking"), "romana", "romana networking is not supported with kubernetes versions 1.6 or lower")
} }
} }

View File

@ -21,11 +21,10 @@ limitations under the License.
package kops package kops
import ( import (
reflect "reflect"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
conversion "k8s.io/apimachinery/pkg/conversion" conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
reflect "reflect"
) )
func init() { func init() {
@ -2524,6 +2523,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
**out = **in **out = **in
} }
} }
if in.HorizontalPodAutoscalerUseRestClients != nil {
in, out := &in.HorizontalPodAutoscalerUseRestClients, &out.HorizontalPodAutoscalerUseRestClients
if *in == nil {
*out = nil
} else {
*out = new(bool)
**out = **in
}
}
if in.FeatureGates != nil { if in.FeatureGates != nil {
in, out := &in.FeatureGates, &out.FeatureGates in, out := &in.FeatureGates, &out.FeatureGates
*out = make(map[string]string, len(*in)) *out = make(map[string]string, len(*in))

View File

@ -17,8 +17,6 @@ limitations under the License.
package scheme package scheme
import ( import (
os "os"
announced "k8s.io/apimachinery/pkg/apimachinery/announced" announced "k8s.io/apimachinery/pkg/apimachinery/announced"
registered "k8s.io/apimachinery/pkg/apimachinery/registered" registered "k8s.io/apimachinery/pkg/apimachinery/registered"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -26,6 +24,7 @@ import (
schema "k8s.io/apimachinery/pkg/runtime/schema" schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer" serializer "k8s.io/apimachinery/pkg/runtime/serializer"
kops "k8s.io/kops/pkg/apis/kops/install" kops "k8s.io/kops/pkg/apis/kops/install"
os "os"
) )
var Scheme = runtime.NewScheme() var Scheme = runtime.NewScheme()

View File

@ -17,8 +17,6 @@ limitations under the License.
package scheme package scheme
import ( import (
os "os"
announced "k8s.io/apimachinery/pkg/apimachinery/announced" announced "k8s.io/apimachinery/pkg/apimachinery/announced"
registered "k8s.io/apimachinery/pkg/apimachinery/registered" registered "k8s.io/apimachinery/pkg/apimachinery/registered"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -26,6 +24,7 @@ import (
schema "k8s.io/apimachinery/pkg/runtime/schema" schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer" serializer "k8s.io/apimachinery/pkg/runtime/serializer"
kops "k8s.io/kops/pkg/apis/kops/install" kops "k8s.io/kops/pkg/apis/kops/install"
os "os"
) )
var Scheme = runtime.NewScheme() var Scheme = runtime.NewScheme()

View File

@ -36,6 +36,8 @@ go_library(
"//upup/pkg/fi/cloudup/dotasks:go_default_library", "//upup/pkg/fi/cloudup/dotasks:go_default_library",
"//upup/pkg/fi/cloudup/gce:go_default_library", "//upup/pkg/fi/cloudup/gce:go_default_library",
"//upup/pkg/fi/cloudup/gcetasks:go_default_library", "//upup/pkg/fi/cloudup/gcetasks:go_default_library",
"//upup/pkg/fi/cloudup/openstack:go_default_library",
"//upup/pkg/fi/cloudup/openstacktasks:go_default_library",
"//upup/pkg/fi/fitasks:go_default_library", "//upup/pkg/fi/fitasks:go_default_library",
"//util/pkg/vfs:go_default_library", "//util/pkg/vfs:go_default_library",
"//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/blang/semver:go_default_library",

View File

@ -52,11 +52,9 @@ func (b *FirewallModelBuilder) Build(c *fi.ModelBuilderContext) error {
} }
func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) error { func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) error {
name := "nodes." + b.ClusterName()
{ {
t := &awstasks.SecurityGroup{ t := &awstasks.SecurityGroup{
Name: s(name), Name: s(b.SecurityGroupName(kops.InstanceGroupRoleNode)),
Lifecycle: b.Lifecycle, Lifecycle: b.Lifecycle,
VPC: b.LinkToVPC(), VPC: b.LinkToVPC(),
Description: s("Security group for nodes"), Description: s("Security group for nodes"),
@ -211,7 +209,16 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
// TODO: Make less hacky // TODO: Make less hacky
// TODO: Fix management - we need a wildcard matcher now // TODO: Fix management - we need a wildcard matcher now
tcpRanges := []portRange{{From: 1, To: 4000}, {From: 4003, To: 65535}} tcpBlocked := make(map[int]bool)
// Don't allow nodes to access etcd client port
tcpBlocked[4001] = true
tcpBlocked[4002] = true
// Don't allow nodes to access etcd peer port
tcpBlocked[2380] = true
tcpBlocked[2381] = true
udpRanges := []portRange{{From: 1, To: 65535}} udpRanges := []portRange{{From: 1, To: 65535}}
protocols := []Protocol{} protocols := []Protocol{}
@ -219,14 +226,14 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
// Calico needs to access etcd // Calico needs to access etcd
// TODO: Remove, replace with etcd in calico manifest // TODO: Remove, replace with etcd in calico manifest
glog.Warningf("Opening etcd port on masters for access from the nodes, for calico. This is unsafe in untrusted environments.") glog.Warningf("Opening etcd port on masters for access from the nodes, for calico. This is unsafe in untrusted environments.")
tcpRanges = []portRange{{From: 1, To: 4001}, {From: 4003, To: 65535}} tcpBlocked[4001] = false
protocols = append(protocols, ProtocolIPIP) protocols = append(protocols, ProtocolIPIP)
} }
if b.Cluster.Spec.Networking.Romana != nil { if b.Cluster.Spec.Networking.Romana != nil {
// Romana needs to access etcd // Romana needs to access etcd
glog.Warningf("Opening etcd port on masters for access from the nodes, for romana. This is unsafe in untrusted environments.") glog.Warningf("Opening etcd port on masters for access from the nodes, for romana. This is unsafe in untrusted environments.")
tcpRanges = []portRange{{From: 1, To: 4001}, {From: 4003, To: 65535}} tcpBlocked[4001] = false
protocols = append(protocols, ProtocolIPIP) protocols = append(protocols, ProtocolIPIP)
} }
@ -245,6 +252,21 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
Protocol: s("udp"), Protocol: s("udp"),
}) })
} }
tcpRanges := []portRange{
{From: 1, To: 0},
}
for port := 1; port < 65536; port++ {
previous := &tcpRanges[len(tcpRanges)-1]
if !tcpBlocked[port] {
if (previous.To + 1) == port {
previous.To = port
} else {
tcpRanges = append(tcpRanges, portRange{From: port, To: port})
}
}
}
for _, r := range tcpRanges { for _, r := range tcpRanges {
c.AddTask(&awstasks.SecurityGroupRule{ c.AddTask(&awstasks.SecurityGroupRule{
Name: s(fmt.Sprintf("node-to-master-tcp-%d-%d", r.From, r.To)), Name: s(fmt.Sprintf("node-to-master-tcp-%d-%d", r.From, r.To)),
@ -277,18 +299,19 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
} }
func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext) error { func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext) error {
name := "masters." + b.ClusterName()
{ {
t := &awstasks.SecurityGroup{ t := &awstasks.SecurityGroup{
Name: s(name), Name: s(b.SecurityGroupName(kops.InstanceGroupRoleMaster)),
Lifecycle: b.Lifecycle, Lifecycle: b.Lifecycle,
VPC: b.LinkToVPC(), VPC: b.LinkToVPC(),
Description: s("Security group for masters"), Description: s("Security group for masters"),
RemoveExtraRules: []string{ RemoveExtraRules: []string{
"port=22", // SSH "port=22", // SSH
"port=443", // k8s api "port=443", // k8s api
"port=4001", // etcd main (etcd events is 4002) "port=2380", // etcd main peer
"port=2381", // etcd events peer
"port=4001", // etcd main
"port=4002", // etcd events
"port=4789", // VXLAN "port=4789", // VXLAN
"port=179", // Calico "port=179", // Calico

View File

@ -30,6 +30,8 @@ import (
"k8s.io/kops/upup/pkg/fi/cloudup/dotasks" "k8s.io/kops/upup/pkg/fi/cloudup/dotasks"
"k8s.io/kops/upup/pkg/fi/cloudup/gce" "k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks" "k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
"k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks"
) )
const ( const (
@ -95,6 +97,11 @@ func (b *MasterVolumeBuilder) Build(c *fi.ModelBuilderContext) error {
b.addVSphereVolume(c, name, volumeSize, zone, etcd, m, allMembers) b.addVSphereVolume(c, name, volumeSize, zone, etcd, m, allMembers)
case kops.CloudProviderBareMetal: case kops.CloudProviderBareMetal:
glog.Fatalf("BareMetal not implemented") glog.Fatalf("BareMetal not implemented")
case kops.CloudProviderOpenstack:
err = b.addOpenstackVolume(c, name, volumeSize, zone, etcd, m, allMembers)
if err != nil {
return err
}
default: default:
return fmt.Errorf("unknown cloudprovider %q", b.Cluster.Spec.CloudProvider) return fmt.Errorf("unknown cloudprovider %q", b.Cluster.Spec.CloudProvider)
} }
@ -205,3 +212,33 @@ func (b *MasterVolumeBuilder) addGCEVolume(c *fi.ModelBuilderContext, name strin
func (b *MasterVolumeBuilder) addVSphereVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd *kops.EtcdClusterSpec, m *kops.EtcdMemberSpec, allMembers []string) { func (b *MasterVolumeBuilder) addVSphereVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd *kops.EtcdClusterSpec, m *kops.EtcdMemberSpec, allMembers []string) {
fmt.Print("addVSphereVolume to be implemented") fmt.Print("addVSphereVolume to be implemented")
} }
func (b *MasterVolumeBuilder) addOpenstackVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd *kops.EtcdClusterSpec, m *kops.EtcdMemberSpec, allMembers []string) error {
volumeType := fi.StringValue(m.VolumeType)
if volumeType == "" {
return fmt.Errorf("must set ETCDMemberSpec.VolumeType on Openstack platform")
}
// The tags are how protokube knows to mount the volume and use it for etcd
tags := make(map[string]string)
// Apply all user defined labels on the volumes
for k, v := range b.Cluster.Spec.CloudLabels {
tags[k] = v
}
// This is the configuration of the etcd cluster
tags[openstack.TagNameEtcdClusterPrefix+etcd.Name] = m.Name + "/" + strings.Join(allMembers, ",")
// This says "only mount on a master"
tags[openstack.TagNameRolePrefix+"master"] = "1"
t := &openstacktasks.Volume{
Name: s(name),
AvailabilityZone: s(zone),
VolumeType: s(volumeType),
SizeGB: fi.Int64(int64(volumeSize)),
Tags: tags,
Lifecycle: b.Lifecycle,
}
c.AddTask(t)
return nil
}

View File

@ -220,7 +220,7 @@ func addUntaggedRouteTables(cloud awsup.AWSCloud, clusterName string, resources
continue continue
} }
t := buildTrackerForRouteTable(rt) t := buildTrackerForRouteTable(rt, clusterName)
if resources[t.Type+":"+t.ID] == nil { if resources[t.Type+":"+t.ID] == nil {
resources[t.Type+":"+t.ID] = t resources[t.Type+":"+t.ID] = t
} }
@ -973,19 +973,20 @@ func ListRouteTables(cloud fi.Cloud, clusterName string) ([]*Resource, error) {
var resourceTrackers []*Resource var resourceTrackers []*Resource
for _, rt := range routeTables { for _, rt := range routeTables {
resourceTracker := buildTrackerForRouteTable(rt) resourceTracker := buildTrackerForRouteTable(rt, clusterName)
resourceTrackers = append(resourceTrackers, resourceTracker) resourceTrackers = append(resourceTrackers, resourceTracker)
} }
return resourceTrackers, nil return resourceTrackers, nil
} }
func buildTrackerForRouteTable(rt *ec2.RouteTable) *Resource { func buildTrackerForRouteTable(rt *ec2.RouteTable, clusterName string) *Resource {
resourceTracker := &Resource{ resourceTracker := &Resource{
Name: FindName(rt.Tags), Name: FindName(rt.Tags),
ID: aws.StringValue(rt.RouteTableId), ID: aws.StringValue(rt.RouteTableId),
Type: ec2.ResourceTypeRouteTable, Type: ec2.ResourceTypeRouteTable,
Deleter: DeleteRouteTable, Deleter: DeleteRouteTable,
Shared: HasSharedTag(ec2.ResourceTypeRouteTable+":"+*rt.RouteTableId, rt.Tags, clusterName),
} }
var blocks []string var blocks []string

View File

@ -88,3 +88,55 @@ func TestAddUntaggedRouteTables(t *testing.T) {
t.Fatalf("expected=%q, actual=%q", expected, keys) t.Fatalf("expected=%q, actual=%q", expected, keys)
} }
} }
func TestListRouteTables(t *testing.T) {
cloud := awsup.BuildMockAWSCloud("us-east-1", "abc")
//resources := make(map[string]*Resource)
clusterName := "me.example.com"
ownershipTagKey := "kubernetes.io/cluster/" + clusterName
c := &mockec2.MockEC2{}
cloud.MockEC2 = c
c.RouteTables = append(c.RouteTables, &ec2.RouteTable{
VpcId: aws.String("vpc-1234"),
RouteTableId: aws.String("rt-shared"),
Tags: []*ec2.Tag{
{
Key: aws.String("KubernetesCluster"),
Value: aws.String(clusterName),
},
{
Key: aws.String(ownershipTagKey),
Value: aws.String("shared"),
},
},
})
c.RouteTables = append(c.RouteTables, &ec2.RouteTable{
VpcId: aws.String("vpc-1234"),
RouteTableId: aws.String("rt-owned"),
Tags: []*ec2.Tag{
{
Key: aws.String("KubernetesCluster"),
Value: aws.String(clusterName),
},
{
Key: aws.String(ownershipTagKey),
Value: aws.String("owned"),
},
},
})
resources, err := ListRouteTables(cloud, clusterName)
if err != nil {
t.Fatalf("error listing route tables: %v", err)
}
for _, rt := range resources {
if rt.ID == "rt-shared" && !rt.Shared {
t.Fatalf("expected Shared: true, got: %v", rt.Shared)
}
if rt.ID == "rt-owned" && rt.Shared {
t.Fatalf("expected Shared: false, got: %v", rt.Shared)
}
}
}

View File

@ -3,9 +3,19 @@ load("@io_bazel_rules_go//go:def.bzl", "go_test")
go_test( go_test(
name = "go_default_test", name = "go_default_test",
srcs = ["integration_test.go"], srcs = ["integration_test.go"],
data = [
"exported_testdata", # keep
"//channels:channeldata", # keep
],
importpath = "k8s.io/kops/tests/integration/channel", importpath = "k8s.io/kops/tests/integration/channel",
deps = [ deps = [
"//pkg/apis/kops:go_default_library", "//pkg/apis/kops:go_default_library",
"//vendor/github.com/blang/semver:go_default_library", "//vendor/github.com/blang/semver:go_default_library",
], ],
) )
filegroup(
name = "exported_testdata",
srcs = glob(["simple/**"]),
visibility = ["//visibility:public"],
)

View File

@ -3,6 +3,9 @@ load("@io_bazel_rules_go//go:def.bzl", "go_test")
go_test( go_test(
name = "go_default_test", name = "go_default_test",
srcs = ["integration_test.go"], srcs = ["integration_test.go"],
data = [
"exported_testdata", # keep
],
importpath = "k8s.io/kops/tests/integration/conversion", importpath = "k8s.io/kops/tests/integration/conversion",
deps = [ deps = [
"//pkg/apis/kops:go_default_library", "//pkg/apis/kops:go_default_library",
@ -14,3 +17,9 @@ go_test(
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
], ],
) )
filegroup(
name = "exported_testdata",
srcs = glob(["minimal/**"]),
visibility = ["//visibility:public"],
)

View File

@ -266,7 +266,7 @@
"CidrIp": "0.0.0.0/0" "CidrIp": "0.0.0.0/0"
} }
}, },
"AWSEC2SecurityGroupIngressnodetomastertcp14000": { "AWSEC2SecurityGroupIngressnodetomastertcp12379": {
"Type": "AWS::EC2::SecurityGroupIngress", "Type": "AWS::EC2::SecurityGroupIngress",
"Properties": { "Properties": {
"GroupId": { "GroupId": {
@ -276,6 +276,20 @@
"Ref": "AWSEC2SecurityGroupnodesadditionaluserdataexamplecom" "Ref": "AWSEC2SecurityGroupnodesadditionaluserdataexamplecom"
}, },
"FromPort": 1, "FromPort": 1,
"ToPort": 2379,
"IpProtocol": "tcp"
}
},
"AWSEC2SecurityGroupIngressnodetomastertcp23824000": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
"Ref": "AWSEC2SecurityGroupmastersadditionaluserdataexamplecom"
},
"SourceSecurityGroupId": {
"Ref": "AWSEC2SecurityGroupnodesadditionaluserdataexamplecom"
},
"FromPort": 2382,
"ToPort": 4000, "ToPort": 4000,
"IpProtocol": "tcp" "IpProtocol": "tcp"
} }

View File

@ -339,11 +339,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-complex-example-com.id}" security_group_id = "${aws_security_group.masters-complex-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-complex-example-com.id}" source_security_group_id = "${aws_security_group.nodes-complex-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-complex-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-complex-example-com.id}"
from_port = 2382
to_port = 4000 to_port = 4000
protocol = "tcp" protocol = "tcp"
} }

View File

@ -481,11 +481,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-ha-example-com.id}" security_group_id = "${aws_security_group.masters-ha-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-ha-example-com.id}" source_security_group_id = "${aws_security_group.nodes-ha-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-ha-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-ha-example-com.id}"
from_port = 2382
to_port = 4000 to_port = 4000
protocol = "tcp" protocol = "tcp"
} }

View File

@ -250,11 +250,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-privateweave-example-com.id}" security_group_id = "${aws_security_group.masters-privateweave-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privateweave-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
from_port = 2382
to_port = 4000 to_port = 4000
protocol = "tcp" protocol = "tcp"
} }

View File

@ -311,11 +311,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-minimal-141-example-com.id}" security_group_id = "${aws_security_group.masters-minimal-141-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-minimal-141-example-com.id}" source_security_group_id = "${aws_security_group.nodes-minimal-141-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-minimal-141-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-minimal-141-example-com.id}"
from_port = 2382
to_port = 4000 to_port = 4000
protocol = "tcp" protocol = "tcp"
} }

View File

@ -266,7 +266,7 @@
"CidrIp": "0.0.0.0/0" "CidrIp": "0.0.0.0/0"
} }
}, },
"AWSEC2SecurityGroupIngressnodetomastertcp14000": { "AWSEC2SecurityGroupIngressnodetomastertcp12379": {
"Type": "AWS::EC2::SecurityGroupIngress", "Type": "AWS::EC2::SecurityGroupIngress",
"Properties": { "Properties": {
"GroupId": { "GroupId": {
@ -276,6 +276,20 @@
"Ref": "AWSEC2SecurityGroupnodesminimalexamplecom" "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
}, },
"FromPort": 1, "FromPort": 1,
"ToPort": 2379,
"IpProtocol": "tcp"
}
},
"AWSEC2SecurityGroupIngressnodetomastertcp23824000": {
"Type": "AWS::EC2::SecurityGroupIngress",
"Properties": {
"GroupId": {
"Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
},
"SourceSecurityGroupId": {
"Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
},
"FromPort": 2382,
"ToPort": 4000, "ToPort": 4000,
"IpProtocol": "tcp" "IpProtocol": "tcp"
} }

View File

@ -311,11 +311,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-minimal-example-com.id}" security_group_id = "${aws_security_group.masters-minimal-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-minimal-example-com.id}" source_security_group_id = "${aws_security_group.nodes-minimal-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-minimal-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-minimal-example-com.id}"
from_port = 2382
to_port = 4000 to_port = 4000
protocol = "tcp" protocol = "tcp"
} }

View File

@ -591,11 +591,20 @@ resource "aws_security_group_rule" "node-to-master-protocol-ipip" {
protocol = "4" protocol = "4"
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4001" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-privatecalico-example-com.id}" security_group_id = "${aws_security_group.masters-privatecalico-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4001" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privatecalico-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}"
from_port = 2382
to_port = 4001 to_port = 4001
protocol = "tcp" protocol = "tcp"
} }

View File

@ -582,11 +582,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-privatecanal-example-com.id}" security_group_id = "${aws_security_group.masters-privatecanal-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privatecanal-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privatecanal-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privatecanal-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privatecanal-example-com.id}"
from_port = 2382
to_port = 4000 to_port = 4000
protocol = "tcp" protocol = "tcp"
} }

View File

@ -587,11 +587,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-privatedns1-example-com.id}" security_group_id = "${aws_security_group.masters-privatedns1-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privatedns1-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}"
from_port = 2382
to_port = 4000 to_port = 4000
protocol = "tcp" protocol = "tcp"
} }

View File

@ -573,11 +573,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-privatedns2-example-com.id}" security_group_id = "${aws_security_group.masters-privatedns2-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privatedns2-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}"
from_port = 2382
to_port = 4000 to_port = 4000
protocol = "tcp" protocol = "tcp"
} }

View File

@ -582,11 +582,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}" security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
# Ingress: worker nodes may reach masters on TCP 2382-4000. Together with the
# companion 1-2379 rule this covers 1-4000 while leaving 2380-2381 closed —
# presumably the etcd peer/events ports; confirm against the kops port map.
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
from_port = 2382
to_port = 4000
protocol = "tcp"
}

View File

@ -573,11 +573,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-privatekopeio-example-com.id}" security_group_id = "${aws_security_group.masters-privatekopeio-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privatekopeio-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privatekopeio-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
# Ingress: worker nodes may reach masters on TCP 2382-4000. Together with the
# companion 1-2379 rule this covers 1-4000 while leaving 2380-2381 closed —
# presumably the etcd peer/events ports; confirm against the kops port map.
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privatekopeio-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privatekopeio-example-com.id}"
from_port = 2382
to_port = 4000
protocol = "tcp"
}

View File

@ -582,11 +582,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-privateweave-example-com.id}" security_group_id = "${aws_security_group.masters-privateweave-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}" source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
# Ingress: worker nodes may reach masters on TCP 2382-4000. Together with the
# companion 1-2379 rule this covers 1-4000 while leaving 2380-2381 closed —
# presumably the etcd peer/events ports; confirm against the kops port map.
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privateweave-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
from_port = 2382
to_port = 4000
protocol = "tcp"
}

View File

@ -286,11 +286,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-sharedsubnet-example-com.id}" security_group_id = "${aws_security_group.masters-sharedsubnet-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-sharedsubnet-example-com.id}" source_security_group_id = "${aws_security_group.nodes-sharedsubnet-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
# Ingress: worker nodes may reach masters on TCP 2382-4000. Together with the
# companion 1-2379 rule this covers 1-4000 while leaving 2380-2381 closed —
# presumably the etcd peer/events ports; confirm against the kops port map.
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-sharedsubnet-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-sharedsubnet-example-com.id}"
from_port = 2382
to_port = 4000
protocol = "tcp"
}

View File

@ -302,11 +302,20 @@ resource "aws_security_group_rule" "node-egress" {
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
} }
resource "aws_security_group_rule" "node-to-master-tcp-1-4000" { resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
type = "ingress" type = "ingress"
security_group_id = "${aws_security_group.masters-sharedvpc-example-com.id}" security_group_id = "${aws_security_group.masters-sharedvpc-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-sharedvpc-example-com.id}" source_security_group_id = "${aws_security_group.nodes-sharedvpc-example-com.id}"
from_port = 1 from_port = 1
to_port = 2379
protocol = "tcp"
}
# Ingress: worker nodes may reach masters on TCP 2382-4000. Together with the
# companion 1-2379 rule this covers 1-4000 while leaving 2380-2381 closed —
# presumably the etcd peer/events ports; confirm against the kops port map.
resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
type = "ingress"
security_group_id = "${aws_security_group.masters-sharedvpc-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-sharedvpc-example-com.id}"
from_port = 2382
to_port = 4000
protocol = "tcp"
}

View File

@ -1,132 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: kopeio-auth
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: auth-portal
namespace: kopeio-auth
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
spec:
template:
metadata:
labels:
app: auth-portal
spec:
containers:
- name: auth-portal
image: kopeio/auth-portal:1.0.20170619
ports:
- containerPort: 8080
command:
- /auth-portal
---
apiVersion: v1
kind: Service
metadata:
name: auth-portal
namespace: kopeio-auth
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
spec:
selector:
app: auth-portal
ports:
- port: 80
targetPort: 8080
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: auth-api
namespace: kopeio-auth
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
spec:
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
app: auth-api
spec:
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
containers:
- name: auth-api
image: kopeio/auth-api:1.0.20170619
imagePullPolicy: Always
ports:
- containerPort: 9001
command:
- /auth-api
- --listen=127.0.0.1:9001
- --secure-port=9002
- --server=https://127.0.0.1:9002
- --insecure-skip-tls-verify
- --etcd-servers=http://127.0.0.1:4001
- --v=8
- --storage-backend=etcd2
---
apiVersion: v1
kind: Service
metadata:
name: auth-api
namespace: kopeio-auth
spec:
selector:
app: auth-api
ports:
- port: 443
targetPort: 9002
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
name: v1alpha1.auth.kope.io
spec:
insecureSkipTLSVerify: true
group: auth.kope.io
priority: 150
service:
name: auth-api
namespace: kopeio-auth
version: v1alpha1
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
name: v1alpha1.config.auth.kope.io
spec:
insecureSkipTLSVerify: true
group: config.auth.kope.io
priority: 150
service:
name: auth-api
namespace: kopeio-auth
version: v1alpha1

View File

@ -0,0 +1,185 @@
apiVersion: v1
kind: Namespace
metadata:
name: kopeio-auth
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
---
apiVersion: v1
kind: Service
metadata:
name: auth-api
namespace: kopeio-auth
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
spec:
selector:
app: auth-api
ports:
- port: 443
targetPort: 9002
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: auth-api
namespace: kopeio-auth
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
spec:
template:
metadata:
labels:
app: auth-api
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
serviceAccountName: auth-api
hostNetwork: true
nodeSelector:
node-role.kubernetes.io/master: ""
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
- name: auth-api
image: kopeio/auth-api:1.0.20171125
imagePullPolicy: Always
ports:
- containerPort: 9001
command:
- /auth-api
- --listen=127.0.0.1:9001
- --secure-port=9002
- --etcd-servers=http://127.0.0.1:4001
- --v=8
- --storage-backend=etcd2
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
name: v1alpha1.auth.kope.io
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
spec:
insecureSkipTLSVerify: true
group: auth.kope.io
groupPriorityMinimum: 1000
versionPriority: 15
service:
name: auth-api
namespace: kopeio-auth
version: v1alpha1
---
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
name: v1alpha1.config.auth.kope.io
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
spec:
insecureSkipTLSVerify: true
group: config.auth.kope.io
groupPriorityMinimum: 1000
versionPriority: 15
service:
name: auth-api
namespace: kopeio-auth
version: v1alpha1
---
kind: ServiceAccount
apiVersion: v1
metadata:
name: auth-api
namespace: kopeio-auth
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kopeio-auth:auth-api:auth-reader
namespace: kube-system
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
name: auth-api
namespace: kopeio-auth
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kopeio-auth:system:auth-delegator
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:auth-delegator
subjects:
- kind: ServiceAccount
name: auth-api
namespace: kopeio-auth
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: auth-api
namespace: kopeio-auth
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
rules:
- apiGroups: ["auth.kope.io"]
resources: ["users"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: auth-api
namespace: kopeio-auth
labels:
k8s-addon: authentication.kope.io
role.kubernetes.io/authentication: "1"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: auth-api
subjects:
- kind: ServiceAccount
name: auth-api
namespace: kopeio-auth

View File

@ -5,7 +5,7 @@ metadata:
name: calico-config name: calico-config
namespace: kube-system namespace: kube-system
data: data:
# The calico-etcd PetSet service IP:port # etcd servers
etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}} etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
{{- range $j, $member := $cluster.Members -}} {{- range $j, $member := $cluster.Members -}}
{{- if $j }},{{ end -}} {{- if $j }},{{ end -}}
@ -19,9 +19,6 @@ data:
cni_network_config: |- cni_network_config: |-
{ {
"name": "k8s-pod-network", "name": "k8s-pod-network",
"cniVersion": "0.3.0",
"plugins": [
{
"type": "calico", "type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__", "etcd_endpoints": "__ETCD_ENDPOINTS__",
"log_level": "info", "log_level": "info",
@ -36,15 +33,7 @@ data:
"kubernetes": { "kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__" "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
} }
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
} }
]
}
--- ---
kind: ClusterRole kind: ClusterRole
@ -133,12 +122,15 @@ spec:
operator: Exists operator: Exists
- effect: NoSchedule - effect: NoSchedule
operator: Exists operator: Exists
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
containers: containers:
# Runs calico/node container on each Kubernetes node. This # Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each # container programs network policy and routes on each
# host. # host.
- name: calico-node - name: calico-node
image: quay.io/calico/node:v2.4.1 image: quay.io/calico/node:v2.6.2
resources: resources:
requests: requests:
cpu: 10m cpu: 10m
@ -169,6 +161,14 @@ spec:
# Auto-detect the BGP IP address. # Auto-detect the BGP IP address.
- name: IP - name: IP
value: "" value: ""
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext: securityContext:
privileged: true privileged: true
volumeMounts: volumeMounts:
@ -185,7 +185,7 @@ spec:
# This container installs the Calico CNI binaries # This container installs the Calico CNI binaries
# and CNI network config file on each node. # and CNI network config file on each node.
- name: install-cni - name: install-cni
image: quay.io/calico/cni:v1.10.0 image: quay.io/calico/cni:v1.11.0
resources: resources:
requests: requests:
cpu: 10m cpu: 10m
@ -194,7 +194,7 @@ spec:
env: env:
# The name of calico config file # The name of calico config file
- name: CNI_CONF_NAME - name: CNI_CONF_NAME
value: 10-calico.conflist value: 10-calico.conf
# The location of the Calico etcd cluster. # The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS - name: ETCD_ENDPOINTS
valueFrom: valueFrom:
@ -237,8 +237,8 @@ spec:
--- ---
# This manifest deploys the Calico policy controller on Kubernetes. # This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then
# See https://github.com/projectcalico/k8s-policy # be removed entirely once the new kube-controllers deployment has been deployed above.
apiVersion: extensions/v1beta1 apiVersion: extensions/v1beta1
kind: Deployment kind: Deployment
metadata: metadata:
@ -246,35 +246,23 @@ metadata:
namespace: kube-system namespace: kube-system
labels: labels:
k8s-app: calico-policy k8s-app: calico-policy
role.kubernetes.io/networking: "1"
spec: spec:
# The policy controller can only have a single active instance. # Turn this deployment off in favor of the kube-controllers deployment above.
replicas: 1 replicas: 0
strategy:
type: Recreate
template: template:
metadata: metadata:
name: calico-policy-controller name: calico-policy-controller
namespace: kube-system namespace: kube-system
labels: labels:
k8s-app: calico-policy-controller k8s-app: calico-policy
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec: spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true hostNetwork: true
serviceAccountName: calico serviceAccountName: calico
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
containers: containers:
- name: calico-policy-controller - name: calico-policy-controller
image: quay.io/calico/kube-policy-controller:v0.7.0 image: quay.io/calico/kube-controllers:v1.0.0
resources:
requests:
cpu: 10m
env: env:
# The location of the Calico etcd cluster. # The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS - name: ETCD_ENDPOINTS
@ -282,15 +270,6 @@ spec:
configMapKeyRef: configMapKeyRef:
name: calico-config name: calico-config
key: etcd_endpoints key: etcd_endpoints
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
volumeMounts: volumeMounts:
# Necessary for gossip based DNS # Necessary for gossip based DNS
@ -301,6 +280,55 @@ spec:
- name: etc-hosts - name: etc-hosts
hostPath: hostPath:
path: /etc/hosts path: /etc/hosts
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
spec:
# The controllers can only have a single active instance.
replicas: 1
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
role.kubernetes.io/networking: "1"
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The controllers must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
serviceAccountName: calico
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
- key: CriticalAddonsOnly
operator: Exists
containers:
- name: calico-kube-controllers
image: quay.io/calico/kube-controllers:v1.0.0
resources:
requests:
cpu: 10m
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}} {{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
# This manifest installs the k8s-ec2-srcdst container, which disables # This manifest installs the k8s-ec2-srcdst container, which disables

View File

@ -16,11 +16,10 @@ data:
calico_backend: "bird" calico_backend: "bird"
# The CNI network configuration to install on each node. # The CNI network configuration to install on each node.
# cniVersion should be 0.1.0 on k8s: https://github.com/projectcalico/calico/issues/742
cni_network_config: |- cni_network_config: |-
{ {
"name": "k8s-pod-network", "name": "k8s-pod-network",
"cniVersion": "0.1.0", "cniVersion": "0.3.0",
"plugins": [ "plugins": [
{ {
"type": "calico", "type": "calico",

View File

@ -137,7 +137,7 @@ spec:
effect: NoSchedule effect: NoSchedule
containers: containers:
- name: romana-daemon - name: romana-daemon
image: quay.io/romana/daemon:v2.0-preview.2 image: quay.io/romana/daemon:v2.0.0
imagePullPolicy: Always imagePullPolicy: Always
resources: resources:
requests: requests:
@ -170,7 +170,7 @@ spec:
effect: NoSchedule effect: NoSchedule
containers: containers:
- name: romana-listener - name: romana-listener
image: quay.io/romana/listener:v2.0-preview.2 image: quay.io/romana/listener:v2.0.0
imagePullPolicy: Always imagePullPolicy: Always
resources: resources:
requests: requests:
@ -185,6 +185,8 @@ metadata:
name: romana-agent name: romana-agent
namespace: kube-system namespace: kube-system
spec: spec:
updateStrategy:
type: RollingUpdate
template: template:
metadata: metadata:
labels: labels:
@ -200,7 +202,7 @@ spec:
effect: NoSchedule effect: NoSchedule
containers: containers:
- name: romana-agent - name: romana-agent
image: quay.io/romana/agent:v2.0-preview.2 image: quay.io/romana/agent:v2.0.0
imagePullPolicy: Always imagePullPolicy: Always
resources: resources:
requests: requests:
@ -213,6 +215,10 @@ spec:
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: spec.nodeName fieldPath: spec.nodeName
- name: NODEIP
valueFrom:
fieldRef:
fieldPath: status.hostIP
args: args:
- --service-cluster-ip-range={{ .ServiceClusterIPRange }} - --service-cluster-ip-range={{ .ServiceClusterIPRange }}
securityContext: securityContext:
@ -299,7 +305,7 @@ spec:
effect: NoSchedule effect: NoSchedule
containers: containers:
- name: romana-aws - name: romana-aws
image: quay.io/romana/aws:v2.0-preview.2 image: quay.io/romana/aws:v2.0.0
imagePullPolicy: Always imagePullPolicy: Always
resources: resources:
requests: requests:
@ -328,7 +334,7 @@ spec:
effect: NoSchedule effect: NoSchedule
containers: containers:
- name: romana-vpcrouter - name: romana-vpcrouter
image: quay.io/romana/vpcrouter-romana-plugin image: quay.io/romana/vpcrouter-romana-plugin:1.1.12
imagePullPolicy: Always imagePullPolicy: Always
resources: resources:
requests: requests:

View File

@ -62,7 +62,8 @@ func NewClientsetCAStore(cluster *kops.Cluster, clientset kopsinternalversion.Ko
return c return c
} }
// readCAKeypairs retrieves the CA keypair, generating a new keypair if not found // readCAKeypairs retrieves the CA keypair.
// (No longer generates a keypair if not found.)
func (c *ClientsetCAStore) readCAKeypairs(id string) (*keyset, error) { func (c *ClientsetCAStore) readCAKeypairs(id string) (*keyset, error) {
c.mutex.Lock() c.mutex.Lock()
defer c.mutex.Unlock() defer c.mutex.Unlock()
@ -78,14 +79,9 @@ func (c *ClientsetCAStore) readCAKeypairs(id string) (*keyset, error) {
} }
if keyset == nil { if keyset == nil {
keyset, err = c.generateCACertificate(id) return nil, nil
if err != nil {
return nil, err
}
} }
c.cachedCaKeysets[id] = keyset c.cachedCaKeysets[id] = keyset
return keyset, nil return keyset, nil
} }

View File

@ -185,6 +185,14 @@ func NewAWSCloud(region string, tags map[string]string) (AWSCloud, error) {
config = config.WithCredentialsChainVerboseErrors(true) config = config.WithCredentialsChainVerboseErrors(true)
config = request.WithRetryer(config, newLoggingRetryer(ClientMaxRetries)) config = request.WithRetryer(config, newLoggingRetryer(ClientMaxRetries))
// We have the updated aws sdk from 1.9, but don't have https://github.com/kubernetes/kubernetes/pull/55307
// Set the SleepDelay function to work around this
// TODO: Remove once we update to k8s >= 1.9 (or a version of the retry delayer than includes this)
config.SleepDelay = func(d time.Duration) {
glog.V(6).Infof("aws request sleeping for %v", d)
time.Sleep(d)
}
requestLogger := newRequestLogger(2) requestLogger := newRequestLogger(2)
sess, err := session.NewSession(config) sess, err := session.NewSession(config)

View File

@ -133,8 +133,11 @@ func (c *MockAWSCloud) BuildTags(name *string) map[string]string {
} }
func (c *MockAWSCloud) Tags() map[string]string { func (c *MockAWSCloud) Tags() map[string]string {
glog.Fatalf("MockAWSCloud Tags not implemented") tags := make(map[string]string)
return nil for k, v := range c.tags {
tags[k] = v
}
return tags
} }
func (c *MockAWSCloud) CreateTags(resourceId string, tags map[string]string) error { func (c *MockAWSCloud) CreateTags(resourceId string, tags map[string]string) error {

View File

@ -29,6 +29,7 @@ import (
"k8s.io/kops/upup/pkg/fi/utils" "k8s.io/kops/upup/pkg/fi/utils"
) )
// BootstrapChannelBuilder is responsible for handling the addons in channels
type BootstrapChannelBuilder struct { type BootstrapChannelBuilder struct {
cluster *kops.Cluster cluster *kops.Cluster
Lifecycle *fi.Lifecycle Lifecycle *fi.Lifecycle
@ -38,6 +39,7 @@ type BootstrapChannelBuilder struct {
var _ fi.ModelBuilder = &BootstrapChannelBuilder{} var _ fi.ModelBuilder = &BootstrapChannelBuilder{}
// Build is responsible for adding the addons to the channel
func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error { func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error {
addons, manifests, err := b.buildManifest() addons, manifests, err := b.buildManifest()
@ -184,6 +186,9 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
manifests[key] = "addons/" + location manifests[key] = "addons/" + location
} }
// @check the dns-controller has not been disabled
externalDNS := b.cluster.Spec.ExternalDNS
if externalDNS == nil || !externalDNS.Disable {
{ {
key := "dns-controller.addons.k8s.io" key := "dns-controller.addons.k8s.io"
version := "1.8.0-beta.1" version := "1.8.0-beta.1"
@ -218,6 +223,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
manifests[key+"-"+id] = "addons/" + location manifests[key+"-"+id] = "addons/" + location
} }
} }
}
if featureflag.EnableExternalDNS.Enabled() { if featureflag.EnableExternalDNS.Enabled() {
{ {
@ -468,11 +474,10 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
if b.cluster.Spec.Networking.Calico != nil { if b.cluster.Spec.Networking.Calico != nil {
key := "networking.projectcalico.org" key := "networking.projectcalico.org"
// 2.6.3-kops.1 = 2.6.2 with kops manifest tweaks. This should go away with the next version bump.
versions := map[string]string{ versions := map[string]string{
"pre-k8s-1.6": "2.4.1", "pre-k8s-1.6": "2.4.1",
"k8s-1.6": "2.4.2-kops.1", "k8s-1.6": "2.6.2",
"k8s-1.8": "2.6.3-kops.1", "k8s-1.7": "2.6.2",
} }
{ {
@ -499,14 +504,14 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
Version: fi.String(versions[id]), Version: fi.String(versions[id]),
Selector: networkingSelector, Selector: networkingSelector,
Manifest: fi.String(location), Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0 <1.8.0", KubernetesVersion: ">=1.6.0 <1.7.0",
Id: id, Id: id,
}) })
manifests[key+"-"+id] = "addons/" + location manifests[key+"-"+id] = "addons/" + location
} }
{ {
id := "k8s-1.8" id := "k8s-1.7"
location := key + "/" + id + ".yaml" location := key + "/" + id + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
@ -514,7 +519,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
Version: fi.String(versions[id]), Version: fi.String(versions[id]),
Selector: networkingSelector, Selector: networkingSelector,
Manifest: fi.String(location), Manifest: fi.String(location),
KubernetesVersion: ">=1.8.0", KubernetesVersion: ">=1.7.0",
Id: id, Id: id,
}) })
manifests[key+"-"+id] = "addons/" + location manifests[key+"-"+id] = "addons/" + location
@ -598,18 +603,18 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
if b.cluster.Spec.Networking.Romana != nil { if b.cluster.Spec.Networking.Romana != nil {
key := "networking.romana" key := "networking.romana"
version := "v2.0-preview.3" version := "v2.0.0"
{ {
location := key + "/k8s-1.6.yaml" location := key + "/k8s-1.7.yaml"
id := "k8s-1.6" id := "k8s-1.7"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key), Name: fi.String(key),
Version: fi.String(version), Version: fi.String(version),
Selector: networkingSelector, Selector: networkingSelector,
Manifest: fi.String(location), Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0", KubernetesVersion: ">=1.7.0",
Id: id, Id: id,
}) })
manifests[key+"-"+id] = "addons/" + location manifests[key+"-"+id] = "addons/" + location
@ -620,18 +625,18 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
if b.cluster.Spec.Authentication != nil && b.cluster.Spec.Authentication.Kopeio != nil { if b.cluster.Spec.Authentication != nil && b.cluster.Spec.Authentication.Kopeio != nil {
key := "authentication.kope.io" key := "authentication.kope.io"
version := "1.0.20170619" version := "1.0.20171125"
{ {
location := key + "/k8s-1.6.yaml" location := key + "/k8s-1.8.yaml"
id := "k8s-1.6" id := "k8s-1.8"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{ addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key), Name: fi.String(key),
Version: fi.String(version), Version: fi.String(version),
Selector: authenticationSelector, Selector: authenticationSelector,
Manifest: fi.String(location), Manifest: fi.String(location),
KubernetesVersion: ">=1.6.0", KubernetesVersion: ">=1.8.0",
Id: id, Id: id,
}) })
manifests[key+"-"+id] = "addons/" + location manifests[key+"-"+id] = "addons/" + location

View File

@ -12,7 +12,13 @@ go_library(
"//pkg/apis/kops:go_default_library", "//pkg/apis/kops:go_default_library",
"//pkg/cloudinstances:go_default_library", "//pkg/cloudinstances:go_default_library",
"//upup/pkg/fi:go_default_library", "//upup/pkg/fi:go_default_library",
"//util/pkg/vfs:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/gophercloud/gophercloud:go_default_library",
"//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library",
"//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/kubernetes/federation/pkg/dnsprovider:go_default_library", "//vendor/k8s.io/kubernetes/federation/pkg/dnsprovider:go_default_library",
], ],
) )

View File

@ -18,25 +18,90 @@ package openstack
import ( import (
"fmt" "fmt"
"time"
"github.com/golang/glog"
"github.com/gophercloud/gophercloud"
os "github.com/gophercloud/gophercloud/openstack"
cinder "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kops/pkg/apis/kops" "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/cloudinstances" "k8s.io/kops/pkg/cloudinstances"
"k8s.io/kops/upup/pkg/fi" "k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/util/pkg/vfs"
"k8s.io/kubernetes/federation/pkg/dnsprovider" "k8s.io/kubernetes/federation/pkg/dnsprovider"
) )
// TagNameEtcdClusterPrefix is the metadata-key prefix marking a resource as
// belonging to an etcd cluster.
const TagNameEtcdClusterPrefix = "k8s.io/etcd/"
// TagNameRolePrefix is the metadata-key prefix recording a resource's role.
const TagNameRolePrefix = "k8s.io/role/"
// TagClusterName is the metadata key recording which cluster owns a resource.
const TagClusterName = "KubernetesCluster"
// Retry policies for OpenStack API calls; reads give up one step sooner
// than writes.
var (
	// readBackoff is the backoff strategy for openstack read retries.
	readBackoff = wait.Backoff{
		Duration: time.Second,
		Factor:   1.5,
		Jitter:   0.1,
		Steps:    4,
	}

	// writeBackoff is the backoff strategy for openstack write retries.
	writeBackoff = wait.Backoff{
		Duration: time.Second,
		Factor:   1.5,
		Jitter:   0.1,
		Steps:    5,
	}
)
type OpenstackCloud interface { type OpenstackCloud interface {
fi.Cloud fi.Cloud
// SetVolumeTags will set the tags for the Cinder volume
SetVolumeTags(id string, tags map[string]string) error
// GetCloudTags will return the tags attached on cloud
GetCloudTags() map[string]string
// ListVolumes will return the Cinder volumes which match the options
ListVolumes(opt cinder.ListOpts) ([]cinder.Volume, error)
// CreateVolume will create a new Cinder Volume
CreateVolume(opt cinder.CreateOpts) (*cinder.Volume, error)
} }
type openstackCloud struct { type openstackCloud struct {
cinderClient *gophercloud.ServiceClient
tags map[string]string
} }
var _ fi.Cloud = &openstackCloud{} var _ fi.Cloud = &openstackCloud{}
func NewOpenstackCloud() (OpenstackCloud, error) { func NewOpenstackCloud(tags map[string]string) (OpenstackCloud, error) {
return &openstackCloud{}, nil config := vfs.OpenstackConfig{}
authOption, err := config.GetCredential()
if err != nil {
return nil, err
}
provider, err := os.AuthenticatedClient(authOption)
if err != nil {
return nil, fmt.Errorf("error building openstack authenticated client: %v", err)
}
endpointOpt, err := config.GetServiceConfig("Cinder")
if err != nil {
return nil, err
}
cinderClient, err := os.NewBlockStorageV2(provider, endpointOpt)
if err != nil {
return nil, fmt.Errorf("error building swift client: %v", err)
}
c := &openstackCloud{
cinderClient: cinderClient,
tags: tags,
}
return c, nil
} }
func (c *openstackCloud) ProviderID() kops.CloudProviderID { func (c *openstackCloud) ProviderID() kops.CloudProviderID {
@ -62,3 +127,78 @@ func (c *openstackCloud) DeleteGroup(g *cloudinstances.CloudInstanceGroup) error
func (c *openstackCloud) GetCloudGroups(cluster *kops.Cluster, instancegroups []*kops.InstanceGroup, warnUnmatched bool, nodes []v1.Node) (map[string]*cloudinstances.CloudInstanceGroup, error) { func (c *openstackCloud) GetCloudGroups(cluster *kops.Cluster, instancegroups []*kops.InstanceGroup, warnUnmatched bool, nodes []v1.Node) (map[string]*cloudinstances.CloudInstanceGroup, error) {
return nil, fmt.Errorf("openstackCloud::GetCloudGroups not implemented") return nil, fmt.Errorf("openstackCloud::GetCloudGroups not implemented")
} }
// SetVolumeTags writes the given metadata tags onto the Cinder volume with
// the given ID, retrying with backoff on transient failures. A nil/empty tag
// map is a no-op; an empty ID is rejected.
func (c *openstackCloud) SetVolumeTags(id string, tags map[string]string) error {
	if len(tags) == 0 {
		return nil
	}
	if id == "" {
		return fmt.Errorf("error setting tags to unknown volume")
	}
	glog.V(4).Infof("setting tags to cinder volume %q: %v", id, tags)

	updateOpts := cinder.UpdateOpts{Metadata: tags}
	done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
		if _, updateErr := cinder.Update(c.cinderClient, id, updateOpts).Extract(); updateErr != nil {
			return false, fmt.Errorf("error setting tags to cinder volume %q: %v", id, updateErr)
		}
		return true, nil
	})
	if err != nil {
		return err
	}
	if !done {
		// Retries were exhausted without a definitive success.
		return wait.ErrWaitTimeout
	}
	return nil
}
// GetCloudTags returns the tag set this cloud was constructed with; these are
// attached to every resource kops manages for the cluster (see NewOpenstackCloud).
func (c *openstackCloud) GetCloudTags() map[string]string {
	return c.tags
}
// ListVolumes returns the Cinder volumes matching the given list options,
// retrying with backoff on transient failures.
func (c *openstackCloud) ListVolumes(opt cinder.ListOpts) ([]cinder.Volume, error) {
	var volumes []cinder.Volume

	done, err := vfs.RetryWithBackoff(readBackoff, func() (bool, error) {
		pages, listErr := cinder.List(c.cinderClient, opt).AllPages()
		if listErr != nil {
			return false, fmt.Errorf("error listing volumes %v: %v", opt, listErr)
		}
		extracted, extractErr := cinder.ExtractVolumes(pages)
		if extractErr != nil {
			return false, fmt.Errorf("error extracting volumes from pages: %v", extractErr)
		}
		volumes = extracted
		return true, nil
	})
	if err != nil {
		return volumes, err
	}
	if !done {
		// Retries were exhausted without a definitive success.
		return volumes, wait.ErrWaitTimeout
	}
	return volumes, nil
}
// CreateVolume creates a new Cinder volume with the given options, retrying
// with backoff on transient failures, and returns the created volume.
func (c *openstackCloud) CreateVolume(opt cinder.CreateOpts) (*cinder.Volume, error) {
	var created *cinder.Volume

	done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
		v, createErr := cinder.Create(c.cinderClient, opt).Extract()
		if createErr != nil {
			return false, fmt.Errorf("error creating volume %v: %v", opt, createErr)
		}
		created = v
		return true, nil
	})
	if err != nil {
		return created, err
	}
	if !done {
		// Retries were exhausted without a definitive success.
		return created, wait.ErrWaitTimeout
	}
	return created, nil
}

View File

@ -0,0 +1,14 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
# Library target for the openstacktasks package (OpenStack/Cinder fi tasks).
go_library(
    name = "go_default_library",
    srcs = ["volume.go"],
    importpath = "k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks",
    visibility = ["//visibility:public"],
    deps = [
        "//upup/pkg/fi:go_default_library",
        "//upup/pkg/fi/cloudup/openstack:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes:go_default_library",
    ],
)

View File

@ -0,0 +1,145 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstacktasks
import (
"fmt"
"github.com/golang/glog"
cinder "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/openstack"
)
// Volume is an fi.Task modelling an OpenStack Cinder block-storage volume.
type Volume struct {
	// ID is the Cinder volume ID; nil until the volume exists (set by RenderOpenstack).
	ID *string
	// Name is the volume name used to find the volume in the cloud.
	Name *string
	// AvailabilityZone is the zone the volume is created in; immutable after creation.
	AvailabilityZone *string
	// VolumeType is the Cinder volume type; immutable after creation.
	VolumeType *string
	// SizeGB is the volume size in gigabytes; immutable after creation.
	SizeGB *int64
	// Tags holds the metadata tags; the cluster-wide cloud tags are merged in by Run.
	Tags map[string]string
	// Lifecycle controls how the task is applied.
	Lifecycle *fi.Lifecycle
}
// Compile-time check that Volume implements fi.CompareWithID.
var _ fi.CompareWithID = &Volume{}

// CompareWithID returns the cloud ID used to match this task against
// existing resources (nil while the volume has not been created).
func (c *Volume) CompareWithID() *string {
	return c.ID
}
// Find looks up the Cinder volume matching this task's name and the cluster's
// cloud tags. It returns nil (no error) when no such volume exists yet, and an
// error when the match is ambiguous.
func (c *Volume) Find(context *fi.Context) (*Volume, error) {
	cloud := context.Cloud.(openstack.OpenstackCloud)

	listOpts := cinder.ListOpts{
		Name:     fi.StringValue(c.Name),
		Metadata: cloud.GetCloudTags(),
	}
	matches, err := cloud.ListVolumes(listOpts)
	if err != nil {
		return nil, err
	}
	if len(matches) == 0 {
		return nil, nil
	}
	if len(matches) > 1 {
		return nil, fmt.Errorf("found multiple Volumes with name: %s", fi.StringValue(c.Name))
	}

	found := matches[0]
	return &Volume{
		ID:               fi.String(found.ID),
		Name:             fi.String(found.Name),
		AvailabilityZone: fi.String(found.AvailabilityZone),
		VolumeType:       fi.String(found.VolumeType),
		SizeGB:           fi.Int64(int64(found.Size)),
		Tags:             found.Metadata,
		Lifecycle:        c.Lifecycle,
	}, nil
}
// Run merges the cluster-wide cloud tags into the task's tag set and then
// invokes the default delta-run flow.
func (c *Volume) Run(context *fi.Context) error {
	cloud := context.Cloud.(openstack.OpenstackCloud)
	// Guard against a nil map: writing to a nil map panics, and Tags may be
	// unset when the task was built without explicit tags.
	if c.Tags == nil {
		c.Tags = make(map[string]string)
	}
	for k, v := range cloud.GetCloudTags() {
		c.Tags[k] = v
	}
	return fi.DefaultDeltaRunMethod(c, context)
}
// CheckChanges validates the task's fields: on creation (a == nil) all
// required fields must be set; on update, the immutable fields must not change.
func (_ *Volume) CheckChanges(a, e, changes *Volume) error {
	if a == nil {
		// Creation: every required field must be present.
		switch {
		case e.Name == nil:
			return fi.RequiredField("Name")
		case e.AvailabilityZone == nil:
			return fi.RequiredField("AvailabilityZone")
		case e.VolumeType == nil:
			return fi.RequiredField("VolumeType")
		case e.SizeGB == nil:
			return fi.RequiredField("SizeGB")
		}
		return nil
	}

	// Update: none of the immutable fields may differ.
	switch {
	case changes.ID != nil:
		return fi.CannotChangeField("ID")
	case changes.AvailabilityZone != nil:
		return fi.CannotChangeField("AvailabilityZone")
	case changes.VolumeType != nil:
		return fi.CannotChangeField("VolumeType")
	case changes.SizeGB != nil:
		return fi.CannotChangeField("SizeGB")
	}
	return nil
}
// RenderOpenstack applies the task against the OpenStack API: it creates the
// Cinder volume when it does not yet exist (a == nil), or updates its metadata
// tags when only the tags differ; otherwise it does nothing.
func (_ *Volume) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *Volume) error {
	if a == nil {
		glog.V(2).Infof("Creating PersistentVolume with Name:%q", fi.StringValue(e.Name))

		opt := cinder.CreateOpts{
			Size:             int(*e.SizeGB),
			AvailabilityZone: fi.StringValue(e.AvailabilityZone),
			Metadata:         e.Tags,
			Name:             fi.StringValue(e.Name),
			VolumeType:       fi.StringValue(e.VolumeType),
		}

		v, err := t.Cloud.CreateVolume(opt)
		if err != nil {
			return fmt.Errorf("error creating PersistentVolume: %v", err)
		}
		// Record the cloud-assigned ID on the expected state.
		e.ID = fi.String(v.ID)
		return nil
	}

	if changes != nil && changes.Tags != nil {
		glog.V(2).Infof("Update the tags on volume %q: %v, the differences are %v", fi.StringValue(e.ID), e.Tags, changes.Tags)

		err := t.Cloud.SetVolumeTags(fi.StringValue(e.ID), e.Tags)
		if err != nil {
			return fmt.Errorf("error updating the tags on volume %q: %v", fi.StringValue(e.ID), err)
		}
		// Fix: return here so the "did nothing" message below is only logged
		// when no action was actually taken (previously it was logged even
		// after a successful tag update, which was misleading).
		return nil
	}

	glog.V(2).Infof("Openstack task Volume::RenderOpenstack did nothing")
	return nil
}

View File

@ -133,7 +133,8 @@ func BuildCloud(cluster *kops.Cluster) (fi.Cloud, error) {
} }
case kops.CloudProviderOpenstack: case kops.CloudProviderOpenstack:
{ {
osc, err := openstack.NewOpenstackCloud() cloudTags := map[string]string{openstack.TagClusterName: cluster.ObjectMeta.Name}
osc, err := openstack.NewOpenstackCloud(cloudTags)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -1,4 +1,4 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library") load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library( go_library(
name = "go_default_library", name = "go_default_library",
@ -27,3 +27,12 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
], ],
) )
go_test(
name = "go_default_test",
size = "small",
srcs = ["keypair_test.go"],
importpath = "k8s.io/kops/upup/pkg/fi/fitasks",
library = ":go_default_library",
deps = ["//upup/pkg/fi:go_default_library"],
)

View File

@ -0,0 +1,44 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fitasks
import (
"k8s.io/kops/upup/pkg/fi"
"strings"
"testing"
)
// TestKeypairDeps verifies task dependency discovery for keypairs: a
// certificate signed by a CA depends on that CA, while the CA itself has
// no dependencies.
func TestKeypairDeps(t *testing.T) {
	ca := &Keypair{}
	cert := &Keypair{Signer: ca}

	tasks := map[string]fi.Task{
		"ca":   ca,
		"cert": cert,
	}

	deps := fi.FindTaskDependencies(tasks)
	if got := strings.Join(deps["ca"], ","); got != "" {
		t.Errorf("unexpected dependencies for ca: %v", deps["ca"])
	}
	if got := strings.Join(deps["cert"], ","); got != "ca" {
		t.Errorf("unexpected dependencies for cert: %v", deps["cert"])
	}
}

View File

@ -32,8 +32,10 @@ type SecretStore interface {
DeleteSecret(item *KeystoreItem) error DeleteSecret(item *KeystoreItem) error
// FindSecret finds a secret, if exists. Returns nil,nil if not found // FindSecret finds a secret, if exists. Returns nil,nil if not found
FindSecret(id string) (*Secret, error) FindSecret(id string) (*Secret, error)
// GetOrCreateSecret creates or replace a secret // GetOrCreateSecret creates a secret
GetOrCreateSecret(id string, secret *Secret) (current *Secret, created bool, err error) GetOrCreateSecret(id string, secret *Secret) (current *Secret, created bool, err error)
// ReplaceSecret will forcefully update an existing secret if it exists
ReplaceSecret(id string, secret *Secret) (current *Secret, err error)
// ListSecrets lists the ids of all known secrets // ListSecrets lists the ids of all known secrets
ListSecrets() ([]string, error) ListSecrets() ([]string, error)

View File

@ -157,7 +157,7 @@ func (c *ClientsetSecretStore) GetOrCreateSecret(name string, secret *fi.Secret)
return s, false, nil return s, false, nil
} }
_, err = c.createSecret(secret, name) _, err = c.createSecret(secret, name, false)
if err != nil { if err != nil {
if errors.IsAlreadyExists(err) && i == 0 { if errors.IsAlreadyExists(err) && i == 0 {
glog.Infof("Got already-exists error when writing secret; likely due to concurrent creation. Will retry") glog.Infof("Got already-exists error when writing secret; likely due to concurrent creation. Will retry")
@ -181,6 +181,21 @@ func (c *ClientsetSecretStore) GetOrCreateSecret(name string, secret *fi.Secret)
return s, true, nil return s, true, nil
} }
// ReplaceSecret implements fi.SecretStore::ReplaceSecret
// It forcefully writes the secret (overwriting any existing value) and then
// re-reads it to confirm the write took effect.
func (c *ClientsetSecretStore) ReplaceSecret(name string, secret *fi.Secret) (*fi.Secret, error) {
	_, err := c.createSecret(secret, name, true)
	if err != nil {
		return nil, fmt.Errorf("unable to write secret: %v", err)
	}

	// Confirm the secret exists
	// Fix: corrected "immmediately" -> "immediately" in the error message.
	s, err := c.loadSecret(name)
	if err != nil {
		return nil, fmt.Errorf("unable to load secret immediately after creation: %v", err)
	}
	return s, nil
}
// loadSecret returns the named secret, if it exists, otherwise returns nil // loadSecret returns the named secret, if it exists, otherwise returns nil
func (c *ClientsetSecretStore) loadSecret(name string) (*fi.Secret, error) { func (c *ClientsetSecretStore) loadSecret(name string) (*fi.Secret, error) {
name = NamePrefix + name name = NamePrefix + name
@ -207,8 +222,8 @@ func parseSecret(keyset *kops.Keyset) (*fi.Secret, error) {
return s, nil return s, nil
} }
// createSecret writes the secret, but only if it does not exist // createSecret will create the Secret, overwriting an existing secret if replace is true
func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string) (*kops.Keyset, error) { func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string, replace bool) (*kops.Keyset, error) {
keyset := &kops.Keyset{} keyset := &kops.Keyset{}
keyset.Name = NamePrefix + name keyset.Name = NamePrefix + name
keyset.Spec.Type = kops.SecretTypeSecret keyset.Spec.Type = kops.SecretTypeSecret
@ -221,5 +236,8 @@ func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string) (*kops.Ke
PrivateMaterial: s.Data, PrivateMaterial: s.Data,
}) })
if replace {
return c.clientset.Keysets(c.namespace).Update(keyset)
}
return c.clientset.Keysets(c.namespace).Create(keyset) return c.clientset.Keysets(c.namespace).Create(keyset)
} }

View File

@ -127,7 +127,7 @@ func (c *VFSSecretStore) GetOrCreateSecret(id string, secret *fi.Secret) (*fi.Se
return nil, false, err return nil, false, err
} }
err = c.createSecret(secret, p, acl) err = c.createSecret(secret, p, acl, false)
if err != nil { if err != nil {
if os.IsExist(err) && i == 0 { if os.IsExist(err) && i == 0 {
glog.Infof("Got already-exists error when writing secret; likely due to concurrent creation. Will retry") glog.Infof("Got already-exists error when writing secret; likely due to concurrent creation. Will retry")
@ -151,6 +151,27 @@ func (c *VFSSecretStore) GetOrCreateSecret(id string, secret *fi.Secret) (*fi.Se
return s, true, nil return s, true, nil
} }
// ReplaceSecret implements fi.SecretStore::ReplaceSecret
// It forcefully writes the secret to the VFS path (overwriting any existing
// value) and then re-reads it to confirm the write took effect. Doc comment
// added for consistency with the Clientset implementation.
func (c *VFSSecretStore) ReplaceSecret(id string, secret *fi.Secret) (*fi.Secret, error) {
	p := c.buildSecretPath(id)

	acl, err := acls.GetACL(p, c.cluster)
	if err != nil {
		return nil, err
	}

	err = c.createSecret(secret, p, acl, true)
	if err != nil {
		return nil, fmt.Errorf("unable to write secret: %v", err)
	}

	// Confirm the secret exists
	// Fix: corrected "immmediately" -> "immediately" in the error message.
	s, err := c.loadSecret(p)
	if err != nil {
		return nil, fmt.Errorf("unable to load secret immediately after creation %v: %v", p, err)
	}
	return s, nil
}
func (c *VFSSecretStore) loadSecret(p vfs.Path) (*fi.Secret, error) { func (c *VFSSecretStore) loadSecret(p vfs.Path) (*fi.Secret, error) {
data, err := p.ReadFile() data, err := p.ReadFile()
if err != nil { if err != nil {
@ -166,11 +187,15 @@ func (c *VFSSecretStore) loadSecret(p vfs.Path) (*fi.Secret, error) {
return s, nil return s, nil
} }
// createSecret writes the secret, but only if it does not exists // createSecret will create the Secret, overwriting an existing secret if replace is true
func (c *VFSSecretStore) createSecret(s *fi.Secret, p vfs.Path, acl vfs.ACL) error { func (c *VFSSecretStore) createSecret(s *fi.Secret, p vfs.Path, acl vfs.ACL, replace bool) error {
data, err := json.Marshal(s) data, err := json.Marshal(s)
if err != nil { if err != nil {
return fmt.Errorf("error serializing secret: %v", err) return fmt.Errorf("error serializing secret: %v", err)
} }
if replace {
return p.WriteFile(data, acl)
}
return p.CreateFile(data, acl) return p.CreateFile(data, acl)
} }

View File

@ -68,7 +68,7 @@ func (s *VFSCAStore) VFSPath() vfs.Path {
return s.basedir return s.basedir
} }
// Retrieves the CA keypair, generating a new keypair if not found // Retrieves the CA keypair. No longer generates keypairs if not found.
func (s *VFSCAStore) readCAKeypairs(id string) (*certificates, *privateKeys, error) { func (s *VFSCAStore) readCAKeypairs(id string) (*certificates, *privateKeys, error) {
s.mutex.Lock() s.mutex.Lock()
defer s.mutex.Unlock() defer s.mutex.Unlock()
@ -98,16 +98,15 @@ func (s *VFSCAStore) readCAKeypairs(id string) (*certificates, *privateKeys, err
} }
if caPrivateKeys == nil { if caPrivateKeys == nil {
caCertificates, caPrivateKeys, err = s.generateCACertificate(id) // We no longer generate CA certificates automatically - too race-prone
if err != nil { return caCertificates, caPrivateKeys, nil
return nil, nil, err
} }
}
cached = &cachedEntry{certificates: caCertificates, privateKeys: caPrivateKeys} cached = &cachedEntry{certificates: caCertificates, privateKeys: caPrivateKeys}
s.cachedCAs[id] = cached s.cachedCAs[id] = cached
return cached.certificates, cached.privateKeys, nil return cached.certificates, cached.privateKeys, nil
} }
func BuildCAX509Template() *x509.Certificate { func BuildCAX509Template() *x509.Certificate {