mirror of https://github.com/kubernetes/kops.git
Merge remote-tracking branch 'upstream/master' into additional_cidr
This commit is contained in:
commit 2594d382ae

Makefile
@@ -587,7 +587,7 @@ kops-server-push: kops-server-build
 
 .PHONY: bazel-test
 bazel-test:
-	bazel ${BAZEL_OPTIONS} test //cmd/... //pkg/... //channels/... //nodeup/... //channels/... //protokube/... //dns-controller/... //upup/... //util/... //hack:verify-all --test_output=errors
+	bazel ${BAZEL_OPTIONS} test //cmd/... //pkg/... //channels/... //nodeup/... //channels/... //protokube/... //dns-controller/... //tests/... //upup/... //util/... //hack:verify-all --test_output=errors
 
 .PHONY: bazel-build
 bazel-build:
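For reference, the broadened target is invoked the same way as before; a minimal run (assuming a checked-out kops workspace with Bazel installed) looks like:

```bash
# Runs the Bazel test suite, which after this change also covers //tests/...
make bazel-test

# BAZEL_OPTIONS is a Makefile variable, so extra flags can be supplied on the
# command line (the flag shown is just an illustrative assumption):
make bazel-test BAZEL_OPTIONS="--jobs=4"
```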

OWNERS
@@ -5,3 +5,5 @@ approvers:
 - zmerlynn
 - andrewsykim
 - geojaz
+- kashifsaadat
+- gambol99

@@ -13,6 +13,9 @@ spec:
   - name: kope.io/k8s-1.7-debian-jessie-amd64-hvm-ebs-2017-07-28
     providerID: aws
     kubernetesVersion: ">=1.7.0"
+  - name: kope.io/k8s-1.8-debian-jessie-amd64-hvm-ebs-2017-11-27
+    providerID: aws
+    kubernetesVersion: ">=1.8.0"
   - providerID: gce
     name: "cos-cloud/cos-stable-60-9592-90-0"
 cluster:
@@ -21,13 +24,13 @@ spec:
     kubenet: {}
   kubernetesVersions:
   - range: ">=1.8.0"
-    recommendedVersion: 1.8.3
+    recommendedVersion: 1.8.4
    requiredVersion: 1.8.0
   - range: ">=1.7.0"
-    recommendedVersion: 1.7.10
+    recommendedVersion: 1.7.11
    requiredVersion: 1.7.0
   - range: ">=1.6.0"
-    recommendedVersion: 1.6.11
+    recommendedVersion: 1.6.13
    requiredVersion: 1.6.0
   - range: ">=1.5.0"
     recommendedVersion: 1.5.8
@@ -39,15 +42,15 @@ spec:
   - range: ">=1.8.0-alpha.1"
     recommendedVersion: 1.8.0-beta.1
     #requiredVersion: 1.8.0
-    kubernetesVersion: 1.8.3
+    kubernetesVersion: 1.8.4
   - range: ">=1.7.0-alpha.1"
     recommendedVersion: 1.7.1
     #requiredVersion: 1.7.0
-    kubernetesVersion: 1.7.10
+    kubernetesVersion: 1.7.11
   - range: ">=1.6.0-alpha.1"
     #recommendedVersion: 1.6.0
     #requiredVersion: 1.6.0
-    kubernetesVersion: 1.6.11
+    kubernetesVersion: 1.6.13
   - range: ">=1.5.0-alpha1"
     recommendedVersion: 1.5.1
     #requiredVersion: 1.5.1

@@ -24,10 +24,10 @@ spec:
     recommendedVersion: 1.7.10
     requiredVersion: 1.7.0
   - range: ">=1.6.0"
-    recommendedVersion: 1.6.7
+    recommendedVersion: 1.6.11
     requiredVersion: 1.6.0
   - range: ">=1.5.0"
-    recommendedVersion: 1.5.7
+    recommendedVersion: 1.5.8
     requiredVersion: 1.5.1
   - range: "<1.5.0"
     recommendedVersion: 1.4.12
@@ -40,11 +40,11 @@ spec:
   - range: ">=1.6.0-alpha.1"
     #recommendedVersion: 1.6.0
     #requiredVersion: 1.6.0
-    kubernetesVersion: 1.6.7
+    kubernetesVersion: 1.6.11
   - range: ">=1.5.0-alpha1"
     recommendedVersion: 1.5.1
     #requiredVersion: 1.5.1
-    kubernetesVersion: 1.5.7
+    kubernetesVersion: 1.5.8
   - range: "<1.5.0"
     recommendedVersion: 1.4.4
     #requiredVersion: 1.4.4

@@ -32,7 +32,7 @@ import (
 
 var (
     create_secret_dockerconfig_long = templates.LongDesc(i18n.T(`
-    Create a new docker config, and store it in the state store.
+    Create a new docker config, and store it in the state store.
     Used to configure docker on each master or node (ie. for auth)
     Use update to modify it, this command will only create a new entry.`))
 
@@ -40,6 +40,9 @@ var (
     # Create an new docker config.
     kops create secret dockerconfig -f /path/to/docker/config.json \
         --name k8s-cluster.example.com --state s3://example.com
+    # Replace an existing docker config secret.
+    kops create secret dockerconfig -f /path/to/docker/config.json --force \
+        --name k8s-cluster.example.com --state s3://example.com
     `))
 
     create_secret_dockerconfig_short = i18n.T(`Create a docker config.`)
@@ -48,6 +51,7 @@ var (
 type CreateSecretDockerConfigOptions struct {
     ClusterName      string
     DockerConfigPath string
+    Force            bool
 }
 
 func NewCmdCreateSecretDockerConfig(f *util.Factory, out io.Writer) *cobra.Command {
@@ -78,6 +82,7 @@ func NewCmdCreateSecretDockerConfig(f *util.Factory, out io.Writer) *cobra.Comma
     }
 
     cmd.Flags().StringVarP(&options.DockerConfigPath, "", "f", "", "Path to docker config JSON file")
+    cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Force replace the kops secret if it already exists")
 
     return cmd
 }
@@ -119,9 +124,19 @@ func RunCreateSecretDockerConfig(f *util.Factory, out io.Writer, options *Create
 
     secret.Data = data
 
-    _, _, err = secretStore.GetOrCreateSecret("dockerconfig", secret)
-    if err != nil {
-        return fmt.Errorf("error adding docker config secret: %v", err)
+    if !options.Force {
+        _, created, err := secretStore.GetOrCreateSecret("dockerconfig", secret)
+        if err != nil {
+            return fmt.Errorf("error adding dockerconfig secret: %v", err)
+        }
+        if !created {
+            return fmt.Errorf("failed to create the dockerconfig secret as it already exists. The `--force` flag can be passed to replace an existing secret.")
+        }
+    } else {
+        _, err := secretStore.ReplaceSecret("dockerconfig", secret)
+        if err != nil {
+            return fmt.Errorf("error updating dockerconfig secret: %v", err)
+        }
     }
 
     return nil

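The change above introduces a create-or-replace pattern around the secret store. The following standalone sketch (not kops code; the store type and method names are hypothetical stand-ins for `GetOrCreateSecret`/`ReplaceSecret`) shows the same `--force` semantics in isolation:

```go
package main

import (
	"errors"
	"fmt"
)

type store map[string][]byte

// getOrCreate mirrors GetOrCreateSecret: it only writes when the entry is
// absent, and reports whether a new entry was created.
func (s store) getOrCreate(name string, data []byte) (created bool) {
	if _, ok := s[name]; ok {
		return false
	}
	s[name] = data
	return true
}

// replace mirrors ReplaceSecret: it writes unconditionally.
func (s store) replace(name string, data []byte) {
	s[name] = data
}

func createSecret(s store, name string, data []byte, force bool) error {
	if !force {
		if created := s.getOrCreate(name, data); !created {
			return errors.New("secret already exists; pass --force to replace it")
		}
		return nil
	}
	s.replace(name, data)
	return nil
}

func main() {
	s := store{}
	fmt.Println(createSecret(s, "dockerconfig", []byte("a"), false)) // <nil>
	fmt.Println(createSecret(s, "dockerconfig", []byte("b"), false)) // error: already exists
	fmt.Println(createSecret(s, "dockerconfig", []byte("b"), true))  // <nil>, replaced
}
```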
@@ -40,6 +40,9 @@ var (
     # Create a new encryption config.
     kops create secret encryptionconfig -f config.yaml \
         --name k8s-cluster.example.com --state s3://example.com
+    # Replace an existing encryption config secret.
+    kops create secret encryptionconfig -f config.yaml --force \
+        --name k8s-cluster.example.com --state s3://example.com
     `))
 
     create_secret_encryptionconfig_short = i18n.T(`Create an encryption config.`)
@@ -48,6 +51,7 @@ var (
 type CreateSecretEncryptionConfigOptions struct {
     ClusterName          string
     EncryptionConfigPath string
+    Force                bool
 }
 
 func NewCmdCreateSecretEncryptionConfig(f *util.Factory, out io.Writer) *cobra.Command {
@@ -78,6 +82,7 @@ func NewCmdCreateSecretEncryptionConfig(f *util.Factory, out io.Writer) *cobra.C
     }
 
     cmd.Flags().StringVarP(&options.EncryptionConfigPath, "", "f", "", "Path to encryption config yaml file")
+    cmd.Flags().BoolVar(&options.Force, "force", options.Force, "Force replace the kops secret if it already exists")
 
     return cmd
 }
@@ -120,9 +125,19 @@ func RunCreateSecretEncryptionConfig(f *util.Factory, out io.Writer, options *Cr
 
     secret.Data = data
 
-    _, _, err = secretStore.GetOrCreateSecret("encryptionconfig", secret)
-    if err != nil {
-        return fmt.Errorf("error adding encryption config secret: %v", err)
+    if !options.Force {
+        _, created, err := secretStore.GetOrCreateSecret("encryptionconfig", secret)
+        if err != nil {
+            return fmt.Errorf("error adding encryptionconfig secret: %v", err)
+        }
+        if !created {
+            return fmt.Errorf("failed to create the encryptionconfig secret as it already exists. The `--force` flag can be passed to replace an existing secret.")
+        }
+    } else {
+        _, err := secretStore.ReplaceSecret("encryptionconfig", secret)
+        if err != nil {
+            return fmt.Errorf("error updating encryptionconfig secret: %v", err)
+        }
     }
 
     return nil

@@ -0,0 +1,34 @@
+# Authentication
+
+Kops has support for configuring authentication systems. This support is
+currently highly experimental, and should not be used with kubernetes versions
+before 1.8.5 because of a serious bug with apimachinery (#55022)[https://github.com/kubernetes/kubernetes/issues/55022].
+
+## kopeio authentication
+
+If you want to experiment with kopeio authentication, you can use
+`--authentication kopeio`. However please be aware that kopeio authentication
+has not yet been formally released, and thus there is not a lot of upstream
+documentation.
+
+Alternatively, you can add this block to your cluster:
+
+```
+authentication:
+  kopeio: {}
+```
+
+For example:
+
+```
+apiVersion: kops/v1alpha2
+kind: Cluster
+metadata:
+  name: cluster.example.com
+spec:
+  authentication:
+    kopeio: {}
+  authorization:
+    rbac: {}
+```

@@ -20,12 +20,16 @@ kops create secret dockerconfig
 # Create an new docker config.
 kops create secret dockerconfig -f /path/to/docker/config.json \
 	--name k8s-cluster.example.com --state s3://example.com
+# Replace an existing docker config secret.
+kops create secret dockerconfig -f /path/to/docker/config.json --force \
+	--name k8s-cluster.example.com --state s3://example.com
 ```
 
 ### Options
 
 ```
   -f, -- string   Path to docker config JSON file
+      --force     Force replace the kops secret if it already exists
 ```
 
 ### Options inherited from parent commands

@@ -20,12 +20,16 @@ kops create secret encryptionconfig
 # Create a new encryption config.
 kops create secret encryptionconfig -f config.yaml \
 	--name k8s-cluster.example.com --state s3://example.com
+# Replace an existing encryption config secret.
+kops create secret encryptionconfig -f config.yaml --force \
+	--name k8s-cluster.example.com --state s3://example.com
 ```
 
 ### Options
 
 ```
   -f, -- string   Path to encryption config yaml file
+      --force     Force replace the kops secret if it already exists
 ```
 
 ### Options inherited from parent commands

@@ -1,6 +1,6 @@
 # Cluster Templating
 
-The command `kops replace` can replace a cluster desired configuration from the config in a yaml file (see [/cli/kops_replace.md](/cli/kops_replace.md)).
+The command `kops replace` can replace a cluster desired configuration from the config in a yaml file (see [cli/kops_replace.md](cli/kops_replace.md)).
 
 It is possible to generate that yaml file from a template, using the command `kops toolbox template` (see [cli/kops_toolbox_template.md](cli/kops_toolbox_template.md)).
 
@@ -45,7 +45,7 @@ Running `kops toolbox template` replaces the placeholders in the template by val
 
 Note: when creating a cluster desired configuration template, you can
 
-- use `kops get k8s-cluster.example.com -o yaml > cluster-desired-config.yaml` to create the cluster desired configuration file (see [cli/kops_get.md](cli/kops_get.md)). The values in this file are defined in [cli/cluster_spec.md](cli/cluster_spec.md).
+- use `kops get k8s-cluster.example.com -o yaml > cluster-desired-config.yaml` to create the cluster desired configuration file (see [cli/kops_get.md](cli/kops_get.md)). The values in this file are defined in [cluster_spec.md](cluster_spec.md).
 - replace values by placeholders in that file to create the template.
 
 ### Templates

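For orientation (not part of this diff), a typical render-then-replace flow with these commands might look like the sketch below; the `kops toolbox template` flag names are assumptions based on its documentation of this period and should be verified against your kops version:

```bash
# Render a cluster spec from a template plus a values file, then load it into
# the state store and preview the resulting changes.
kops toolbox template \
  --template cluster.tmpl.yaml \
  --values values.yaml > cluster-desired-config.yaml
kops replace -f cluster-desired-config.yaml \
  --name k8s-cluster.example.com --state s3://example.com
kops update cluster k8s-cluster.example.com --state s3://example.com
```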
@@ -1,3 +1,26 @@
+# Installing Kops via Hombrew
+
+Homebrew makes installing kops [very simple for MacOS.](../install.md)
+```bash
+brew update && brew install kops
+```
+
+Development Releases and master can also be installed via Homebrew very easily:
+```bash
+# Development Release
+brew update && brew install kops --devel
+# HEAD of master
+brew update && brew install kops --HEAD
+```
+
+Note: if you already have kops installed, you need to substitute `upgrade` for `install`.
+
+You can switch between development and stable releases with:
+```bash
+brew switch kops 1.7.1
+brew switch kops 1.8.0-beta.1
+```
+
 # Releasing kops to Brew
 
 Submitting a new release of kops to Homebrew is very simple.
@@ -8,11 +31,20 @@ Submitting a new release of kops to Homebrew is very simple.
 This will automatically update the provided fields and open a PR for you.
 More details on this script are located [here.](https://github.com/Homebrew/brew/blob/master/Library/Homebrew/dev-cmd/bump-formula-pr.rb)
 
+We now include both major and development releases in homebrew. A development version can be updated by adding the `--devel` flag.
+
 Example usage:
-```
+```bash
+# Major Version
 brew bump-formula-pr \
   --url=https://github.com/kubernetes/kops/archive/1.7.1.tar.gz \
   --sha256=044c5c7a737ed3acf53517e64bb27d3da8f7517d2914df89efeeaf84bc8a722a
+
+# Development Version
+brew bump-formula-pr \
+  --devel \
+  --url=https://github.com/kubernetes/kops/archive/1.8.0-beta.1.tar.gz \
+  --sha256=81026d6c1cd7b3898a88275538a7842b4bd8387775937e0528ccb7b83948abf1
 ```
 
 * Update the URL variable to the tar.gz of the new release source code

@@ -8,6 +8,8 @@ From Homebrew:
 brew update && brew install kops
 ```
 
+Developers can also easily install [development releases](development/homebrew.md).
+
 From Github:
 
 ```bash

@@ -14,6 +14,13 @@ or `--networking flannel-udp` can be specified to explicitly choose a backend mo
 See the *Changes to k8s-policy* section in the
 [Calico release notes](https://github.com/projectcalico/calico/releases/tag/v2.4.0)
 for help.
+* Due to `ThirdPartyResources` becoming fully deprecated in Kubernetes v1.8 (replaced by `CustomResourceDefinitions`), existing Canal users upgrading their Clusters to Kubernetes v1.8 must follow the below TPR->CRD migration steps:
+  1. Run: `kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/v2.6.2/upgrade/v2.5/manifests/upgrade-job.yaml`
+  2. Retrieve the pod name from describing the job: `kubectl describe job/calico-upgrade-v2.5`
+  3. Validate the last log line from the pod reports that it completed successfully: `kubectl logs calico-upgrade-v2.5-<random-id>`
+  4. Update the `KubernetesVersion` within your ClusterSpec to v1.8 (or above), performing an update & rolling-update to all nodes (will involve downtime)
+  5. Confirm cluster is back up and all canal pods are running successfully: `kops validate cluster` (this may take a few minutes for the cluster to fully validate)
+  6. Delete the upgrade job as it is no longer required: `kubectl delete job calico-upgrade-v2.5` (you can also safely delete the `clusterrole`, `clusterrolebinding` and `serviceaccount` resources that were created by the above manifest file)
 
 # Full changelist
 

@@ -1,13 +1,33 @@
-## How to update Kops - Kubernetes Ops
+# Updating kops (Binaries)
 
-Update the latest source code from kubernetes/kops
+## MacOS
 
-```
-cd ${GOPATH}/src/k8s.io/kops/
-git pull && make
-```
+From Homebrew:
 
-Alternatively, if you installed from Homebrew
-```
+```bash
 brew update && brew upgrade kops
 ```
+
+From Github:
+
+```bash
+rm -rf /usr/local/bin/kops
+wget -O kops https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-darwin-amd64
+chmod +x ./kops
+sudo mv ./kops /usr/local/bin/
+```
+
+You can also rerun rerun [these steps](development/building.md) if previously built from source.
+
+## Linux
+
+From Github:
+
+```bash
+rm -rf /usr/local/bin/kops
+wget -O kops https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64
+chmod +x ./kops
+sudo mv ./kops /usr/local/bin/
+```
+
+You can also rerun rerun [these steps](development/building.md) if previously built from source.

@@ -123,6 +123,7 @@ k8s.io/kops/upup/pkg/fi/cloudup/dotasks
 k8s.io/kops/upup/pkg/fi/cloudup/gce
 k8s.io/kops/upup/pkg/fi/cloudup/gcetasks
 k8s.io/kops/upup/pkg/fi/cloudup/openstack
+k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks
 k8s.io/kops/upup/pkg/fi/cloudup/terraform
 k8s.io/kops/upup/pkg/fi/cloudup/vsphere
 k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks

@@ -40,7 +40,7 @@ func (f *FileAssetsBuilder) Build(c *fi.ModelBuilderContext) error {
     // used to keep track of previous file, so a instanceGroup can override a cluster wide one
     tracker := make(map[string]bool, 0)
     // ensure the default path exists
-    c.AddTask(&nodetasks.File{
+    c.EnsureTask(&nodetasks.File{
         Path: f.FileAssetsDefaultPath(),
         Type: nodetasks.FileType_Directory,
         Mode: s("0755"),
@@ -88,8 +88,8 @@ func (f *FileAssetsBuilder) buildFileAssets(c *fi.ModelBuilderContext, assets []
             content = string(decoded)
         }
 
-        // @check if the directory structure exist or create it
-        c.AddTask(&nodetasks.File{
+        // We use EnsureTask so that we don't have to check if the asset directories have already been done
+        c.EnsureTask(&nodetasks.File{
             Path: filepath.Dir(assetPath),
             Type: nodetasks.FileType_Directory,
             Mode: s("0755"),

@@ -55,7 +55,7 @@ func (h *HookBuilder) Build(c *fi.ModelBuilderContext) error {
         case "":
             name = fmt.Sprintf("kops-hook-%d", j)
             if isInstanceGroup {
-                name = fmt.Sprintf("%s-ig", name)
+                name += "-ig"
             }
         default:
             name = hook.Name
@@ -72,7 +72,7 @@ func (h *HookBuilder) Build(c *fi.ModelBuilderContext) error {
             enabled := false
             managed := true
             c.AddTask(&nodetasks.Service{
-                Name:        hook.Name,
+                Name:        ensureSystemdSuffix(name),
                 ManageState: &managed,
                 Enabled:     &enabled,
                 Running:     &enabled,
@@ -94,6 +94,14 @@ func (h *HookBuilder) Build(c *fi.ModelBuilderContext) error {
     return nil
 }
 
+// ensureSystemdSuffix makes sure that we have a .service suffix on the name, needed on needed versions of systems
+func ensureSystemdSuffix(name string) string {
+    if !strings.HasSuffix(name, ".service") && !strings.HasSuffix(name, ".timer") {
+        name += ".service"
+    }
+    return name
+}
+
 // buildSystemdService is responsible for generating the service
 func (h *HookBuilder) buildSystemdService(name string, hook *kops.HookSpec) (*nodetasks.Service, error) {
     // perform some basic validation
@@ -130,7 +138,7 @@ func (h *HookBuilder) buildSystemdService(name string, hook *kops.HookSpec) (*no
     }
 
     service := &nodetasks.Service{
-        Name:       name,
+        Name:       ensureSystemdSuffix(name),
         Definition: s(unit.Render()),
     }
 

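To see what the new helper does to hook names, here is a standalone sketch reusing the exact logic of `ensureSystemdSuffix` from the hunk above:

```go
package main

import (
	"fmt"
	"strings"
)

// Same logic as ensureSystemdSuffix in the diff above: append ".service"
// unless the name already carries a recognized systemd unit suffix.
func ensureSystemdSuffix(name string) string {
	if !strings.HasSuffix(name, ".service") && !strings.HasSuffix(name, ".timer") {
		name += ".service"
	}
	return name
}

func main() {
	fmt.Println(ensureSystemdSuffix("kops-hook-0"))     // kops-hook-0.service
	fmt.Println(ensureSystemdSuffix("cleanup.timer"))   // cleanup.timer (unchanged)
	fmt.Println(ensureSystemdSuffix("my-hook.service")) // my-hook.service (unchanged)
}
```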
@@ -244,8 +244,11 @@ type RBACAuthorizationSpec struct {
 type AlwaysAllowAuthorizationSpec struct {
 }
 
+// AccessSpec provides configuration details related to kubeapi dns and ELB access
 type AccessSpec struct {
-    DNS *DNSAccessSpec `json:"dns,omitempty"`
+    // DNS wil be used to provide config on kube-apiserver elb dns
+    DNS *DNSAccessSpec `json:"dns,omitempty"`
+    // LoadBalancer is the configuration for the kube-apiserver ELB
     LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"`
 }
 
@@ -285,6 +288,8 @@ type KubeDNSConfig struct {
 
 // ExternalDNSConfig are options of the dns-controller
 type ExternalDNSConfig struct {
+    // Disable indicates we do not wish to run the dns-controller addon
     Disable bool `json:"disable,omitempty"`
+    // WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources
     WatchIngress *bool `json:"watchIngress,omitempty"`
     // WatchNamespace is namespace to watch, detaults to all (use to control whom can creates dns entries)
@@ -323,6 +323,9 @@ type KubeControllerManagerConfig struct {
     // long the autoscaler has to wait before another upscale operation can
     // be performed after the current one has completed.
     HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"`
+    // HorizontalPodAutoscalerUseRestClients determines if the new-style clients
+    // should be used if support for custom metrics is enabled.
+    HorizontalPodAutoscalerUseRestClients *bool `json:"horizontalPodAutoscalerUseRestClients,omitempty" flag:"horizontal-pod-autoscaler-use-rest-clients"`
     // FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
     FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
 }

@@ -243,8 +243,11 @@ type RBACAuthorizationSpec struct {
 type AlwaysAllowAuthorizationSpec struct {
 }
 
+// AccessSpec provides configuration details related to kubeapi dns and ELB access
 type AccessSpec struct {
-    DNS *DNSAccessSpec `json:"dns,omitempty"`
+    // DNS wil be used to provide config on kube-apiserver elb dns
+    DNS *DNSAccessSpec `json:"dns,omitempty"`
+    // LoadBalancer is the configuration for the kube-apiserver ELB
     LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"`
 }
 
@@ -284,6 +287,8 @@ type KubeDNSConfig struct {
 
 // ExternalDNSConfig are options of the dns-controller
 type ExternalDNSConfig struct {
+    // Disable indicates we do not wish to run the dns-controller addon
     Disable bool `json:"disable,omitempty"`
+    // WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources
     WatchIngress *bool `json:"watchIngress,omitempty"`
     // WatchNamespace is namespace to watch, detaults to all (use to control whom can creates dns entries)
@@ -323,6 +323,9 @@ type KubeControllerManagerConfig struct {
     // long the autoscaler has to wait before another upscale operation can
     // be performed after the current one has completed.
     HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"`
+    // HorizontalPodAutoscalerUseRestClients determines if the new-style clients
+    // should be used if support for custom metrics is enabled.
+    HorizontalPodAutoscalerUseRestClients *bool `json:"horizontalPodAutoscalerUseRestClients,omitempty" flag:"horizontal-pod-autoscaler-use-rest-clients"`
     // FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
     FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
 }

@@ -1266,6 +1266,7 @@ func Convert_kops_ExecContainerAction_To_v1alpha1_ExecContainerAction(in *kops.E
 }
 
 func autoConvert_v1alpha1_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDNSConfig, out *kops.ExternalDNSConfig, s conversion.Scope) error {
+    out.Disable = in.Disable
     out.WatchIngress = in.WatchIngress
     out.WatchNamespace = in.WatchNamespace
     return nil
@@ -1277,6 +1278,7 @@ func Convert_v1alpha1_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDN
 }
 
 func autoConvert_kops_ExternalDNSConfig_To_v1alpha1_ExternalDNSConfig(in *kops.ExternalDNSConfig, out *ExternalDNSConfig, s conversion.Scope) error {
+    out.Disable = in.Disable
     out.WatchIngress = in.WatchIngress
     out.WatchNamespace = in.WatchNamespace
     return nil
@@ -1944,6 +1946,7 @@ func autoConvert_v1alpha1_KubeControllerManagerConfig_To_kops_KubeControllerMana
     out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
     out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
     out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
+    out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
     out.FeatureGates = in.FeatureGates
     return nil
 }
@@ -1979,6 +1982,7 @@ func autoConvert_kops_KubeControllerManagerConfig_To_v1alpha1_KubeControllerMana
     out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
     out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
     out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
+    out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
     out.FeatureGates = in.FeatureGates
     return nil
 }

@@ -21,11 +21,10 @@ limitations under the License.
 package v1alpha1
 
 import (
-    reflect "reflect"
-
     v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     conversion "k8s.io/apimachinery/pkg/conversion"
     runtime "k8s.io/apimachinery/pkg/runtime"
+    reflect "reflect"
 )
 
 func init() {
@@ -2179,6 +2178,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
             **out = **in
         }
     }
+    if in.HorizontalPodAutoscalerUseRestClients != nil {
+        in, out := &in.HorizontalPodAutoscalerUseRestClients, &out.HorizontalPodAutoscalerUseRestClients
+        if *in == nil {
+            *out = nil
+        } else {
+            *out = new(bool)
+            **out = **in
+        }
+    }
     if in.FeatureGates != nil {
         in, out := &in.FeatureGates, &out.FeatureGates
         *out = make(map[string]string, len(*in))

@@ -244,8 +244,11 @@ type RBACAuthorizationSpec struct {
 type AlwaysAllowAuthorizationSpec struct {
 }
 
+// AccessSpec provides configuration details related to kubeapi dns and ELB access
 type AccessSpec struct {
-    DNS *DNSAccessSpec `json:"dns,omitempty"`
+    // DNS wil be used to provide config on kube-apiserver elb dns
+    DNS *DNSAccessSpec `json:"dns,omitempty"`
+    // LoadBalancer is the configuration for the kube-apiserver ELB
     LoadBalancer *LoadBalancerAccessSpec `json:"loadBalancer,omitempty"`
 }
 
@@ -282,6 +285,8 @@ type KubeDNSConfig struct {
 
 // ExternalDNSConfig are options of the dns-controller
 type ExternalDNSConfig struct {
+    // Disable indicates we do not wish to run the dns-controller addon
     Disable bool `json:"disable,omitempty"`
+    // WatchIngress indicates you want the dns-controller to watch and create dns entries for ingress resources
     WatchIngress *bool `json:"watchIngress,omitempty"`
     // WatchNamespace is namespace to watch, detaults to all (use to control whom can creates dns entries)
@@ -323,6 +323,9 @@ type KubeControllerManagerConfig struct {
     // long the autoscaler has to wait before another upscale operation can
     // be performed after the current one has completed.
     HorizontalPodAutoscalerUpscaleDelay *metav1.Duration `json:"horizontalPodAutoscalerUpscaleDelay,omitempty" flag:"horizontal-pod-autoscaler-upscale-delay"`
+    // HorizontalPodAutoscalerUseRestClients determines if the new-style clients
+    // should be used if support for custom metrics is enabled.
+    HorizontalPodAutoscalerUseRestClients *bool `json:"horizontalPodAutoscalerUseRestClients,omitempty" flag:"horizontal-pod-autoscaler-use-rest-clients"`
     // FeatureGates is set of key=value pairs that describe feature gates for alpha/experimental features.
     FeatureGates map[string]string `json:"featureGates,omitempty" flag:"feature-gates"`
 }

@@ -1375,6 +1375,7 @@ func Convert_kops_ExecContainerAction_To_v1alpha2_ExecContainerAction(in *kops.E
 }
 
 func autoConvert_v1alpha2_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDNSConfig, out *kops.ExternalDNSConfig, s conversion.Scope) error {
+    out.Disable = in.Disable
     out.WatchIngress = in.WatchIngress
     out.WatchNamespace = in.WatchNamespace
     return nil
@@ -1386,6 +1387,7 @@ func Convert_v1alpha2_ExternalDNSConfig_To_kops_ExternalDNSConfig(in *ExternalDN
 }
 
 func autoConvert_kops_ExternalDNSConfig_To_v1alpha2_ExternalDNSConfig(in *kops.ExternalDNSConfig, out *ExternalDNSConfig, s conversion.Scope) error {
+    out.Disable = in.Disable
     out.WatchIngress = in.WatchIngress
     out.WatchNamespace = in.WatchNamespace
     return nil
@@ -2206,6 +2208,7 @@ func autoConvert_v1alpha2_KubeControllerManagerConfig_To_kops_KubeControllerMana
     out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
     out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
     out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
+    out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
     out.FeatureGates = in.FeatureGates
     return nil
 }
@@ -2241,6 +2244,7 @@ func autoConvert_kops_KubeControllerManagerConfig_To_v1alpha2_KubeControllerMana
     out.HorizontalPodAutoscalerSyncPeriod = in.HorizontalPodAutoscalerSyncPeriod
     out.HorizontalPodAutoscalerDownscaleDelay = in.HorizontalPodAutoscalerDownscaleDelay
     out.HorizontalPodAutoscalerUpscaleDelay = in.HorizontalPodAutoscalerUpscaleDelay
+    out.HorizontalPodAutoscalerUseRestClients = in.HorizontalPodAutoscalerUseRestClients
     out.FeatureGates = in.FeatureGates
     return nil
 }

@@ -21,11 +21,10 @@ limitations under the License.
 package v1alpha2
 
 import (
-    reflect "reflect"
-
     v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     conversion "k8s.io/apimachinery/pkg/conversion"
     runtime "k8s.io/apimachinery/pkg/runtime"
+    reflect "reflect"
 )
 
 func init() {
@@ -2305,6 +2304,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
             **out = **in
         }
     }
+    if in.HorizontalPodAutoscalerUseRestClients != nil {
+        in, out := &in.HorizontalPodAutoscalerUseRestClients, &out.HorizontalPodAutoscalerUseRestClients
+        if *in == nil {
+            *out = nil
+        } else {
+            *out = new(bool)
+            **out = **in
+        }
+    }
     if in.FeatureGates != nil {
         in, out := &in.FeatureGates, &out.FeatureGates
         *out = make(map[string]string, len(*in))

@@ -483,9 +483,9 @@ func ValidateCluster(c *kops.Cluster, strict bool) *field.Error {
         }
     }
 
-    if kubernetesRelease.LT(semver.MustParse("1.6.0")) {
+    if kubernetesRelease.LT(semver.MustParse("1.7.0")) {
         if c.Spec.Networking != nil && c.Spec.Networking.Romana != nil {
-            return field.Invalid(fieldSpec.Child("Networking"), "romana", "romana networking is not supported with kubernetes versions 1.5 or lower")
+            return field.Invalid(fieldSpec.Child("Networking"), "romana", "romana networking is not supported with kubernetes versions 1.6 or lower")
         }
     }
 

@@ -21,11 +21,10 @@ limitations under the License.
 package kops
 
 import (
-    reflect "reflect"
-
     v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     conversion "k8s.io/apimachinery/pkg/conversion"
     runtime "k8s.io/apimachinery/pkg/runtime"
+    reflect "reflect"
 )
 
 func init() {
@@ -2524,6 +2523,15 @@ func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerCo
             **out = **in
         }
     }
+    if in.HorizontalPodAutoscalerUseRestClients != nil {
+        in, out := &in.HorizontalPodAutoscalerUseRestClients, &out.HorizontalPodAutoscalerUseRestClients
+        if *in == nil {
+            *out = nil
+        } else {
+            *out = new(bool)
+            **out = **in
+        }
+    }
     if in.FeatureGates != nil {
         in, out := &in.FeatureGates, &out.FeatureGates
         *out = make(map[string]string, len(*in))

@@ -17,8 +17,6 @@ limitations under the License.
 package scheme
 
 import (
-    os "os"
-
     announced "k8s.io/apimachinery/pkg/apimachinery/announced"
     registered "k8s.io/apimachinery/pkg/apimachinery/registered"
     v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,6 +24,7 @@ import (
     schema "k8s.io/apimachinery/pkg/runtime/schema"
     serializer "k8s.io/apimachinery/pkg/runtime/serializer"
     kops "k8s.io/kops/pkg/apis/kops/install"
+    os "os"
 )
 
 var Scheme = runtime.NewScheme()

@@ -17,8 +17,6 @@ limitations under the License.
 package scheme
 
 import (
-    os "os"
-
     announced "k8s.io/apimachinery/pkg/apimachinery/announced"
     registered "k8s.io/apimachinery/pkg/apimachinery/registered"
     v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,6 +24,7 @@ import (
     schema "k8s.io/apimachinery/pkg/runtime/schema"
     serializer "k8s.io/apimachinery/pkg/runtime/serializer"
     kops "k8s.io/kops/pkg/apis/kops/install"
+    os "os"
 )
 
 var Scheme = runtime.NewScheme()

@@ -36,6 +36,8 @@ go_library(
         "//upup/pkg/fi/cloudup/dotasks:go_default_library",
         "//upup/pkg/fi/cloudup/gce:go_default_library",
         "//upup/pkg/fi/cloudup/gcetasks:go_default_library",
+        "//upup/pkg/fi/cloudup/openstack:go_default_library",
+        "//upup/pkg/fi/cloudup/openstacktasks:go_default_library",
         "//upup/pkg/fi/fitasks:go_default_library",
         "//util/pkg/vfs:go_default_library",
         "//vendor/github.com/blang/semver:go_default_library",

@@ -52,11 +52,9 @@ func (b *FirewallModelBuilder) Build(c *fi.ModelBuilderContext) error {
 }
 
 func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) error {
-    name := "nodes." + b.ClusterName()
-
     {
         t := &awstasks.SecurityGroup{
-            Name:        s(name),
+            Name:        s(b.SecurityGroupName(kops.InstanceGroupRoleNode)),
             Lifecycle:   b.Lifecycle,
             VPC:         b.LinkToVPC(),
             Description: s("Security group for nodes"),
@@ -211,7 +209,16 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
 
     // TODO: Make less hacky
     // TODO: Fix management - we need a wildcard matcher now
-    tcpRanges := []portRange{{From: 1, To: 4000}, {From: 4003, To: 65535}}
+    tcpBlocked := make(map[int]bool)
+
+    // Don't allow nodes to access etcd client port
+    tcpBlocked[4001] = true
+    tcpBlocked[4002] = true
+
+    // Don't allow nodes to access etcd peer port
+    tcpBlocked[2380] = true
+    tcpBlocked[2381] = true
+
     udpRanges := []portRange{{From: 1, To: 65535}}
     protocols := []Protocol{}
 
@@ -219,14 +226,14 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
         // Calico needs to access etcd
         // TODO: Remove, replace with etcd in calico manifest
         glog.Warningf("Opening etcd port on masters for access from the nodes, for calico. This is unsafe in untrusted environments.")
-        tcpRanges = []portRange{{From: 1, To: 4001}, {From: 4003, To: 65535}}
+        tcpBlocked[4001] = false
         protocols = append(protocols, ProtocolIPIP)
     }
 
     if b.Cluster.Spec.Networking.Romana != nil {
         // Romana needs to access etcd
         glog.Warningf("Opening etcd port on masters for access from the nodes, for romana. This is unsafe in untrusted environments.")
-        tcpRanges = []portRange{{From: 1, To: 4001}, {From: 4003, To: 65535}}
+        tcpBlocked[4001] = false
         protocols = append(protocols, ProtocolIPIP)
     }
 
@@ -245,6 +252,21 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
             Protocol: s("udp"),
         })
     }
+
+    tcpRanges := []portRange{
+        {From: 1, To: 0},
+    }
+    for port := 1; port < 65536; port++ {
+        previous := &tcpRanges[len(tcpRanges)-1]
+        if !tcpBlocked[port] {
+            if (previous.To + 1) == port {
+                previous.To = port
+            } else {
+                tcpRanges = append(tcpRanges, portRange{From: port, To: port})
+            }
+        }
+    }
+
     for _, r := range tcpRanges {
         c.AddTask(&awstasks.SecurityGroupRule{
             Name: s(fmt.Sprintf("node-to-master-tcp-%d-%d", r.From, r.To)),
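To make the new range computation concrete, here is a standalone sketch (the same loop as above, lifted out of kops) that derives the contiguous allowed TCP ranges from the blocked-port set; with 2380, 2381, 4001 and 4002 blocked it produces exactly the 1-2379, 2382-4000 and 4003-65535 rules that appear in the updated test fixtures later in this diff:

```go
package main

import "fmt"

type portRange struct{ From, To int }

func main() {
	// Ports nodes may not reach on masters: etcd peer and client ports.
	tcpBlocked := map[int]bool{2380: true, 2381: true, 4001: true, 4002: true}

	// Seed with an empty range so the loop can either extend it or append.
	tcpRanges := []portRange{{From: 1, To: 0}}
	for port := 1; port < 65536; port++ {
		previous := &tcpRanges[len(tcpRanges)-1]
		if !tcpBlocked[port] {
			if previous.To+1 == port {
				previous.To = port // extend the current contiguous range
			} else {
				tcpRanges = append(tcpRanges, portRange{From: port, To: port})
			}
		}
	}
	fmt.Println(tcpRanges) // [{1 2379} {2382 4000} {4003 65535}]
}
```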
@@ -277,18 +299,19 @@ func (b *FirewallModelBuilder) applyNodeToMasterBlockSpecificPorts(c *fi.ModelBu
 }
 
 func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext) error {
-    name := "masters." + b.ClusterName()
-
     {
         t := &awstasks.SecurityGroup{
-            Name:        s(name),
+            Name:        s(b.SecurityGroupName(kops.InstanceGroupRoleMaster)),
             Lifecycle:   b.Lifecycle,
             VPC:         b.LinkToVPC(),
             Description: s("Security group for masters"),
             RemoveExtraRules: []string{
                 "port=22",   // SSH
                 "port=443",  // k8s api
-                "port=4001", // etcd main (etcd events is 4002)
+                "port=2380", // etcd main peer
+                "port=2381", // etcd events peer
+                "port=4001", // etcd main
+                "port=4002", // etcd events
                 "port=4789", // VXLAN
                 "port=179",  // Calico

@@ -30,6 +30,8 @@ import (
     "k8s.io/kops/upup/pkg/fi/cloudup/dotasks"
     "k8s.io/kops/upup/pkg/fi/cloudup/gce"
     "k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
+    "k8s.io/kops/upup/pkg/fi/cloudup/openstack"
+    "k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks"
 )
 
 const (
@@ -95,6 +97,11 @@ func (b *MasterVolumeBuilder) Build(c *fi.ModelBuilderContext) error {
             b.addVSphereVolume(c, name, volumeSize, zone, etcd, m, allMembers)
         case kops.CloudProviderBareMetal:
             glog.Fatalf("BareMetal not implemented")
+        case kops.CloudProviderOpenstack:
+            err = b.addOpenstackVolume(c, name, volumeSize, zone, etcd, m, allMembers)
+            if err != nil {
+                return err
+            }
         default:
             return fmt.Errorf("unknown cloudprovider %q", b.Cluster.Spec.CloudProvider)
         }
@@ -205,3 +212,33 @@ func (b *MasterVolumeBuilder) addGCEVolume(c *fi.ModelBuilderContext, name strin
 func (b *MasterVolumeBuilder) addVSphereVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd *kops.EtcdClusterSpec, m *kops.EtcdMemberSpec, allMembers []string) {
     fmt.Print("addVSphereVolume to be implemented")
 }
+
+func (b *MasterVolumeBuilder) addOpenstackVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, zone string, etcd *kops.EtcdClusterSpec, m *kops.EtcdMemberSpec, allMembers []string) error {
+    volumeType := fi.StringValue(m.VolumeType)
+    if volumeType == "" {
+        return fmt.Errorf("must set ETCDMemberSpec.VolumeType on Openstack platform")
+    }
+
+    // The tags are how protokube knows to mount the volume and use it for etcd
+    tags := make(map[string]string)
+    // Apply all user defined labels on the volumes
+    for k, v := range b.Cluster.Spec.CloudLabels {
+        tags[k] = v
+    }
+    // This is the configuration of the etcd cluster
+    tags[openstack.TagNameEtcdClusterPrefix+etcd.Name] = m.Name + "/" + strings.Join(allMembers, ",")
+    // This says "only mount on a master"
+    tags[openstack.TagNameRolePrefix+"master"] = "1"
+
+    t := &openstacktasks.Volume{
+        Name:             s(name),
+        AvailabilityZone: s(zone),
+        VolumeType:       s(volumeType),
+        SizeGB:           fi.Int64(int64(volumeSize)),
+        Tags:             tags,
+        Lifecycle:        b.Lifecycle,
+    }
+    c.AddTask(t)
+
+    return nil
+}

@@ -220,7 +220,7 @@ func addUntaggedRouteTables(cloud awsup.AWSCloud, clusterName string, resources
             continue
         }
 
-        t := buildTrackerForRouteTable(rt)
+        t := buildTrackerForRouteTable(rt, clusterName)
         if resources[t.Type+":"+t.ID] == nil {
             resources[t.Type+":"+t.ID] = t
         }
@@ -973,19 +973,20 @@ func ListRouteTables(cloud fi.Cloud, clusterName string) ([]*Resource, error) {
     var resourceTrackers []*Resource
 
     for _, rt := range routeTables {
-        resourceTracker := buildTrackerForRouteTable(rt)
+        resourceTracker := buildTrackerForRouteTable(rt, clusterName)
         resourceTrackers = append(resourceTrackers, resourceTracker)
     }
 
     return resourceTrackers, nil
 }
 
-func buildTrackerForRouteTable(rt *ec2.RouteTable) *Resource {
+func buildTrackerForRouteTable(rt *ec2.RouteTable, clusterName string) *Resource {
     resourceTracker := &Resource{
         Name:    FindName(rt.Tags),
         ID:      aws.StringValue(rt.RouteTableId),
         Type:    ec2.ResourceTypeRouteTable,
         Deleter: DeleteRouteTable,
+        Shared:  HasSharedTag(ec2.ResourceTypeRouteTable+":"+*rt.RouteTableId, rt.Tags, clusterName),
     }
 
     var blocks []string

@@ -88,3 +88,55 @@ func TestAddUntaggedRouteTables(t *testing.T) {
         t.Fatalf("expected=%q, actual=%q", expected, keys)
     }
 }
+
+func TestListRouteTables(t *testing.T) {
+    cloud := awsup.BuildMockAWSCloud("us-east-1", "abc")
+    //resources := make(map[string]*Resource)
+    clusterName := "me.example.com"
+    ownershipTagKey := "kubernetes.io/cluster/" + clusterName
+
+    c := &mockec2.MockEC2{}
+    cloud.MockEC2 = c
+
+    c.RouteTables = append(c.RouteTables, &ec2.RouteTable{
+        VpcId:        aws.String("vpc-1234"),
+        RouteTableId: aws.String("rt-shared"),
+        Tags: []*ec2.Tag{
+            {
+                Key:   aws.String("KubernetesCluster"),
+                Value: aws.String(clusterName),
+            },
+            {
+                Key:   aws.String(ownershipTagKey),
+                Value: aws.String("shared"),
+            },
+        },
+    })
+    c.RouteTables = append(c.RouteTables, &ec2.RouteTable{
+        VpcId:        aws.String("vpc-1234"),
+        RouteTableId: aws.String("rt-owned"),
+        Tags: []*ec2.Tag{
+            {
+                Key:   aws.String("KubernetesCluster"),
+                Value: aws.String(clusterName),
+            },
+            {
+                Key:   aws.String(ownershipTagKey),
+                Value: aws.String("owned"),
+            },
+        },
+    })
+
+    resources, err := ListRouteTables(cloud, clusterName)
+    if err != nil {
+        t.Fatalf("error listing route tables: %v", err)
+    }
+    for _, rt := range resources {
+        if rt.ID == "rt-shared" && !rt.Shared {
+            t.Fatalf("expected Shared: true, got: %v", rt.Shared)
+        }
+        if rt.ID == "rt-owned" && rt.Shared {
+            t.Fatalf("expected Shared: false, got: %v", rt.Shared)
+        }
+    }
+}

@@ -3,9 +3,19 @@ load("@io_bazel_rules_go//go:def.bzl", "go_test")
 go_test(
     name = "go_default_test",
     srcs = ["integration_test.go"],
+    data = [
+        "exported_testdata",  # keep
+        "//channels:channeldata",  # keep
+    ],
     importpath = "k8s.io/kops/tests/integration/channel",
     deps = [
         "//pkg/apis/kops:go_default_library",
         "//vendor/github.com/blang/semver:go_default_library",
     ],
 )
+
+filegroup(
+    name = "exported_testdata",
+    srcs = glob(["simple/**"]),
+    visibility = ["//visibility:public"],
+)

@@ -3,6 +3,9 @@ load("@io_bazel_rules_go//go:def.bzl", "go_test")
 go_test(
     name = "go_default_test",
     srcs = ["integration_test.go"],
+    data = [
+        "exported_testdata",  # keep
+    ],
     importpath = "k8s.io/kops/tests/integration/conversion",
     deps = [
         "//pkg/apis/kops:go_default_library",
@@ -14,3 +17,9 @@ go_test(
         "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
     ],
 )
+
+filegroup(
+    name = "exported_testdata",
+    srcs = glob(["minimal/**"]),
+    visibility = ["//visibility:public"],
+)

@@ -266,7 +266,7 @@
         "CidrIp": "0.0.0.0/0"
       }
     },
-    "AWSEC2SecurityGroupIngressnodetomastertcp14000": {
+    "AWSEC2SecurityGroupIngressnodetomastertcp12379": {
       "Type": "AWS::EC2::SecurityGroupIngress",
       "Properties": {
         "GroupId": {
@@ -276,6 +276,20 @@
           "Ref": "AWSEC2SecurityGroupnodesadditionaluserdataexamplecom"
        },
-        "ToPort": 4000,
+        "FromPort": 1,
+        "ToPort": 2379,
         "IpProtocol": "tcp"
       }
     },
+    "AWSEC2SecurityGroupIngressnodetomastertcp23824000": {
+      "Type": "AWS::EC2::SecurityGroupIngress",
+      "Properties": {
+        "GroupId": {
+          "Ref": "AWSEC2SecurityGroupmastersadditionaluserdataexamplecom"
+        },
+        "SourceSecurityGroupId": {
+          "Ref": "AWSEC2SecurityGroupnodesadditionaluserdataexamplecom"
+        },
+        "FromPort": 2382,
+        "ToPort": 4000,
+        "IpProtocol": "tcp"
+      }
+    },

@@ -339,11 +339,20 @@ resource "aws_security_group_rule" "node-egress" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
-resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
+resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
   type                     = "ingress"
   security_group_id        = "${aws_security_group.masters-complex-example-com.id}"
   source_security_group_id = "${aws_security_group.nodes-complex-example-com.id}"
   from_port                = 1
-  to_port                  = 4000
+  to_port                  = 2379
   protocol                 = "tcp"
 }
+
+resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
+  type                     = "ingress"
+  security_group_id        = "${aws_security_group.masters-complex-example-com.id}"
+  source_security_group_id = "${aws_security_group.nodes-complex-example-com.id}"
+  from_port                = 2382
+  to_port                  = 4000
+  protocol                 = "tcp"
+}

@@ -481,11 +481,20 @@ resource "aws_security_group_rule" "node-egress" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
-resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
+resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
   type                     = "ingress"
   security_group_id        = "${aws_security_group.masters-ha-example-com.id}"
   source_security_group_id = "${aws_security_group.nodes-ha-example-com.id}"
   from_port                = 1
-  to_port                  = 4000
+  to_port                  = 2379
   protocol                 = "tcp"
 }
+
+resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
+  type                     = "ingress"
+  security_group_id        = "${aws_security_group.masters-ha-example-com.id}"
+  source_security_group_id = "${aws_security_group.nodes-ha-example-com.id}"
+  from_port                = 2382
+  to_port                  = 4000
+  protocol                 = "tcp"
+}

@@ -250,11 +250,20 @@ resource "aws_security_group_rule" "node-egress" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
-resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
+resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
   type                     = "ingress"
   security_group_id        = "${aws_security_group.masters-privateweave-example-com.id}"
   source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
   from_port                = 1
-  to_port                  = 4000
+  to_port                  = 2379
   protocol                 = "tcp"
 }
+
+resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
+  type                     = "ingress"
+  security_group_id        = "${aws_security_group.masters-privateweave-example-com.id}"
+  source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
+  from_port                = 2382
+  to_port                  = 4000
+  protocol                 = "tcp"
+}

@@ -311,11 +311,20 @@ resource "aws_security_group_rule" "node-egress" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
-resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
+resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
   type                     = "ingress"
   security_group_id        = "${aws_security_group.masters-minimal-141-example-com.id}"
   source_security_group_id = "${aws_security_group.nodes-minimal-141-example-com.id}"
   from_port                = 1
-  to_port                  = 4000
+  to_port                  = 2379
   protocol                 = "tcp"
 }
+
+resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
+  type                     = "ingress"
+  security_group_id        = "${aws_security_group.masters-minimal-141-example-com.id}"
+  source_security_group_id = "${aws_security_group.nodes-minimal-141-example-com.id}"
+  from_port                = 2382
+  to_port                  = 4000
+  protocol                 = "tcp"
+}

@@ -266,7 +266,7 @@
         "CidrIp": "0.0.0.0/0"
       }
     },
-    "AWSEC2SecurityGroupIngressnodetomastertcp14000": {
+    "AWSEC2SecurityGroupIngressnodetomastertcp12379": {
       "Type": "AWS::EC2::SecurityGroupIngress",
       "Properties": {
         "GroupId": {
@@ -276,6 +276,20 @@
           "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
        },
-        "ToPort": 4000,
+        "FromPort": 1,
+        "ToPort": 2379,
         "IpProtocol": "tcp"
       }
     },
+    "AWSEC2SecurityGroupIngressnodetomastertcp23824000": {
+      "Type": "AWS::EC2::SecurityGroupIngress",
+      "Properties": {
+        "GroupId": {
+          "Ref": "AWSEC2SecurityGroupmastersminimalexamplecom"
+        },
+        "SourceSecurityGroupId": {
+          "Ref": "AWSEC2SecurityGroupnodesminimalexamplecom"
+        },
+        "FromPort": 2382,
+        "ToPort": 4000,
+        "IpProtocol": "tcp"
+      }
+    },

@@ -311,11 +311,20 @@ resource "aws_security_group_rule" "node-egress" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
-resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
+resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
   type                     = "ingress"
   security_group_id        = "${aws_security_group.masters-minimal-example-com.id}"
   source_security_group_id = "${aws_security_group.nodes-minimal-example-com.id}"
   from_port                = 1
-  to_port                  = 4000
+  to_port                  = 2379
   protocol                 = "tcp"
 }
+
+resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
+  type                     = "ingress"
+  security_group_id        = "${aws_security_group.masters-minimal-example-com.id}"
+  source_security_group_id = "${aws_security_group.nodes-minimal-example-com.id}"
+  from_port                = 2382
+  to_port                  = 4000
+  protocol                 = "tcp"
+}

@@ -591,11 +591,20 @@ resource "aws_security_group_rule" "node-to-master-protocol-ipip" {
   protocol                 = "4"
 }
 
-resource "aws_security_group_rule" "node-to-master-tcp-1-4001" {
+resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
   type                     = "ingress"
   security_group_id        = "${aws_security_group.masters-privatecalico-example-com.id}"
   source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}"
   from_port                = 1
-  to_port                  = 4001
+  to_port                  = 2379
   protocol                 = "tcp"
 }
+
+resource "aws_security_group_rule" "node-to-master-tcp-2382-4001" {
+  type                     = "ingress"
+  security_group_id        = "${aws_security_group.masters-privatecalico-example-com.id}"
+  source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}"
+  from_port                = 2382
+  to_port                  = 4001
+  protocol                 = "tcp"
+}

@@ -582,11 +582,20 @@ resource "aws_security_group_rule" "node-egress" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
-resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
+resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
  type                     = "ingress"
   security_group_id        = "${aws_security_group.masters-privatecanal-example-com.id}"
   source_security_group_id = "${aws_security_group.nodes-privatecanal-example-com.id}"
   from_port                = 1
-  to_port                  = 4000
+  to_port                  = 2379
   protocol                 = "tcp"
 }
+
+resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
+  type                     = "ingress"
+  security_group_id        = "${aws_security_group.masters-privatecanal-example-com.id}"
+  source_security_group_id = "${aws_security_group.nodes-privatecanal-example-com.id}"
+  from_port                = 2382
+  to_port                  = 4000
+  protocol                 = "tcp"
+}

@@ -587,11 +587,20 @@ resource "aws_security_group_rule" "node-egress" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
-resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
+resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
   type                     = "ingress"
   security_group_id        = "${aws_security_group.masters-privatedns1-example-com.id}"
   source_security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}"
   from_port                = 1
-  to_port                  = 4000
+  to_port                  = 2379
   protocol                 = "tcp"
 }
+
+resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
+  type                     = "ingress"
+  security_group_id        = "${aws_security_group.masters-privatedns1-example-com.id}"
+  source_security_group_id = "${aws_security_group.nodes-privatedns1-example-com.id}"
+  from_port                = 2382
+  to_port                  = 4000
+  protocol                 = "tcp"
+}

@@ -573,11 +573,20 @@ resource "aws_security_group_rule" "node-egress" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
-resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
+resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
   type                     = "ingress"
   security_group_id        = "${aws_security_group.masters-privatedns2-example-com.id}"
   source_security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}"
   from_port                = 1
-  to_port                  = 4000
+  to_port                  = 2379
   protocol                 = "tcp"
 }
+
+resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
+  type                     = "ingress"
+  security_group_id        = "${aws_security_group.masters-privatedns2-example-com.id}"
+  source_security_group_id = "${aws_security_group.nodes-privatedns2-example-com.id}"
+  from_port                = 2382
+  to_port                  = 4000
+  protocol                 = "tcp"
+}

@@ -582,11 +582,20 @@ resource "aws_security_group_rule" "node-egress" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
-resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
+resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
   type                     = "ingress"
   security_group_id        = "${aws_security_group.masters-privateflannel-example-com.id}"
   source_security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
   from_port                = 1
-  to_port                  = 4000
+  to_port                  = 2379
   protocol                 = "tcp"
 }
+
+resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
+  type                     = "ingress"
+  security_group_id        = "${aws_security_group.masters-privateflannel-example-com.id}"
+  source_security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
+  from_port                = 2382
+  to_port                  = 4000
+  protocol                 = "tcp"
+}

@@ -573,11 +573,20 @@ resource "aws_security_group_rule" "node-egress" {
   cidr_blocks       = ["0.0.0.0/0"]
 }
 
-resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
+resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
   type                     = "ingress"
   security_group_id        = "${aws_security_group.masters-privatekopeio-example-com.id}"
   source_security_group_id = "${aws_security_group.nodes-privatekopeio-example-com.id}"
   from_port                = 1
-  to_port                  = 4000
+  to_port                  = 2379
   protocol                 = "tcp"
 }
+
+resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
+  type                     = "ingress"
+  security_group_id        = "${aws_security_group.masters-privatekopeio-example-com.id}"
+  source_security_group_id = "${aws_security_group.nodes-privatekopeio-example-com.id}"
+  from_port                = 2382
+  to_port                  = 4000
+  protocol                 = "tcp"
+}

@@ -582,11 +582,20 @@ resource "aws_security_group_rule" "node-egress" {
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privateweave-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
  from_port                = 1
  to_port                  = 2379
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privateweave-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
  from_port                = 2382
  to_port                  = 4000
  protocol                 = "tcp"
}
@@ -286,11 +286,20 @@ resource "aws_security_group_rule" "node-egress" {
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-sharedsubnet-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-sharedsubnet-example-com.id}"
  from_port                = 1
  to_port                  = 2379
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-sharedsubnet-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-sharedsubnet-example-com.id}"
  from_port                = 2382
  to_port                  = 4000
  protocol                 = "tcp"
}
@@ -302,11 +302,20 @@ resource "aws_security_group_rule" "node-egress" {
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-to-master-tcp-1-4000" {
resource "aws_security_group_rule" "node-to-master-tcp-1-2379" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-sharedvpc-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-sharedvpc-example-com.id}"
  from_port                = 1
  to_port                  = 2379
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-2382-4000" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-sharedvpc-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-sharedvpc-example-com.id}"
  from_port                = 2382
  to_port                  = 4000
  protocol                 = "tcp"
}
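Across all of these fixture clusters the change is identical: the single node-to-master rule covering TCP 1-4000 is split into 1-2379 and 2382-4000, so ports 2380-2381 (used by etcd) are no longer reachable from nodes. A minimal Go sketch of the underlying range-splitting; the carve helper is hypothetical and only illustrates the arithmetic that produces the two rules above:

package main

import "fmt"

// portRange is a contiguous [From, To] TCP port span.
type portRange struct{ From, To int }

// carve splits [from, to] around a sorted list of blocked ports, returning
// the sub-ranges that remain open.
func carve(from, to int, blocked []int) []portRange {
    var out []portRange
    start := from
    for _, b := range blocked {
        if b < start || b > to {
            continue
        }
        if b > start {
            out = append(out, portRange{start, b - 1})
        }
        start = b + 1
    }
    if start <= to {
        out = append(out, portRange{start, to})
    }
    return out
}

func main() {
    // Blocking 2380 and 2381 inside 1-4000 yields exactly the two
    // security-group rules seen in the fixtures.
    fmt.Println(carve(1, 4000, []int{2380, 2381})) // [{1 2379} {2382 4000}]
}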
@@ -1,132 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"

---

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: auth-portal
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
spec:
  template:
    metadata:
      labels:
        app: auth-portal
    spec:
      containers:
      - name: auth-portal
        image: kopeio/auth-portal:1.0.20170619
        ports:
        - containerPort: 8080
        command:
        - /auth-portal

---

apiVersion: v1
kind: Service
metadata:
  name: auth-portal
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
spec:
  selector:
    app: auth-portal
  ports:
  - port: 80
    targetPort: 8080

---

apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: auth-api
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: auth-api
    spec:
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      containers:
      - name: auth-api
        image: kopeio/auth-api:1.0.20170619
        imagePullPolicy: Always
        ports:
        - containerPort: 9001
        command:
        - /auth-api
        - --listen=127.0.0.1:9001
        - --secure-port=9002
        - --server=https://127.0.0.1:9002
        - --insecure-skip-tls-verify
        - --etcd-servers=http://127.0.0.1:4001
        - --v=8
        - --storage-backend=etcd2

---

apiVersion: v1
kind: Service
metadata:
  name: auth-api
  namespace: kopeio-auth
spec:
  selector:
    app: auth-api
  ports:
  - port: 443
    targetPort: 9002

---

apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1alpha1.auth.kope.io
spec:
  insecureSkipTLSVerify: true
  group: auth.kope.io
  priority: 150
  service:
    name: auth-api
    namespace: kopeio-auth
  version: v1alpha1

---

apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1alpha1.config.auth.kope.io
spec:
  insecureSkipTLSVerify: true
  group: config.auth.kope.io
  priority: 150
  service:
    name: auth-api
    namespace: kopeio-auth
  version: v1alpha1
@@ -0,0 +1,185 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"

---

apiVersion: v1
kind: Service
metadata:
  name: auth-api
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
spec:
  selector:
    app: auth-api
  ports:
  - port: 443
    targetPort: 9002

---

apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: auth-api
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
spec:
  template:
    metadata:
      labels:
        app: auth-api
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      serviceAccountName: auth-api
      hostNetwork: true
      nodeSelector:
        node-role.kubernetes.io/master: ""
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      containers:
      - name: auth-api
        image: kopeio/auth-api:1.0.20171125
        imagePullPolicy: Always
        ports:
        - containerPort: 9001
        command:
        - /auth-api
        - --listen=127.0.0.1:9001
        - --secure-port=9002
        - --etcd-servers=http://127.0.0.1:4001
        - --v=8
        - --storage-backend=etcd2

---

apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1alpha1.auth.kope.io
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
spec:
  insecureSkipTLSVerify: true
  group: auth.kope.io
  groupPriorityMinimum: 1000
  versionPriority: 15
  service:
    name: auth-api
    namespace: kopeio-auth
  version: v1alpha1

---

apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
  name: v1alpha1.config.auth.kope.io
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
spec:
  insecureSkipTLSVerify: true
  group: config.auth.kope.io
  groupPriorityMinimum: 1000
  versionPriority: 15
  service:
    name: auth-api
    namespace: kopeio-auth
  version: v1alpha1

---

kind: ServiceAccount
apiVersion: v1
metadata:
  name: auth-api
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kopeio-auth:auth-api:auth-reader
  namespace: kube-system
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
- kind: ServiceAccount
  name: auth-api
  namespace: kopeio-auth

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kopeio-auth:system:auth-delegator
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
- kind: ServiceAccount
  name: auth-api
  namespace: kopeio-auth

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: auth-api
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
rules:
- apiGroups: ["auth.kope.io"]
  resources: ["users"]
  verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: auth-api
  namespace: kopeio-auth
  labels:
    k8s-addon: authentication.kope.io
    role.kubernetes.io/authentication: "1"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: auth-api
subjects:
- kind: ServiceAccount
  name: auth-api
  namespace: kopeio-auth
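One detail worth noting in the rewritten manifest: the apiregistration API that shipped with Kubernetes 1.8 replaced the single priority field with groupPriorityMinimum and versionPriority, which lines up with the addon now targeting >=1.8.0 (see the channel change further below). A sketch of the same APIService expressed with the kube-aggregator Go types as I understand them; illustrative only, since the addon itself ships as YAML:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
)

// buildAuthAPIService mirrors the v1alpha1.auth.kope.io manifest above.
func buildAuthAPIService() *apiregistrationv1beta1.APIService {
    return &apiregistrationv1beta1.APIService{
        ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.auth.kope.io"},
        Spec: apiregistrationv1beta1.APIServiceSpec{
            Group:                 "auth.kope.io",
            Version:               "v1alpha1",
            InsecureSkipTLSVerify: true,
            // These two fields replace the pre-1.8 "priority" field.
            GroupPriorityMinimum: 1000,
            VersionPriority:      15,
            Service: &apiregistrationv1beta1.ServiceReference{
                Name:      "auth-api",
                Namespace: "kopeio-auth",
            },
        },
    }
}

func main() {
    fmt.Printf("%+v\n", buildAuthAPIService().Spec)
}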
@@ -5,7 +5,7 @@ metadata:
  name: calico-config
  namespace: kube-system
data:
  # The calico-etcd PetSet service IP:port
  # etcd servers
  etcd_endpoints: "{{ $cluster := index .EtcdClusters 0 -}}
    {{- range $j, $member := $cluster.Members -}}
    {{- if $j }},{{ end -}}

@@ -18,33 +18,22 @@ data:
  # The CNI network configuration to install on each node.
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "etcd_endpoints": "__ETCD_ENDPOINTS__",
          "log_level": "info",
          "ipam": {
      "name": "k8s-pod-network",
      "type": "calico",
      "etcd_endpoints": "__ETCD_ENDPOINTS__",
      "log_level": "info",
      "ipam": {
        "type": "calico-ipam"
      },
          "policy": {
            "type": "k8s",
            "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
            "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
          },
          "kubernetes": {
            "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
      "policy": {
        "type": "k8s",
        "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
        "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
      },
      "kubernetes": {
        "kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
      }
      ]
    }

---

kind: ClusterRole
@@ -133,12 +122,15 @@ spec:
          operator: Exists
        - effect: NoSchedule
          operator: Exists
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: quay.io/calico/node:v2.4.1
          image: quay.io/calico/node:v2.6.2
          resources:
            requests:
              cpu: 10m

@@ -169,6 +161,14 @@ spec:
            # Auto-detect the BGP IP address.
            - name: IP
              value: ""
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          volumeMounts:

@@ -185,7 +185,7 @@ spec:
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: quay.io/calico/cni:v1.10.0
          image: quay.io/calico/cni:v1.11.0
          resources:
            requests:
              cpu: 10m

@@ -194,7 +194,7 @@ spec:
          env:
            # The name of calico config file
            - name: CNI_CONF_NAME
              value: 10-calico.conflist
              value: 10-calico.conf
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
@@ -237,8 +237,8 @@ spec:

---

# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then
# be removed entirely once the new kube-controllers deployment has been deployed above.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:

@@ -246,35 +246,23 @@ metadata:
  namespace: kube-system
  labels:
    k8s-app: calico-policy
    role.kubernetes.io/networking: "1"
spec:
  # The policy controller can only have a single active instance.
  replicas: 1
  # Turn this deployment off in favor of the kube-controllers deployment above.
  replicas: 0
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy-controller
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        k8s-app: calico-policy
    spec:
      # The policy controller must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      serviceAccountName: calico
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: CriticalAddonsOnly
        operator: Exists
      containers:
      - name: calico-policy-controller
        image: quay.io/calico/kube-policy-controller:v0.7.0
        resources:
          requests:
            cpu: 10m
        image: quay.io/calico/kube-controllers:v1.0.0
        env:
          # The location of the Calico etcd cluster.
          - name: ETCD_ENDPOINTS

@@ -282,15 +270,6 @@ spec:
              configMapKeyRef:
                name: calico-config
                key: etcd_endpoints
          # The location of the Kubernetes API. Use the default Kubernetes
          # service for API access.
          - name: K8S_API
            value: "https://kubernetes.default:443"
          # Since we're running in the host namespace and might not have KubeDNS
          # access, configure the container's /etc/hosts to resolve
          # kubernetes.default to the correct service clusterIP.
          - name: CONFIGURE_ETC_HOSTS
            value: "true"

        volumeMounts:
          # Necessary for gossip based DNS
@@ -301,6 +280,55 @@ spec:
      - name: etc-hosts
        hostPath:
          path: /etc/hosts
---

# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
    role.kubernetes.io/networking: "1"
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      # The controllers must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      serviceAccountName: calico
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      - key: CriticalAddonsOnly
        operator: Exists
      containers:
      - name: calico-kube-controllers
        image: quay.io/calico/kube-controllers:v1.0.0
        resources:
          requests:
            cpu: 10m
        env:
          # The location of the Calico etcd cluster.
          - name: ETCD_ENDPOINTS
            valueFrom:
              configMapKeyRef:
                name: calico-config
                key: etcd_endpoints


{{ if and (eq .CloudProvider "aws") (.Networking.Calico.CrossSubnet) -}}
# This manifest installs the k8s-ec2-srcdst container, which disables
@@ -16,11 +16,10 @@ data:
  calico_backend: "bird"

  # The CNI network configuration to install on each node.
  # cniVersion should be 0.1.0 on k8s: https://github.com/projectcalico/calico/issues/742
  cni_network_config: |-
    {
      "name": "k8s-pod-network",
      "cniVersion": "0.1.0",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
@@ -137,7 +137,7 @@ spec:
        effect: NoSchedule
      containers:
      - name: romana-daemon
        image: quay.io/romana/daemon:v2.0-preview.2
        image: quay.io/romana/daemon:v2.0.0
        imagePullPolicy: Always
        resources:
          requests:

@@ -170,7 +170,7 @@ spec:
        effect: NoSchedule
      containers:
      - name: romana-listener
        image: quay.io/romana/listener:v2.0-preview.2
        image: quay.io/romana/listener:v2.0.0
        imagePullPolicy: Always
        resources:
          requests:

@@ -185,6 +185,8 @@ metadata:
  name: romana-agent
  namespace: kube-system
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:

@@ -200,7 +202,7 @@ spec:
        effect: NoSchedule
      containers:
      - name: romana-agent
        image: quay.io/romana/agent:v2.0-preview.2
        image: quay.io/romana/agent:v2.0.0
        imagePullPolicy: Always
        resources:
          requests:

@@ -213,6 +215,10 @@ spec:
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: NODEIP
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        args:
        - --service-cluster-ip-range={{ .ServiceClusterIPRange }}
        securityContext:

@@ -299,7 +305,7 @@ spec:
        effect: NoSchedule
      containers:
      - name: romana-aws
        image: quay.io/romana/aws:v2.0-preview.2
        image: quay.io/romana/aws:v2.0.0
        imagePullPolicy: Always
        resources:
          requests:

@@ -328,7 +334,7 @@ spec:
        effect: NoSchedule
      containers:
      - name: romana-vpcrouter
        image: quay.io/romana/vpcrouter-romana-plugin
        image: quay.io/romana/vpcrouter-romana-plugin:1.1.12
        imagePullPolicy: Always
        resources:
          requests:
@@ -62,7 +62,8 @@ func NewClientsetCAStore(cluster *kops.Cluster, clientset kopsinternalversion.Ko
    return c
}

// readCAKeypairs retrieves the CA keypair, generating a new keypair if not found
// readCAKeypairs retrieves the CA keypair.
// (No longer generates a keypair if not found.)
func (c *ClientsetCAStore) readCAKeypairs(id string) (*keyset, error) {
    c.mutex.Lock()
    defer c.mutex.Unlock()

@@ -78,14 +79,9 @@ func (c *ClientsetCAStore) readCAKeypairs(id string) (*keyset, error) {
    }

    if keyset == nil {
        keyset, err = c.generateCACertificate(id)
        if err != nil {
            return nil, err
        }

        return nil, nil
    }
    c.cachedCaKeysets[id] = keyset

    return keyset, nil
}
@@ -185,6 +185,14 @@ func NewAWSCloud(region string, tags map[string]string) (AWSCloud, error) {
    config = config.WithCredentialsChainVerboseErrors(true)
    config = request.WithRetryer(config, newLoggingRetryer(ClientMaxRetries))

    // We have the updated aws sdk from 1.9, but don't have https://github.com/kubernetes/kubernetes/pull/55307
    // Set the SleepDelay function to work around this
    // TODO: Remove once we update to k8s >= 1.9 (or a version of the retry delayer that includes this)
    config.SleepDelay = func(d time.Duration) {
        glog.V(6).Infof("aws request sleeping for %v", d)
        time.Sleep(d)
    }

    requestLogger := newRequestLogger(2)

    sess, err := session.NewSession(config)
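The SleepDelay hook above is called by the AWS SDK between request retries, so overriding it is a cheap way to observe (or cap) retry back-off. A self-contained sketch of the same wiring outside of kops, assuming aws-sdk-go v1:

package main

import (
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
)

// newInstrumentedConfig returns an aws.Config whose retry sleeps are logged,
// mirroring what NewAWSCloud does above with glog.
func newInstrumentedConfig(region string) *aws.Config {
    cfg := aws.NewConfig().WithRegion(region)
    cfg.SleepDelay = func(d time.Duration) {
        log.Printf("aws request sleeping for %v", d)
        time.Sleep(d)
    }
    return cfg
}

func main() {
    cfg := newInstrumentedConfig("us-east-1")
    _ = cfg // pass to session.NewSession(cfg) in real use
}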
@@ -133,8 +133,11 @@ func (c *MockAWSCloud) BuildTags(name *string) map[string]string {
}

func (c *MockAWSCloud) Tags() map[string]string {
    glog.Fatalf("MockAWSCloud Tags not implemented")
    return nil
    tags := make(map[string]string)
    for k, v := range c.tags {
        tags[k] = v
    }
    return tags
}

func (c *MockAWSCloud) CreateTags(resourceId string, tags map[string]string) error {
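Note that the mock now returns a copy of its tag map rather than the map itself. A short sketch of why the defensive copy matters (simplified type, not the real MockAWSCloud):

package main

import "fmt"

type cloud struct{ tags map[string]string }

// Tags returns a copy; handing out the internal map would let callers
// mutate shared state behind the cloud's back.
func (c *cloud) Tags() map[string]string {
    out := make(map[string]string, len(c.tags))
    for k, v := range c.tags {
        out[k] = v
    }
    return out
}

func main() {
    c := &cloud{tags: map[string]string{"KubernetesCluster": "test.example.com"}}
    got := c.Tags()
    got["KubernetesCluster"] = "mutated"     // only the copy changes
    fmt.Println(c.tags["KubernetesCluster"]) // test.example.com
}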
@@ -29,6 +29,7 @@ import (
    "k8s.io/kops/upup/pkg/fi/utils"
)

// BootstrapChannelBuilder is responsible for handling the addons in channels
type BootstrapChannelBuilder struct {
    cluster   *kops.Cluster
    Lifecycle *fi.Lifecycle

@@ -38,6 +39,7 @@ type BootstrapChannelBuilder struct {

var _ fi.ModelBuilder = &BootstrapChannelBuilder{}

// Build is responsible for adding the addons to the channel
func (b *BootstrapChannelBuilder) Build(c *fi.ModelBuilderContext) error {

    addons, manifests, err := b.buildManifest()

@@ -184,38 +186,42 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
        manifests[key] = "addons/" + location
    }

    {
        key := "dns-controller.addons.k8s.io"
        version := "1.8.0-beta.1"

    // @check the dns-controller has not been disabled
    externalDNS := b.cluster.Spec.ExternalDNS
    if externalDNS == nil || !externalDNS.Disable {
        {
            location := key + "/pre-k8s-1.6.yaml"
            id := "pre-k8s-1.6"
        key := "dns-controller.addons.k8s.io"
        version := "1.8.0-beta.1"

            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
                Name:              fi.String(key),
                Version:           fi.String(version),
                Selector:          map[string]string{"k8s-addon": key},
                Manifest:          fi.String(location),
                KubernetesVersion: "<1.6.0",
                Id:                id,
            })
            manifests[key+"-"+id] = "addons/" + location
        }
        {
            location := key + "/pre-k8s-1.6.yaml"
            id := "pre-k8s-1.6"

        {
            location := key + "/k8s-1.6.yaml"
            id := "k8s-1.6"
            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
                Name:              fi.String(key),
                Version:           fi.String(version),
                Selector:          map[string]string{"k8s-addon": key},
                Manifest:          fi.String(location),
                KubernetesVersion: "<1.6.0",
                Id:                id,
            })
            manifests[key+"-"+id] = "addons/" + location
        }

            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
                Name:              fi.String(key),
                Version:           fi.String(version),
                Selector:          map[string]string{"k8s-addon": key},
                Manifest:          fi.String(location),
                KubernetesVersion: ">=1.6.0",
                Id:                id,
            })
            manifests[key+"-"+id] = "addons/" + location
        {
            location := key + "/k8s-1.6.yaml"
            id := "k8s-1.6"

            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
                Name:              fi.String(key),
                Version:           fi.String(version),
                Selector:          map[string]string{"k8s-addon": key},
                Manifest:          fi.String(location),
                KubernetesVersion: ">=1.6.0",
                Id:                id,
            })
            manifests[key+"-"+id] = "addons/" + location
        }
    }
}
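The hunk above wraps the dns-controller addon in an ExternalDNS guard, but the selection mechanism is unchanged: each AddonSpec carries a KubernetesVersion range, and the channels tooling installs whichever manifest matches the cluster version. A simplified Go sketch of that gating; the addon/pick types are hypothetical stand-ins, and real channels code parses full semver ranges rather than just these two forms:

package main

import "fmt"

type addon struct {
    ID                string
    KubernetesVersion string // only "<1.6.0" / ">=1.6.0" handled here
    Manifest          string
}

// pick returns the first addon whose version gate matches the cluster's
// minor version.
func pick(addons []addon, minor int) (addon, bool) {
    for _, a := range addons {
        switch a.KubernetesVersion {
        case "<1.6.0":
            if minor < 6 {
                return a, true
            }
        case ">=1.6.0":
            if minor >= 6 {
                return a, true
            }
        }
    }
    return addon{}, false
}

func main() {
    addons := []addon{
        {ID: "pre-k8s-1.6", KubernetesVersion: "<1.6.0", Manifest: "dns-controller.addons.k8s.io/pre-k8s-1.6.yaml"},
        {ID: "k8s-1.6", KubernetesVersion: ">=1.6.0", Manifest: "dns-controller.addons.k8s.io/k8s-1.6.yaml"},
    }
    a, _ := pick(addons, 8) // a 1.8 cluster gets the k8s-1.6 manifest
    fmt.Println(a.Manifest)
}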
@@ -468,11 +474,10 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri

    if b.cluster.Spec.Networking.Calico != nil {
        key := "networking.projectcalico.org"
        // 2.6.3-kops.1 = 2.6.2 with kops manifest tweaks. This should go away with the next version bump.
        versions := map[string]string{
            "pre-k8s-1.6": "2.4.1",
            "k8s-1.6":     "2.4.2-kops.1",
            "k8s-1.8":     "2.6.3-kops.1",
            "k8s-1.6":     "2.6.2",
            "k8s-1.7":     "2.6.2",
        }

        {

@@ -499,14 +504,14 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
                Version:           fi.String(versions[id]),
                Selector:          networkingSelector,
                Manifest:          fi.String(location),
                KubernetesVersion: ">=1.6.0 <1.8.0",
                KubernetesVersion: ">=1.6.0 <1.7.0",
                Id:                id,
            })
            manifests[key+"-"+id] = "addons/" + location
        }

        {
            id := "k8s-1.8"
            id := "k8s-1.7"
            location := key + "/" + id + ".yaml"

            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{

@@ -514,7 +519,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
                Version:           fi.String(versions[id]),
                Selector:          networkingSelector,
                Manifest:          fi.String(location),
                KubernetesVersion: ">=1.8.0",
                KubernetesVersion: ">=1.7.0",
                Id:                id,
            })
            manifests[key+"-"+id] = "addons/" + location

@@ -598,18 +603,18 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri

    if b.cluster.Spec.Networking.Romana != nil {
        key := "networking.romana"
        version := "v2.0-preview.3"
        version := "v2.0.0"

        {
            location := key + "/k8s-1.6.yaml"
            id := "k8s-1.6"
            location := key + "/k8s-1.7.yaml"
            id := "k8s-1.7"

            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
                Name:              fi.String(key),
                Version:           fi.String(version),
                Selector:          networkingSelector,
                Manifest:          fi.String(location),
                KubernetesVersion: ">=1.6.0",
                KubernetesVersion: ">=1.7.0",
                Id:                id,
            })
            manifests[key+"-"+id] = "addons/" + location

@@ -620,18 +625,18 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri

    if b.cluster.Spec.Authentication != nil && b.cluster.Spec.Authentication.Kopeio != nil {
        key := "authentication.kope.io"
        version := "1.0.20170619"
        version := "1.0.20171125"

        {
            location := key + "/k8s-1.6.yaml"
            id := "k8s-1.6"
            location := key + "/k8s-1.8.yaml"
            id := "k8s-1.8"

            addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
                Name:              fi.String(key),
                Version:           fi.String(version),
                Selector:          authenticationSelector,
                Manifest:          fi.String(location),
                KubernetesVersion: ">=1.6.0",
                KubernetesVersion: ">=1.8.0",
                Id:                id,
            })
            manifests[key+"-"+id] = "addons/" + location
@@ -12,7 +12,13 @@ go_library(
        "//pkg/apis/kops:go_default_library",
        "//pkg/cloudinstances:go_default_library",
        "//upup/pkg/fi:go_default_library",
        "//util/pkg/vfs:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/gophercloud/gophercloud:go_default_library",
        "//vendor/github.com/gophercloud/gophercloud/openstack:go_default_library",
        "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/kubernetes/federation/pkg/dnsprovider:go_default_library",
    ],
)
@@ -18,25 +18,90 @@ package openstack

import (
    "fmt"
    "time"

    "github.com/golang/glog"
    "github.com/gophercloud/gophercloud"
    os "github.com/gophercloud/gophercloud/openstack"
    cinder "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
    "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kops/pkg/apis/kops"
    "k8s.io/kops/pkg/cloudinstances"
    "k8s.io/kops/upup/pkg/fi"
    "k8s.io/kops/util/pkg/vfs"
    "k8s.io/kubernetes/federation/pkg/dnsprovider"
)

const TagNameEtcdClusterPrefix = "k8s.io/etcd/"
const TagNameRolePrefix = "k8s.io/role/"
const TagClusterName = "KubernetesCluster"

// readBackoff is the backoff strategy for openstack read retries.
var readBackoff = wait.Backoff{
    Duration: time.Second,
    Factor:   1.5,
    Jitter:   0.1,
    Steps:    4,
}

// writeBackoff is the backoff strategy for openstack write retries.
var writeBackoff = wait.Backoff{
    Duration: time.Second,
    Factor:   1.5,
    Jitter:   0.1,
    Steps:    5,
}

type OpenstackCloud interface {
    fi.Cloud

    // SetVolumeTags will set the tags for the Cinder volume
    SetVolumeTags(id string, tags map[string]string) error

    // GetCloudTags will return the tags attached on cloud
    GetCloudTags() map[string]string

    // ListVolumes will return the Cinder volumes which match the options
    ListVolumes(opt cinder.ListOpts) ([]cinder.Volume, error)

    // CreateVolume will create a new Cinder Volume
    CreateVolume(opt cinder.CreateOpts) (*cinder.Volume, error)
}

type openstackCloud struct {
    cinderClient *gophercloud.ServiceClient
    tags         map[string]string
}

var _ fi.Cloud = &openstackCloud{}

func NewOpenstackCloud() (OpenstackCloud, error) {
    return &openstackCloud{}, nil
func NewOpenstackCloud(tags map[string]string) (OpenstackCloud, error) {
    config := vfs.OpenstackConfig{}

    authOption, err := config.GetCredential()
    if err != nil {
        return nil, err
    }
    provider, err := os.AuthenticatedClient(authOption)
    if err != nil {
        return nil, fmt.Errorf("error building openstack authenticated client: %v", err)
    }

    endpointOpt, err := config.GetServiceConfig("Cinder")
    if err != nil {
        return nil, err
    }
    cinderClient, err := os.NewBlockStorageV2(provider, endpointOpt)
    if err != nil {
        return nil, fmt.Errorf("error building cinder client: %v", err)
    }

    c := &openstackCloud{
        cinderClient: cinderClient,
        tags:         tags,
    }
    return c, nil
}

func (c *openstackCloud) ProviderID() kops.CloudProviderID {

@@ -62,3 +127,78 @@ func (c *openstackCloud) DeleteGroup(g *cloudinstances.CloudInstanceGroup) error
func (c *openstackCloud) GetCloudGroups(cluster *kops.Cluster, instancegroups []*kops.InstanceGroup, warnUnmatched bool, nodes []v1.Node) (map[string]*cloudinstances.CloudInstanceGroup, error) {
    return nil, fmt.Errorf("openstackCloud::GetCloudGroups not implemented")
}

func (c *openstackCloud) SetVolumeTags(id string, tags map[string]string) error {
    if len(tags) == 0 {
        return nil
    }
    if id == "" {
        return fmt.Errorf("error setting tags to unknown volume")
    }
    glog.V(4).Infof("setting tags to cinder volume %q: %v", id, tags)

    opt := cinder.UpdateOpts{Metadata: tags}
    done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
        _, err := cinder.Update(c.cinderClient, id, opt).Extract()
        if err != nil {
            return false, fmt.Errorf("error setting tags to cinder volume %q: %v", id, err)
        }
        return true, nil
    })
    if err != nil {
        return err
    } else if done {
        return nil
    } else {
        return wait.ErrWaitTimeout
    }
}

func (c *openstackCloud) GetCloudTags() map[string]string {
    return c.tags
}

func (c *openstackCloud) ListVolumes(opt cinder.ListOpts) ([]cinder.Volume, error) {
    var volumes []cinder.Volume

    done, err := vfs.RetryWithBackoff(readBackoff, func() (bool, error) {
        allPages, err := cinder.List(c.cinderClient, opt).AllPages()
        if err != nil {
            return false, fmt.Errorf("error listing volumes %v: %v", opt, err)
        }

        vs, err := cinder.ExtractVolumes(allPages)
        if err != nil {
            return false, fmt.Errorf("error extracting volumes from pages: %v", err)
        }
        volumes = vs
        return true, nil
    })
    if err != nil {
        return volumes, err
    } else if done {
        return volumes, nil
    } else {
        return volumes, wait.ErrWaitTimeout
    }
}

func (c *openstackCloud) CreateVolume(opt cinder.CreateOpts) (*cinder.Volume, error) {
    var volume *cinder.Volume

    done, err := vfs.RetryWithBackoff(writeBackoff, func() (bool, error) {
        v, err := cinder.Create(c.cinderClient, opt).Extract()
        if err != nil {
            return false, fmt.Errorf("error creating volume %v: %v", opt, err)
        }
        volume = v
        return true, nil
    })
    if err != nil {
        return volume, err
    } else if done {
        return volume, nil
    } else {
        return volume, wait.ErrWaitTimeout
    }
}
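Every gophercloud call above is wrapped in vfs.RetryWithBackoff with the shared read/write backoff policies. A close stand-in using only apimachinery's stock wait.ExponentialBackoff (an assumption for the sketch; the semantics are similar but not identical, since the vfs helper also reports whether it gave up):

package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

// listWithRetry retries a listing call with the same Backoff shape as
// readBackoff above.
func listWithRetry(list func() ([]string, error)) ([]string, error) {
    backoff := wait.Backoff{
        Duration: time.Second,
        Factor:   1.5,
        Jitter:   0.1,
        Steps:    4,
    }
    var result []string
    var lastErr error
    err := wait.ExponentialBackoff(backoff, func() (bool, error) {
        vs, err := list()
        if err != nil {
            lastErr = err
            return false, nil // (false, nil) means "retry"
        }
        result = vs
        return true, nil
    })
    if err == wait.ErrWaitTimeout && lastErr != nil {
        return nil, fmt.Errorf("listing failed after retries: %v", lastErr)
    }
    return result, err
}

func main() {
    calls := 0
    vols, err := listWithRetry(func() ([]string, error) {
        calls++
        if calls < 3 {
            return nil, fmt.Errorf("transient API error")
        }
        return []string{"etcd-a", "etcd-b"}, nil
    })
    fmt.Println(vols, err) // [etcd-a etcd-b] <nil>
}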
@@ -0,0 +1,14 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["volume.go"],
    importpath = "k8s.io/kops/upup/pkg/fi/cloudup/openstacktasks",
    visibility = ["//visibility:public"],
    deps = [
        "//upup/pkg/fi:go_default_library",
        "//upup/pkg/fi/cloudup/openstack:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes:go_default_library",
    ],
)
@@ -0,0 +1,145 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package openstacktasks

import (
    "fmt"

    "github.com/golang/glog"
    cinder "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
    "k8s.io/kops/upup/pkg/fi"
    "k8s.io/kops/upup/pkg/fi/cloudup/openstack"
)

type Volume struct {
    ID               *string
    Name             *string
    AvailabilityZone *string
    VolumeType       *string
    SizeGB           *int64
    Tags             map[string]string
    Lifecycle        *fi.Lifecycle
}

var _ fi.CompareWithID = &Volume{}

func (c *Volume) CompareWithID() *string {
    return c.ID
}

func (c *Volume) Find(context *fi.Context) (*Volume, error) {
    cloud := context.Cloud.(openstack.OpenstackCloud)
    opt := cinder.ListOpts{
        Name:     fi.StringValue(c.Name),
        Metadata: cloud.GetCloudTags(),
    }
    volumes, err := cloud.ListVolumes(opt)
    if err != nil {
        return nil, err
    }
    n := len(volumes)
    if n == 0 {
        return nil, nil
    } else if n != 1 {
        return nil, fmt.Errorf("found multiple Volumes with name: %s", fi.StringValue(c.Name))
    }
    v := volumes[0]
    actual := &Volume{
        ID:               fi.String(v.ID),
        Name:             fi.String(v.Name),
        AvailabilityZone: fi.String(v.AvailabilityZone),
        VolumeType:       fi.String(v.VolumeType),
        SizeGB:           fi.Int64(int64(v.Size)),
        Tags:             v.Metadata,
        Lifecycle:        c.Lifecycle,
    }
    return actual, nil
}

func (c *Volume) Run(context *fi.Context) error {
    cloud := context.Cloud.(openstack.OpenstackCloud)
    for k, v := range cloud.GetCloudTags() {
        c.Tags[k] = v
    }

    return fi.DefaultDeltaRunMethod(c, context)
}

func (_ *Volume) CheckChanges(a, e, changes *Volume) error {
    if a == nil {
        if e.Name == nil {
            return fi.RequiredField("Name")
        }
        if e.AvailabilityZone == nil {
            return fi.RequiredField("AvailabilityZone")
        }
        if e.VolumeType == nil {
            return fi.RequiredField("VolumeType")
        }
        if e.SizeGB == nil {
            return fi.RequiredField("SizeGB")
        }
    } else {
        if changes.ID != nil {
            return fi.CannotChangeField("ID")
        }
        if changes.AvailabilityZone != nil {
            return fi.CannotChangeField("AvailabilityZone")
        }
        if changes.VolumeType != nil {
            return fi.CannotChangeField("VolumeType")
        }
        if changes.SizeGB != nil {
            return fi.CannotChangeField("SizeGB")
        }
    }
    return nil
}

func (_ *Volume) RenderOpenstack(t *openstack.OpenstackAPITarget, a, e, changes *Volume) error {
    if a == nil {
        glog.V(2).Infof("Creating PersistentVolume with Name:%q", fi.StringValue(e.Name))

        opt := cinder.CreateOpts{
            Size:             int(*e.SizeGB),
            AvailabilityZone: fi.StringValue(e.AvailabilityZone),
            Metadata:         e.Tags,
            Name:             fi.StringValue(e.Name),
            VolumeType:       fi.StringValue(e.VolumeType),
        }

        v, err := t.Cloud.CreateVolume(opt)
        if err != nil {
            return fmt.Errorf("error creating PersistentVolume: %v", err)
        }

        e.ID = fi.String(v.ID)
        return nil
    }

    if changes != nil && changes.Tags != nil {
        glog.V(2).Infof("Update the tags on volume %q: %v, the differences are %v", fi.StringValue(e.ID), e.Tags, changes.Tags)

        err := t.Cloud.SetVolumeTags(fi.StringValue(e.ID), e.Tags)
        if err != nil {
            return fmt.Errorf("error updating the tags on volume %q: %v", fi.StringValue(e.ID), err)
        }
    }

    glog.V(2).Infof("Openstack task Volume::RenderOpenstack did nothing")
    return nil
}
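The Volume task follows the usual fi reconciliation contract: Find fetches the actual cloud state, CheckChanges validates the delta, and RenderOpenstack applies it. A stripped-down sketch of that loop with hypothetical helper names (the real machinery is fi.DefaultDeltaRunMethod in upup/pkg/fi):

package main

import "fmt"

// volumeState is a simplified stand-in for the Volume task's fields.
type volumeState struct {
    Name   string
    SizeGB int64
}

// reconcile: find actual state, then create, reject, or no-op.
func reconcile(find func(name string) *volumeState, create func(volumeState) error, expected volumeState) error {
    actual := find(expected.Name)
    if actual == nil {
        return create(expected) // RenderOpenstack's "a == nil" branch
    }
    if actual.SizeGB != expected.SizeGB {
        // CheckChanges: SizeGB cannot change in place
        return fmt.Errorf("cannot change SizeGB of %q in place", expected.Name)
    }
    return nil // already in sync
}

func main() {
    store := map[string]volumeState{}
    find := func(name string) *volumeState {
        if v, ok := store[name]; ok {
            return &v
        }
        return nil
    }
    create := func(v volumeState) error { store[v.Name] = v; return nil }

    _ = reconcile(find, create, volumeState{Name: "etcd-a", SizeGB: 20})
    fmt.Println(store["etcd-a"].SizeGB) // 20
}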
@@ -133,7 +133,8 @@ func BuildCloud(cluster *kops.Cluster) (fi.Cloud, error) {
        }
    case kops.CloudProviderOpenstack:
        {
            osc, err := openstack.NewOpenstackCloud()
            cloudTags := map[string]string{openstack.TagClusterName: cluster.ObjectMeta.Name}
            osc, err := openstack.NewOpenstackCloud(cloudTags)
            if err != nil {
                return nil, err
            }
@@ -1,4 +1,4 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",

@@ -27,3 +27,12 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    size = "small",
    srcs = ["keypair_test.go"],
    importpath = "k8s.io/kops/upup/pkg/fi/fitasks",
    library = ":go_default_library",
    deps = ["//upup/pkg/fi:go_default_library"],
)
@@ -0,0 +1,44 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package fitasks

import (
    "k8s.io/kops/upup/pkg/fi"
    "strings"
    "testing"
)

func TestKeypairDeps(t *testing.T) {
    ca := &Keypair{}
    cert := &Keypair{
        Signer: ca,
    }

    tasks := make(map[string]fi.Task)
    tasks["ca"] = ca
    tasks["cert"] = cert

    deps := fi.FindTaskDependencies(tasks)

    if strings.Join(deps["ca"], ",") != "" {
        t.Errorf("unexpected dependencies for ca: %v", deps["ca"])
    }

    if strings.Join(deps["cert"], ",") != "ca" {
        t.Errorf("unexpected dependencies for cert: %v", deps["cert"])
    }
}
@@ -32,8 +32,10 @@ type SecretStore interface {
    DeleteSecret(item *KeystoreItem) error
    // FindSecret finds a secret, if exists. Returns nil,nil if not found
    FindSecret(id string) (*Secret, error)
    // GetOrCreateSecret creates or replace a secret
    // GetOrCreateSecret creates a secret
    GetOrCreateSecret(id string, secret *Secret) (current *Secret, created bool, err error)
    // ReplaceSecret will forcefully update an existing secret if it exists
    ReplaceSecret(id string, secret *Secret) (current *Secret, err error)
    // ListSecrets lists the ids of all known secrets
    ListSecrets() ([]string, error)
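The interface change splits "write a secret" into two explicit operations: GetOrCreateSecret for idempotent first writes and ReplaceSecret for deliberate overwrites (rotation). A sketch of the caller-side choice with simplified stand-in types; the in-memory store exists only to make the example runnable:

package main

import "fmt"

type secret struct{ Data []byte }

type secretStore interface {
    GetOrCreateSecret(id string, s *secret) (current *secret, created bool, err error)
    ReplaceSecret(id string, s *secret) (*secret, error)
}

func ensureSecret(store secretStore, id string, s *secret, rotate bool) (*secret, error) {
    if rotate {
        // Forcefully overwrite whatever is stored.
        return store.ReplaceSecret(id, s)
    }
    current, created, err := store.GetOrCreateSecret(id, s)
    if err != nil {
        return nil, err
    }
    if !created {
        fmt.Printf("secret %q already existed; kept current value\n", id)
    }
    return current, nil
}

type memStore map[string]*secret

func (m memStore) GetOrCreateSecret(id string, s *secret) (*secret, bool, error) {
    if cur, ok := m[id]; ok {
        return cur, false, nil
    }
    m[id] = s
    return s, true, nil
}

func (m memStore) ReplaceSecret(id string, s *secret) (*secret, error) {
    m[id] = s
    return s, nil
}

func main() {
    store := memStore{}
    ensureSecret(store, "admin-password", &secret{Data: []byte("v1")}, false)
    ensureSecret(store, "admin-password", &secret{Data: []byte("v2")}, true) // rotation
    fmt.Println(string(store["admin-password"].Data))                        // v2
}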
@@ -157,7 +157,7 @@ func (c *ClientsetSecretStore) GetOrCreateSecret(name string, secret *fi.Secret)
        return s, false, nil
    }

    _, err = c.createSecret(secret, name)
    _, err = c.createSecret(secret, name, false)
    if err != nil {
        if errors.IsAlreadyExists(err) && i == 0 {
            glog.Infof("Got already-exists error when writing secret; likely due to concurrent creation. Will retry")

@@ -181,6 +181,21 @@ func (c *ClientsetSecretStore) GetOrCreateSecret(name string, secret *fi.Secret)
    return s, true, nil
}

// ReplaceSecret implements fi.SecretStore::ReplaceSecret
func (c *ClientsetSecretStore) ReplaceSecret(name string, secret *fi.Secret) (*fi.Secret, error) {
    _, err := c.createSecret(secret, name, true)
    if err != nil {
        return nil, fmt.Errorf("unable to write secret: %v", err)
    }

    // Confirm the secret exists
    s, err := c.loadSecret(name)
    if err != nil {
        return nil, fmt.Errorf("unable to load secret immediately after creation: %v", err)
    }
    return s, nil
}

// loadSecret returns the named secret, if it exists, otherwise returns nil
func (c *ClientsetSecretStore) loadSecret(name string) (*fi.Secret, error) {
    name = NamePrefix + name

@@ -207,8 +222,8 @@ func parseSecret(keyset *kops.Keyset) (*fi.Secret, error) {
    return s, nil
}

// createSecret writes the secret, but only if it does not exist
func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string) (*kops.Keyset, error) {
// createSecret will create the Secret, overwriting an existing secret if replace is true
func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string, replace bool) (*kops.Keyset, error) {
    keyset := &kops.Keyset{}
    keyset.Name = NamePrefix + name
    keyset.Spec.Type = kops.SecretTypeSecret

@@ -221,5 +236,8 @@ func (c *ClientsetSecretStore) createSecret(s *fi.Secret, name string) (*kops.Ke
        PrivateMaterial: s.Data,
    })

    if replace {
        return c.clientset.Keysets(c.namespace).Update(keyset)
    }
    return c.clientset.Keysets(c.namespace).Create(keyset)
}
@@ -127,7 +127,7 @@ func (c *VFSSecretStore) GetOrCreateSecret(id string, secret *fi.Secret) (*fi.Se
        return nil, false, err
    }

    err = c.createSecret(secret, p, acl)
    err = c.createSecret(secret, p, acl, false)
    if err != nil {
        if os.IsExist(err) && i == 0 {
            glog.Infof("Got already-exists error when writing secret; likely due to concurrent creation. Will retry")

@@ -151,6 +151,27 @@ func (c *VFSSecretStore) GetOrCreateSecret(id string, secret *fi.Secret) (*fi.Se
    return s, true, nil
}

func (c *VFSSecretStore) ReplaceSecret(id string, secret *fi.Secret) (*fi.Secret, error) {
    p := c.buildSecretPath(id)

    acl, err := acls.GetACL(p, c.cluster)
    if err != nil {
        return nil, err
    }

    err = c.createSecret(secret, p, acl, true)
    if err != nil {
        return nil, fmt.Errorf("unable to write secret: %v", err)
    }

    // Confirm the secret exists
    s, err := c.loadSecret(p)
    if err != nil {
        return nil, fmt.Errorf("unable to load secret immediately after creation %v: %v", p, err)
    }
    return s, nil
}

func (c *VFSSecretStore) loadSecret(p vfs.Path) (*fi.Secret, error) {
    data, err := p.ReadFile()
    if err != nil {

@@ -166,11 +187,15 @@ func (c *VFSSecretStore) loadSecret(p vfs.Path) (*fi.Secret, error) {
    return s, nil
}

// createSecret writes the secret, but only if it does not exists
func (c *VFSSecretStore) createSecret(s *fi.Secret, p vfs.Path, acl vfs.ACL) error {
// createSecret will create the Secret, overwriting an existing secret if replace is true
func (c *VFSSecretStore) createSecret(s *fi.Secret, p vfs.Path, acl vfs.ACL, replace bool) error {
    data, err := json.Marshal(s)
    if err != nil {
        return fmt.Errorf("error serializing secret: %v", err)
    }

    if replace {
        return p.WriteFile(data, acl)
    }
    return p.CreateFile(data, acl)
}
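At the VFS layer the CreateFile/WriteFile split maps onto classic open(2) semantics: create-only write versus unconditional overwrite. A sketch using plain files as an analogy (the real store targets S3/GCS/etc. through the vfs package, so this is illustrative only):

package main

import (
    "fmt"
    "os"
)

// writeSecretFile writes data create-only or with replacement.
func writeSecretFile(path string, data []byte, replace bool) error {
    flags := os.O_WRONLY | os.O_CREATE | os.O_TRUNC
    if !replace {
        // O_EXCL makes the create fail if the file already exists, which is
        // how GetOrCreateSecret detects a concurrent creation and retries.
        flags = os.O_WRONLY | os.O_CREATE | os.O_EXCL
    }
    f, err := os.OpenFile(path, flags, 0600)
    if err != nil {
        return err
    }
    defer f.Close()
    _, err = f.Write(data)
    return err
}

func main() {
    path := os.TempDir() + "/demo-secret"
    defer os.Remove(path)

    fmt.Println(writeSecretFile(path, []byte("v1"), false)) // <nil>
    fmt.Println(writeSecretFile(path, []byte("v2"), false)) // "file exists" error
    fmt.Println(writeSecretFile(path, []byte("v2"), true))  // <nil>: replaced
}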
@@ -68,7 +68,7 @@ func (s *VFSCAStore) VFSPath() vfs.Path {
    return s.basedir
}

// Retrieves the CA keypair, generating a new keypair if not found
// Retrieves the CA keypair. No longer generates keypairs if not found.
func (s *VFSCAStore) readCAKeypairs(id string) (*certificates, *privateKeys, error) {
    s.mutex.Lock()
    defer s.mutex.Unlock()

@@ -98,16 +98,15 @@ func (s *VFSCAStore) readCAKeypairs(id string) (*certificates, *privateKeys, err
    }

    if caPrivateKeys == nil {
        caCertificates, caPrivateKeys, err = s.generateCACertificate(id)
        if err != nil {
            return nil, nil, err
        }

        // We no longer generate CA certificates automatically - too race-prone
        return caCertificates, caPrivateKeys, nil
    }

    cached = &cachedEntry{certificates: caCertificates, privateKeys: caPrivateKeys}
    s.cachedCAs[id] = cached

    return cached.certificates, cached.privateKeys, nil

}

func BuildCAX509Template() *x509.Certificate {