mirror of https://github.com/kubernetes/kops.git

commit a03ba42b56
Merge branch 'master' into kris-and-eric-1282

Makefile (23 lines changed)
@@ -31,7 +31,23 @@ MAKEDIR:=$(strip $(shell dirname "$(realpath $(lastword $(MAKEFILE_LIST)))"))
# Keep in sync with upup/models/cloudup/resources/addons/dns-controller/
DNS_CONTROLLER_TAG=1.4.1

VERSION?=1.5.0-alpha1
ifndef VERSION
  # To keep both CI and end-users building from source happy,
  # we expect that CI sets CI=1.
  #
  # For end users, they need only build kops, and they can use the last
  # released version of nodeup/protokube.
  # For CI, we continue to build a synthetic version from the git SHA, so
  # we never cross versions.
  #
  # We expect that if you are uploading nodeup/protokube, you will set
  # VERSION (along with S3_BUCKET), either directly or by setting CI=1
  ifndef CI
    VERSION=1.5.0-alpha1
  else
    VERSION := git-$(shell git describe --always)
  endif
endif

# Go exports:
@@ -80,6 +96,7 @@ test:
	go test k8s.io/kops/dns-controller/pkg/... -args -v=1 -logtostderr
	go test k8s.io/kops/cmd/... -args -v=1 -logtostderr
	go test k8s.io/kops/tests/... -args -v=1 -logtostderr
	go test k8s.io/kops/util/... -args -v=1 -logtostderr

crossbuild-nodeup:
	mkdir -p .build/dist/
@@ -123,11 +140,11 @@ upload: kops version-dist
	aws s3 sync --acl public-read .build/upload/ ${S3_BUCKET}

gcs-upload: version-dist
	@echo "== Logging gcloud info =="
	@gcloud info
	@echo "== Uploading kops =="
	gsutil -h "Cache-Control:private, max-age=0, no-transform" -m cp -n -r .build/upload/kops/* ${GCS_LOCATION}

# In CI testing, always upload the CI version.
gcs-publish-ci: VERSION := git-$(shell git describe --always)
gcs-publish-ci: gcs-upload
	echo "${GCS_URL}/${VERSION}" > .build/upload/${LATEST_FILE}
	gsutil -h "Cache-Control:private, max-age=0, no-transform" cp .build/upload/${LATEST_FILE} ${GCS_LOCATION}
@@ -1,6 +1,6 @@
spec:
  images:
  - name: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2016-12-19
  - name: kope.io/k8s-1.5-debian-jessie-amd64-hvm-ebs-2017-01-09
    providerID: aws
  cluster:
    kubernetesVersion: v1.5.1
@@ -65,6 +65,9 @@ type CreateClusterOptions struct {
	// The network topology to use
	Topology string

	// The DNS type to use (public/private)
	DNSType string

	// Enable/Disable Bastion Host complete setup
	Bastion bool
@@ -80,7 +83,8 @@ func (o *CreateClusterOptions) InitDefaults() {
	o.Networking = "kubenet"
	o.AssociatePublicIP = true
	o.Channel = api.DefaultChannel
	o.Topology = "public"
	o.Topology = api.TopologyPublic
	o.DNSType = string(api.DNSTypePublic)
	o.Bastion = false
}
@@ -146,6 +150,9 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
	// Network topology
	cmd.Flags().StringVarP(&options.Topology, "topology", "t", options.Topology, "Controls network topology for the cluster. public|private. Default is 'public'.")

	// DNS
	cmd.Flags().StringVar(&options.DNSType, "dns", options.DNSType, "DNS hosted zone to use: public|private. Default is 'public'.")

	// Bastion
	cmd.Flags().BoolVar(&options.Bastion, "bastion", options.Bastion, "Pass the --bastion flag to enable a bastion instance group. Only applies to private topology.")
@@ -458,7 +465,7 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
	case api.TopologyPrivate:
		if !supportsPrivateTopology(cluster.Spec.Networking) {
			return fmt.Errorf("Invalid networking option %s. Currently only '--networking cni', '--networking kopeio-vxlan', '--networking weave', '--networking calico' are supported for private topologies", c.Networking)
			return fmt.Errorf("Invalid networking option %s. Currently only '--networking kopeio-vxlan', '--networking weave', '--networking calico' (or '--networking cni') are supported for private topologies", c.Networking)
		}
		cluster.Spec.Topology = &api.TopologySpec{
			Masters: api.TopologyPrivate,
@@ -489,16 +496,8 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
		bastionGroup.ObjectMeta.Name = "bastions"
		instanceGroups = append(instanceGroups, bastionGroup)

		// Logic to handle default bastion names
		if c.DNSZone != "" {
			cluster.Spec.Topology.Bastion = &api.BastionSpec{
				BastionPublicName: "bastion-" + c.DNSZone,
			}
		} else {
			// Use default zone and cluster name
			cluster.Spec.Topology.Bastion = &api.BastionSpec{
				BastionPublicName: "bastion-" + clusterName,
			}
		cluster.Spec.Topology.Bastion = &api.BastionSpec{
			BastionPublicName: "bastion." + clusterName,
		}
	}
@@ -506,6 +505,7 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
		return fmt.Errorf("Invalid topology %s.", c.Topology)
	}

	// NAT Gateway/ElasticIP
	if c.NgwIds != "" {
		ngwEipList := make([]string, 0)
@@ -530,6 +530,28 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
		}
		// fmt.Printf("This is cluster.Spec.Subnets %+v\n", subnet)
	}

	// DNS
	if c.DNSType == "" {
		// The flag default should have set this, but we might be being called as a library
		glog.Infof("Empty DNS. Defaulting to public DNS")
		c.DNSType = string(api.DNSTypePublic)
	}

	if cluster.Spec.Topology == nil {
		cluster.Spec.Topology = &api.TopologySpec{}
	}
	if cluster.Spec.Topology.DNS == nil {
		cluster.Spec.Topology.DNS = &api.DNSSpec{}
	}
	switch strings.ToLower(c.DNSType) {
	case "public":
		cluster.Spec.Topology.DNS.Type = api.DNSTypePublic
	case "private":
		cluster.Spec.Topology.DNS.Type = api.DNSTypePrivate
	default:
		return fmt.Errorf("unknown DNSType: %q", c.DNSType)
	}

	sshPublicKeys := make(map[string][]byte)
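The `--dns` flag is parsed case-insensitively, with an empty value falling back to public DNS (for library callers that skip flag parsing). A self-contained sketch of that normalization, with local stand-ins for the kops `api.DNSType` constants:

```go
package main

import (
	"fmt"
	"strings"
)

// Local stand-ins for api.DNSTypePublic / api.DNSTypePrivate.
type DNSType string

const (
	DNSTypePublic  DNSType = "Public"
	DNSTypePrivate DNSType = "Private"
)

// parseDNSType mirrors the switch in RunCreateCluster: empty input
// defaults to public, unknown values are an error.
func parseDNSType(s string) (DNSType, error) {
	switch strings.ToLower(s) {
	case "", "public":
		return DNSTypePublic, nil
	case "private":
		return DNSTypePrivate, nil
	default:
		return "", fmt.Errorf("unknown DNSType: %q", s)
	}
}

func main() {
	for _, in := range []string{"", "Public", "PRIVATE", "bogus"} {
		t, err := parseDNSType(in)
		fmt.Printf("%q -> %q err=%v\n", in, t, err)
	}
}
```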
@@ -27,6 +27,7 @@ import (
	"k8s.io/kops/pkg/apis/kops/registry"
	"k8s.io/kops/util/pkg/tables"
	k8sapi "k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/util/sets"
)

type GetClusterOptions struct {
@@ -126,14 +127,14 @@ func RunGetClusters(context Factory, out io.Writer, options *GetClusterOptions)
		t.AddColumn("CLOUD", func(c *api.Cluster) string {
			return c.Spec.CloudProvider
		})
		t.AddColumn("SUBNETS", func(c *api.Cluster) string {
			var subnetNames []string
		t.AddColumn("ZONES", func(c *api.Cluster) string {
			zones := sets.NewString()
			for _, s := range c.Spec.Subnets {
				subnetNames = append(subnetNames, s.Name)
				zones.Insert(s.Zone)
			}
			return strings.Join(subnetNames, ",")
			return strings.Join(zones.List(), ",")
		})
		return t.Render(clusters, out, "NAME", "CLOUD", "SUBNETS")
		return t.Render(clusters, out, "NAME", "CLOUD", "ZONES")

	case OutputYaml:
		for i, cluster := range clusters {
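Switching the column from subnet names to `sets.NewString` means duplicate zones collapse and the output is sorted, because `sets.String.List()` returns sorted keys. A self-contained sketch of the same behavior using a plain map in place of the Kubernetes sets package:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Subnet zones as they might appear on a cluster spec; note the duplicate.
	zones := []string{"us-east-1b", "us-east-1a", "us-east-1a"}

	set := map[string]bool{}
	for _, z := range zones {
		set[z] = true
	}
	var list []string
	for z := range set {
		list = append(list, z)
	}
	sort.Strings(list) // sets.String.List() is also sorted

	fmt.Println(strings.Join(list, ",")) // us-east-1a,us-east-1b
}
```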
@@ -205,9 +205,19 @@ func RunUpdateCluster(f *util.Factory, clusterName string, out io.Writer, c *Upd
	if !hasKubecfg {
		// Assume initial creation
		fmt.Printf("\n")
		fmt.Printf("Cluster is starting. It should be ready in a few minutes.\n")
		fmt.Printf("\n")
		if c.Target == cloudup.TargetTerraform {
			fmt.Printf("\n")
			fmt.Printf("Terraform output has been placed into %s\n", c.OutDir)
			fmt.Printf("Run these commands to apply the configuration:\n")
			fmt.Printf(" cd %s\n", c.OutDir)
			fmt.Printf(" terraform plan\n")
			fmt.Printf(" terraform apply\n")
			fmt.Printf("\n")
		} else {
			fmt.Printf("\n")
			fmt.Printf("Cluster is starting. It should be ready in a few minutes.\n")
			fmt.Printf("\n")
		}
		fmt.Printf("Suggestions:\n")
		fmt.Printf(" * list nodes: kubectl get nodes --show-labels\n")
		if cluster.Spec.Topology.Masters == kops.TopologyPublic {
@@ -23,8 +23,8 @@ import (
	"k8s.io/kops/dns-controller/pkg/dns"
	"k8s.io/kops/dns-controller/pkg/watchers"
	"k8s.io/kubernetes/federation/pkg/dnsprovider"
	client "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
	client_extensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1"
	client "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
	client_extensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
	kubectl_util "k8s.io/kubernetes/pkg/kubectl/cmd/util"
	"os"
@@ -146,7 +146,7 @@ func (c *DNSController) snapshotIfChangedAndReady() *snapshot {
	aliasTargets := make(map[string][]Record)

	if c.lastSuccessfulSnapshot != nil && s.changeCount == c.lastSuccessfulSnapshot.changeCount {
		glog.V(4).Infof("No changes since DNS values last successfully applied")
		glog.V(6).Infof("No changes since DNS values last successfully applied")
		return nil
	}
@@ -257,6 +257,8 @@ func (c *DNSController) runOnce() error {
	ttl := DefaultTTL
	glog.Infof("Using default TTL of %v", ttl)

	glog.V(4).Infof("updating records for %s: %v -> %v", k, newValues, oldValues)

	err := op.updateRecords(k, newValues, int64(ttl.Seconds()))
	if err != nil {
		glog.Infof("error updating records for %s: %v", k, err)
@@ -480,19 +482,44 @@ func (s *DNSControllerScope) MarkReady() {
}

func (s *DNSControllerScope) Replace(recordName string, records []Record) {
	glog.V(2).Infof("Update %s/%s: %v", s.ScopeName, recordName, records)

	s.mutex.Lock()
	defer s.mutex.Unlock()

	existing, exists := s.Records[recordName]

	if len(records) == 0 {
		if !exists {
			glog.V(6).Infof("skipping spurious removal of record %s/%s", s.ScopeName, recordName)
			return
		}

		delete(s.Records, recordName)
	} else {
		if recordsSliceEquals(existing, records) {
			glog.V(6).Infof("skipping spurious update of record %s/%s=%s", s.ScopeName, recordName, records)
			return
		}

		s.Records[recordName] = records
	}

	glog.V(2).Infof("Update %s/%s: %v", s.ScopeName, recordName, records)
	s.parent.recordChange()
}

// recordsSliceEquals compares two []Record
func recordsSliceEquals(l, r []Record) bool {
	if len(l) != len(r) {
		return false
	}
	for i := range l {
		if l[i] != r[i] {
			return false
		}
	}
	return true
}

// CreateScope creates a scope object.
func (c *DNSController) CreateScope(scopeName string) (Scope, error) {
	c.mutex.Lock()
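The change-suppression pattern above (compare incoming records against the stored ones and only bump the change counter on a real difference) is easy to exercise in isolation. A minimal sketch with a hypothetical comparable `Record` type; the real `recordsSliceEquals` likewise relies on `Record` supporting `==`:

```go
package main

import "fmt"

// Hypothetical stand-in for the dns-controller Record type.
type Record struct {
	RecordType string
	FQDN       string
	Value      string
}

func recordsSliceEquals(l, r []Record) bool {
	if len(l) != len(r) {
		return false
	}
	for i := range l {
		if l[i] != r[i] {
			return false
		}
	}
	return true
}

func main() {
	a := []Record{{"A", "api.example.com", "203.0.113.10"}}
	b := []Record{{"A", "api.example.com", "203.0.113.10"}}
	c := []Record{{"A", "api.example.com", "203.0.113.11"}}

	fmt.Println(recordsSliceEquals(a, b)) // true  -> update would be skipped
	fmt.Println(recordsSliceEquals(a, c)) // false -> update proceeds
}
```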
@@ -11,7 +11,7 @@ Bastion provide an external facing point of entry into a network containing priv
To enable a bastion instance group, a user will need to set the `--bastion` flag on cluster create

```
```yaml
kops create cluster --topology private --networking $provider --bastion $NAME
```
@@ -19,13 +19,13 @@ kops create cluster --topology private --networking $provider --bastion $NAME
You can edit the bastion instance group to make changes. By default the name of the bastion instance group will be `bastions` and you can specify the name of the cluster with `--name` as in:

```
```yaml
kops edit ig bastions --name $KOPS_NAME
```

You should now be able to edit and configure your bastion instance group.

```
```yaml
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
@@ -51,21 +51,21 @@ If you do not want the bastion instance group created at all, simply drop the `-
By default the bastion instance group will create a public CNAME alias that will point to the bastion ELB.

The default bastion name is `bastion-$NAME` as in
The default bastion name is `bastion.$NAME` as in

```
bastion-example.kubernetes.com
```yaml
bastion.example.kubernetes.com
```

Unless a user is using `--dns-zone`, which will inherently use the `bastion-$ZONE` syntax.

You can define a custom bastion CNAME by editing the main cluster config `kops edit cluster $NAME` and modifying the following block

```
```yaml
spec:
  topology:
    bastion:
      bastionPublicName: bastion-example.kubernetes.com
      bastionPublicName: bastion.example.kubernetes.com
```
@@ -75,7 +75,7 @@ The bastion is accessed via an AWS ELB. The ELB is required to gain secure acces
You can increase the ELB idle timeout by editing the main cluster config `kops edit cluster $NAME` and modifying the following block

```
```yaml
spec:
  topology:
    bastion:
@@ -83,3 +83,22 @@ spec:
```

The maximum value allowed by AWS is 1200 seconds (20 minutes). [More information](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html)

### Using the bastion

Once your cluster is set up and you need to SSH into the bastion, you can reach a cluster resource using the following steps

```bash
# Verify you have an SSH agent running. This should match whatever you built your cluster with.
ssh-add -l
# If you need to add an agent
ssh-add path/to/public/key

# Now you can SSH into the bastion
ssh -A admin@<bastion-ELB-address>

# Where <bastion-ELB-address> is usually bastion.$clustername (bastion.example.kubernetes.cluster) unless otherwise specified

```

Now that you can successfully SSH into the bastion with a forwarded SSH agent, you can SSH into any of your cluster resources using their local IP address. You can get their local IP address from the cloud console.
@@ -15,6 +15,7 @@ An example:

```
...
spec:
  nodeLabels:
    spot: "false"
  cloudLabels:
@@ -0,0 +1,126 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package model

import (
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
	"strings"
)

// SysctlBuilder sets up our sysctls
type SysctlBuilder struct {
	*NodeupModelContext
}

var _ fi.ModelBuilder = &SysctlBuilder{}

func (b *SysctlBuilder) Build(c *fi.ModelBuilderContext) error {
	var sysctls []string

	// Common settings
	{
		sysctls = append(sysctls,
			"# Kubernetes Settings",
			"")

		// A higher vm.max_map_count is great for elasticsearch, mongo, or other mmap users
		// See https://github.com/kubernetes/kops/issues/1340
		sysctls = append(sysctls, "vm.max_map_count = 262144",
			"")

		// See https://github.com/kubernetes/kubernetes/pull/38001
		sysctls = append(sysctls,
			"kernel.softlockup_panic = 1",
			"kernel.softlockup_all_cpu_backtrace = 1",
			"")

		// See https://github.com/kubernetes/kube-deploy/issues/261
		sysctls = append(sysctls,
			"# Increase the number of connections",
			"net.core.somaxconn = 32768",
			"",

			"# Maximum Socket Receive Buffer",
			"net.core.rmem_max = 16777216",
			"",

			"# Default Socket Send Buffer",
			"net.core.wmem_max = 16777216",
			"",

			"# Increase the maximum total buffer-space allocatable",
			"net.ipv4.tcp_wmem = 4096 12582912 16777216",
			"net.ipv4.tcp_rmem = 4096 12582912 16777216",
			"",

			"# Increase the number of outstanding syn requests allowed",
			"net.ipv4.tcp_max_syn_backlog = 8096",
			"",

			"# For persistent HTTP connections",
			"net.ipv4.tcp_slow_start_after_idle = 0",
			"",

			"# Increase the tcp-time-wait buckets pool size to prevent simple DOS attacks",
			"net.ipv4.tcp_tw_reuse = 1",
			"",

			// We can't change the local_port_range without changing the NodePort range
			//"# Allowed local port range",
			//"net.ipv4.ip_local_port_range = 10240 65535",
			//"",

			"# Max number of packets that can be queued on interface input",
			"# If kernel is receiving packets faster than can be processed",
			"# this queue increases",
			"net.core.netdev_max_backlog = 16384",
			"",

			"# Increase size of file handles and inode cache",
			"fs.file-max = 2097152",
			"",
		)
	}

	if b.Cluster.Spec.CloudProvider == string(fi.CloudProviderAWS) {
		sysctls = append(sysctls,
			"# AWS settings",
			"",
			"# Issue #23395",
			"net.ipv4.neigh.default.gc_thresh1=0",
			"")
	}

	if b.Cluster.Spec.CloudProvider == string(fi.CloudProviderGCE) {
		sysctls = append(sysctls,
			"# GCE settings",
			"",
			"net.ipv4.ip_forward=1",
			"")
	}

	t := &nodetasks.File{
		Path:            "/etc/sysctl.d/99-k8s-general.conf",
		Contents:        fi.NewStringResource(strings.Join(sysctls, "\n")),
		Type:            nodetasks.FileType_File,
		OnChangeExecute: []string{"sysctl", "--system"},
	}
	c.AddTask(t)

	return nil
}
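This builder replaces the static per-cloud sysctl files deleted later in this commit with one generated file. To see what the task writes, the string assembly can be run standalone; a trimmed sketch (only a few of the settings above) printing the contents the builder would hand to `nodetasks.File`:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	var sysctls []string
	sysctls = append(sysctls,
		"# Kubernetes Settings",
		"",
		"vm.max_map_count = 262144",
		"",
		"kernel.softlockup_panic = 1",
		"kernel.softlockup_all_cpu_backtrace = 1",
		"")

	// On AWS the builder also appends the gc_thresh1 workaround.
	sysctls = append(sysctls,
		"# AWS settings",
		"",
		"# Issue #23395",
		"net.ipv4.neigh.default.gc_thresh1=0",
		"")

	// This joined string is what lands in /etc/sysctl.d/99-k8s-general.conf;
	// OnChangeExecute then runs `sysctl --system` to apply it.
	fmt.Println(strings.Join(sysctls, "\n"))
}
```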
@@ -348,6 +348,7 @@ func (c *Cluster) FillDefaults() error {
	// Topology support
	if c.Spec.Topology == nil {
		c.Spec.Topology = &TopologySpec{Masters: TopologyPublic, Nodes: TopologyPublic}
		c.Spec.Topology.DNS = &DNSSpec{Type: DNSTypePublic}
	}

	if len(c.Spec.KubernetesAPIAccess) == 0 {
@@ -34,4 +34,18 @@ type TopologySpec struct {
	// or disable inbound SSH communication from the Internet, some call bastion
	// as the "jump server".
	Bastion *BastionSpec `json:"bastion,omitempty"`

	// DNS configures options relating to DNS, in particular whether we use a public or a private hosted zone
	DNS *DNSSpec `json:"dns,omitempty"`
}

type DNSSpec struct {
	Type DNSType `json:"type,omitempty"`
}

type DNSType string

const (
	DNSTypePublic  DNSType = "Public"
	DNSTypePrivate DNSType = "Private"
)
@@ -247,6 +247,14 @@ func Convert_v1alpha1_TopologySpec_To_kops_TopologySpec(in *TopologySpec, out *k
	} else {
		out.Bastion = nil
	}
	if in.DNS != nil {
		out.DNS = new(kops.DNSSpec)
		if err := Convert_v1alpha1_DNSSpec_To_kops_DNSSpec(in.DNS, out.DNS, s); err != nil {
			return err
		}
	} else {
		out.DNS = nil
	}
	return nil
}
@@ -261,5 +269,13 @@ func Convert_kops_TopologySpec_To_v1alpha1_TopologySpec(in *kops.TopologySpec, o
	} else {
		out.Bastion = nil
	}
	if in.DNS != nil {
		out.DNS = new(DNSSpec)
		if err := Convert_kops_DNSSpec_To_v1alpha1_DNSSpec(in.DNS, out.DNS, s); err != nil {
			return err
		}
	} else {
		out.DNS = nil
	}
	return nil
}
@@ -41,6 +41,14 @@ func SetDefaults_ClusterSpec(obj *ClusterSpec) {
		obj.Topology.Nodes = TopologyPublic
	}

	if obj.Topology.DNS == nil {
		obj.Topology.DNS = &DNSSpec{}
	}

	if obj.Topology.DNS.Type == "" {
		obj.Topology.DNS.Type = DNSTypePublic
	}

	if obj.API == nil {
		obj.API = &AccessSpec{}
	}
@@ -35,4 +35,18 @@ type TopologySpec struct {
	// or disable inbound SSH communication from the Internet, some call bastion
	// as the "jump server".
	Bastion *BastionSpec `json:"bastion,omitempty"`

	// DNS configures options relating to DNS, in particular whether we use a public or a private hosted zone
	DNS *DNSSpec `json:"dns,omitempty"`
}

type DNSSpec struct {
	Type DNSType `json:"type,omitempty"`
}

type DNSType string

const (
	DNSTypePublic  DNSType = "Public"
	DNSTypePrivate DNSType = "Private"
)
@@ -50,6 +50,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
		Convert_kops_ClusterSpec_To_v1alpha1_ClusterSpec,
		Convert_v1alpha1_DNSAccessSpec_To_kops_DNSAccessSpec,
		Convert_kops_DNSAccessSpec_To_v1alpha1_DNSAccessSpec,
		Convert_v1alpha1_DNSSpec_To_kops_DNSSpec,
		Convert_kops_DNSSpec_To_v1alpha1_DNSSpec,
		Convert_v1alpha1_DockerConfig_To_kops_DockerConfig,
		Convert_kops_DockerConfig_To_v1alpha1_DockerConfig,
		Convert_v1alpha1_EtcdClusterSpec_To_kops_EtcdClusterSpec,
@@ -555,6 +557,24 @@ func Convert_kops_DNSAccessSpec_To_v1alpha1_DNSAccessSpec(in *kops.DNSAccessSpec
	return autoConvert_kops_DNSAccessSpec_To_v1alpha1_DNSAccessSpec(in, out, s)
}

func autoConvert_v1alpha1_DNSSpec_To_kops_DNSSpec(in *DNSSpec, out *kops.DNSSpec, s conversion.Scope) error {
	out.Type = kops.DNSType(in.Type)
	return nil
}

func Convert_v1alpha1_DNSSpec_To_kops_DNSSpec(in *DNSSpec, out *kops.DNSSpec, s conversion.Scope) error {
	return autoConvert_v1alpha1_DNSSpec_To_kops_DNSSpec(in, out, s)
}

func autoConvert_kops_DNSSpec_To_v1alpha1_DNSSpec(in *kops.DNSSpec, out *DNSSpec, s conversion.Scope) error {
	out.Type = DNSType(in.Type)
	return nil
}

func Convert_kops_DNSSpec_To_v1alpha1_DNSSpec(in *kops.DNSSpec, out *DNSSpec, s conversion.Scope) error {
	return autoConvert_kops_DNSSpec_To_v1alpha1_DNSSpec(in, out, s)
}

func autoConvert_v1alpha1_DockerConfig_To_kops_DockerConfig(in *DockerConfig, out *kops.DockerConfig, s conversion.Scope) error {
	out.Bridge = in.Bridge
	out.LogLevel = in.LogLevel
@@ -41,6 +41,14 @@ func SetDefaults_ClusterSpec(obj *ClusterSpec) {
		obj.Topology.Nodes = TopologyPublic
	}

	if obj.Topology.DNS == nil {
		obj.Topology.DNS = &DNSSpec{}
	}

	if obj.Topology.DNS.Type == "" {
		obj.Topology.DNS.Type = DNSTypePublic
	}

	if obj.API == nil {
		obj.API = &AccessSpec{}
	}
@@ -34,4 +34,18 @@ type TopologySpec struct {
	// or disable inbound SSH communication from the Internet, some call bastion
	// as the "jump server".
	Bastion *BastionSpec `json:"bastion,omitempty"`

	// DNS configures options relating to DNS, in particular whether we use a public or a private hosted zone
	DNS *DNSSpec `json:"dns,omitempty"`
}

type DNSSpec struct {
	Type DNSType `json:"type,omitempty"`
}

type DNSType string

const (
	DNSTypePublic  DNSType = "Public"
	DNSTypePrivate DNSType = "Private"
)
@@ -34,9 +34,6 @@ func Bool(b bool) *bool {
	return &b
}

// PreviewPrivateDNS turns on the preview of the private hosted zone support.
var PreviewPrivateDNS = New("PreviewPrivateDNS", Bool(false))

// DNSPreCreate controls whether we pre-create DNS records.
var DNSPreCreate = New("DNSPreCreate", Bool(true))
@@ -20,7 +20,6 @@ import (
	"fmt"
	"github.com/golang/glog"
	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/pkg/featureflag"
	"strings"
)
@@ -165,9 +164,19 @@ func (m *KopsModelContext) UseLoadBalancerForAPI() bool {
}

func (m *KopsModelContext) UsePrivateDNS() bool {
	if featureflag.PreviewPrivateDNS.Enabled() {
		glog.Infof("PreviewPrivateDNS enabled; using private DNS")
		return true
	topology := m.Cluster.Spec.Topology
	if topology != nil && topology.DNS != nil {
		switch topology.DNS.Type {
		case kops.DNSTypePublic:
			return false
		case kops.DNSTypePrivate:
			return true

		default:
			glog.Warningf("Unknown DNS type %q", topology.DNS.Type)
			return false
		}
	}

	return false
}
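The `PreviewPrivateDNS` feature flag gives way to a spec-driven decision, and the selection logic is small enough to pin down with a table of cases. A self-contained sketch with local stand-ins for the kops types (the real method lives on `KopsModelContext`):

```go
package main

import "fmt"

type DNSType string

const (
	DNSTypePublic  DNSType = "Public"
	DNSTypePrivate DNSType = "Private"
)

type DNSSpec struct{ Type DNSType }
type TopologySpec struct{ DNS *DNSSpec }

// usePrivateDNS mirrors KopsModelContext.UsePrivateDNS: default to
// public when topology/DNS are unset or the type is unrecognized.
func usePrivateDNS(topology *TopologySpec) bool {
	if topology != nil && topology.DNS != nil {
		switch topology.DNS.Type {
		case DNSTypePrivate:
			return true
		}
	}
	return false
}

func main() {
	cases := []struct {
		topology *TopologySpec
		want     bool
	}{
		{nil, false},
		{&TopologySpec{}, false},
		{&TopologySpec{DNS: &DNSSpec{Type: DNSTypePublic}}, false},
		{&TopologySpec{DNS: &DNSSpec{Type: DNSTypePrivate}}, true},
		{&TopologySpec{DNS: &DNSSpec{Type: "Bogus"}}, false},
	}
	for _, c := range cases {
		fmt.Println(usePrivateDNS(c.topology) == c.want)
	}
}
```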
@@ -17,6 +17,8 @@ limitations under the License.
package model

import (
	"fmt"
	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
	"strings"
@@ -29,63 +31,74 @@ type DNSModelBuilder struct {

var _ fi.ModelBuilder = &DNSModelBuilder{}

func (b *DNSModelBuilder) ensureDNSZone(c *fi.ModelBuilderContext) error {
	// Configuration for a DNS zone
	dnsZone := &awstasks.DNSZone{
		Name: s(b.NameForDNSZone()),
	}

	topology := b.Cluster.Spec.Topology
	if topology != nil && topology.DNS != nil {
		switch topology.DNS.Type {
		case kops.DNSTypePublic:
			// Ignore

		case kops.DNSTypePrivate:
			dnsZone.Private = fi.Bool(true)
			dnsZone.PrivateVPC = b.LinkToVPC()

		default:
			return fmt.Errorf("Unknown DNS type %q", topology.DNS.Type)
		}
	}

	if !strings.Contains(b.Cluster.Spec.DNSZone, ".") {
		// Looks like a hosted zone ID
		dnsZone.ZoneID = s(b.Cluster.Spec.DNSZone)
	} else {
		// Looks like a normal DNS name
		dnsZone.DNSName = s(b.Cluster.Spec.DNSZone)
	}

	return c.EnsureTask(dnsZone)
}

func (b *DNSModelBuilder) Build(c *fi.ModelBuilderContext) error {
	// Add a HostedZone if we are going to publish a dns record that depends on it
	if b.UsePrivateDNS() || b.UsesBastionDns() {
		// UsePrivateDNS is only exposed as a feature flag currently
		// TODO: We may still need a public zone to publish an ELB

	if b.UsePrivateDNS() {
		// Check to see if we are using a bastion DNS record that points to the hosted zone
		// If we are, we need to make sure we include the hosted zone as a task

		// Configuration for a DNS zone, attached to our VPC
		dnsZone := &awstasks.DNSZone{
			Name:       s("private-" + b.Cluster.Spec.DNSZone),
			Private:    fi.Bool(true),
			PrivateVPC: b.LinkToVPC(),
		if err := b.ensureDNSZone(c); err != nil {
			return err
		}

		if !strings.Contains(b.Cluster.Spec.DNSZone, ".") {
			// Looks like a hosted zone ID
			dnsZone.ZoneID = s(b.Cluster.Spec.DNSZone)
		} else {
			// Looks like a normal DNS name
			dnsZone.DNSName = s(b.Cluster.Spec.DNSZone)
		}

		c.AddTask(dnsZone)
	} else if b.UseLoadBalancerForAPI() {
		// This will point our DNS to the load balancer, and put the pieces
		// together for kubectl to work

		// Configuration for a DNS name for the master
		dnsZone := &awstasks.DNSZone{
			Name:    s(b.Cluster.Spec.DNSZone),
			Private: fi.Bool(false),
		}

		if !strings.Contains(b.Cluster.Spec.DNSZone, ".") {
			// Looks like a hosted zone ID
			dnsZone.ZoneID = s(b.Cluster.Spec.DNSZone)
		} else {
			// Looks like a normal DNS name
			dnsZone.DNSName = s(b.Cluster.Spec.DNSZone)
		}

		c.AddTask(dnsZone)
	}

	if b.UseLoadBalancerForAPI() {
		// This will point our DNS to the load balancer, and put the pieces
		// together for kubectl to work

		dnsName := &awstasks.DNSName{
		if err := b.ensureDNSZone(c); err != nil {
			return err
		}

		apiDnsName := &awstasks.DNSName{
			Name: s(b.Cluster.Spec.MasterPublicName),
			Zone: &awstasks.DNSZone{Name: s(b.Cluster.Spec.DNSZone)},
			Zone: b.LinkToDNSZone(),
			ResourceType: s("A"),
			TargetLoadBalancer: b.LinkToELB("api"),
		}
		c.AddTask(dnsName)
		c.AddTask(apiDnsName)
	}

	if b.UsesBastionDns() {
		// Pulling this down into its own if statement. The DNS configuration here
		// is similar to others, but I would like to keep it on its own in case we need
		// to change anything.

		if err := b.ensureDNSZone(c); err != nil {
			return err
		}
	}

	return nil
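The refactor funnels all three callers (private DNS, API load balancer, bastion DNS) through `ensureDNSZone`, which uses `EnsureTask` so the zone task is only registered once no matter how many record types need it. A self-contained sketch of that idempotent-registration idea, with a hypothetical task map standing in for `fi.ModelBuilderContext`:

```go
package main

import "fmt"

// Hypothetical minimal stand-ins for fi.ModelBuilderContext and a task.
type Task struct{ Name string }

type Context struct{ tasks map[string]*Task }

// EnsureTask registers a task once; re-registering the same name is a no-op.
// (The real EnsureTask also verifies the tasks are equivalent; omitted here.)
func (c *Context) EnsureTask(t *Task) error {
	if c.tasks == nil {
		c.tasks = map[string]*Task{}
	}
	if _, found := c.tasks[t.Name]; found {
		return nil
	}
	c.tasks[t.Name] = t
	return nil
}

func main() {
	c := &Context{}
	// Private DNS, API load balancer, and bastion DNS can each require
	// the zone; only one task results.
	for i := 0; i < 3; i++ {
		c.EnsureTask(&Task{Name: "example.com"})
	}
	fmt.Println(len(c.tasks)) // 1
}
```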
@@ -17,9 +17,18 @@ limitations under the License.
package model

import (
	"fmt"
	"github.com/golang/glog"
	"k8s.io/kops/pkg/apis/kops"
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
	"strconv"
)

type Protocol int

const (
	ProtocolIPIP Protocol = 4
)

// FirewallModelBuilder configures firewall network objects
@@ -73,14 +82,79 @@ func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) error {
		c.AddTask(t)
	}

	// Nodes can talk to master nodes
	{
		t := &awstasks.SecurityGroupRule{
			Name: s("all-node-to-master"),
	// TODO: We need to remove the ALL rule
	//W1229 12:32:22.300132 9003 executor.go:109] error running task "SecurityGroupRule/node-to-master-443" (9m58s remaining to succeed): error creating SecurityGroupIngress: InvalidPermission.Duplicate: the specified rule "peer: sg-f6b1a68b, ALL, ALLOW" already exists
	//status code: 400, request id: 6a69627f-9a26-4bd0-b294-a9a96f89bc46

	udpPorts := []int64{}
	tcpPorts := []int64{}
	protocols := []Protocol{}

	// allow access to API
	tcpPorts = append(tcpPorts, 443)

	// allow cadvisor
	tcpPorts = append(tcpPorts, 4194)

	if b.Cluster.Spec.Networking != nil {
		if b.Cluster.Spec.Networking.Kopeio != nil {
			// VXLAN over UDP
			udpPorts = append(udpPorts, 4789)
		}

		if b.Cluster.Spec.Networking.Weave != nil {
			udpPorts = append(udpPorts, 6783)
			tcpPorts = append(tcpPorts, 6783)
			udpPorts = append(udpPorts, 6784)
		}

		if b.Cluster.Spec.Networking.Calico != nil {
			// Calico needs to access etcd
			// TODO: Remove, replace with etcd in calico manifest
			glog.Warningf("Opening etcd port on masters for access from the nodes, for calico. This is unsafe in untrusted environments.")
			tcpPorts = append(tcpPorts, 4001)

			tcpPorts = append(tcpPorts, 179)
			protocols = append(protocols, ProtocolIPIP)
		}
	}

	for _, udpPort := range udpPorts {
		c.AddTask(&awstasks.SecurityGroupRule{
			Name:          s(fmt.Sprintf("node-to-master-udp-%d", udpPort)),
			SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster),
			SourceGroup:   b.LinkToSecurityGroup(kops.InstanceGroupRoleNode),
			FromPort:      i64(udpPort),
			ToPort:        i64(udpPort),
			Protocol:      s("udp"),
		})
	}
	for _, tcpPort := range tcpPorts {
		c.AddTask(&awstasks.SecurityGroupRule{
			Name:          s(fmt.Sprintf("node-to-master-tcp-%d", tcpPort)),
			SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster),
			SourceGroup:   b.LinkToSecurityGroup(kops.InstanceGroupRoleNode),
			FromPort:      i64(tcpPort),
			ToPort:        i64(tcpPort),
			Protocol:      s("tcp"),
		})
	}
	for _, protocol := range protocols {
		awsName := strconv.Itoa(int(protocol))
		name := awsName
		switch protocol {
		case ProtocolIPIP:
			name = "ipip"
		default:
			glog.Warningf("unknown protocol %q - naming by number", awsName)
		}
		c.AddTask(t)

		c.AddTask(&awstasks.SecurityGroupRule{
			Name:          s("node-to-master-protocol-" + name),
			SecurityGroup: b.LinkToSecurityGroup(kops.InstanceGroupRoleMaster),
			SourceGroup:   b.LinkToSecurityGroup(kops.InstanceGroupRoleNode),
			Protocol:      s(awsName),
		})
	}

	return nil
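These per-port rules are what produce the Terraform fixture changes later in this commit (for example `node-to-master-tcp-4194`, and `node-to-master-protocol-ipip` for Calico) in place of the old `all-node-to-master` rule. A self-contained sketch of the rule-name generation only, with the networking mode as an assumed input:

```go
package main

import "fmt"

func main() {
	// Base ports opened for every cluster: API (443) and cadvisor (4194).
	tcpPorts := []int64{443, 4194}
	udpPorts := []int64{}

	networking := "weave" // assumption: one of kopeio|weave|calico
	switch networking {
	case "kopeio":
		udpPorts = append(udpPorts, 4789) // VXLAN over UDP
	case "weave":
		tcpPorts = append(tcpPorts, 6783)
		udpPorts = append(udpPorts, 6783, 6784)
	case "calico":
		tcpPorts = append(tcpPorts, 4001, 179) // etcd + BGP
		fmt.Println("node-to-master-protocol-ipip")
	}

	for _, p := range tcpPorts {
		fmt.Printf("node-to-master-tcp-%d\n", p)
	}
	for _, p := range udpPorts {
		fmt.Printf("node-to-master-udp-%d\n", p)
	}
}
```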
@@ -95,8 +169,14 @@ func (b *FirewallModelBuilder) buildMasterRules(c *fi.ModelBuilderContext) error
		VPC:         b.LinkToVPC(),
		Description: s("Security group for masters"),
		RemoveExtraRules: []string{
			"port=22",
			"port=443",
			"port=22",   // SSH
			"port=443",  // k8s api
			"port=4001", // etcd
			"port=4789", // VXLAN
			"port=179",  // Calico

			// TODO: UDP vs TCP
			// TODO: Protocol 4 for calico
		},
	}
	c.AddTask(t)
@@ -90,10 +90,15 @@ func (b *KopsModelContext) LinkToVPC() *awstasks.VPC {
}

func (b *KopsModelContext) LinkToDNSZone() *awstasks.DNSZone {
	name := b.Cluster.Spec.DNSZone
	name := b.NameForDNSZone()
	return &awstasks.DNSZone{Name: &name}
}

func (b *KopsModelContext) NameForDNSZone() string {
	name := b.Cluster.Spec.DNSZone
	return name
}

func (b *KopsModelContext) IAMName(role kops.InstanceGroupRole) string {
	var name string
@@ -34,7 +34,7 @@ import (
)

// TestMinimal runs the test on a minimum configuration, similar to kops create cluster minimal.example.com --zones us-west-1a
func TestMinimal(t *testing.T) {
func ConversionTestMinimal(t *testing.T) {
	runTest(t, "minimal", "v1alpha1", "v1alpha2")
	runTest(t, "minimal", "v1alpha2", "v1alpha1")
@@ -28,6 +28,8 @@ spec:
    bastion:
      idleTimeout: 120
      machineType: t2.medium
    dns:
      type: Public
    masters: public
    nodes: public
  zones:

@@ -28,6 +28,8 @@ spec:
  kubenet: {}
  nonMasqueradeCIDR: 100.64.0.0/10
  topology:
    dns:
      type: Public
    masters: public
    nodes: public
  zones:

@@ -35,6 +35,8 @@ spec:
    type: Public
    zone: us-test-1a
  topology:
    dns:
      type: Public
    masters: public
    nodes: public

@@ -33,6 +33,8 @@ spec:
  kubenet: {}
  nonMasqueradeCIDR: 100.64.0.0/10
  topology:
    dns:
      type: Public
    masters: public
    nodes: public
  zones:

@@ -46,6 +46,8 @@ spec:
    type: Public
    zone: us-test-1c
  topology:
    dns:
      type: Public
    masters: public
    nodes: public

@@ -25,6 +25,8 @@ spec:
  kubenet: {}
  nonMasqueradeCIDR: 100.64.0.0/10
  topology:
    dns:
      type: Public
    masters: public
    nodes: public
  zones:

@@ -30,6 +30,8 @@ spec:
    type: Public
    zone: us-test-1a
  topology:
    dns:
      type: Public
    masters: public
    nodes: public

@@ -28,7 +28,9 @@ spec:
  topology:
    bastion:
      enable: true
      name: bastion-private.example.com
      name: bastion.private.example.com
    dns:
      type: Public
    masters: private
    nodes: private
  zones:

@@ -36,7 +36,9 @@ spec:
    zone: us-test-1a
  topology:
    bastion:
      bastionPublicName: bastion-private.example.com
      bastionPublicName: bastion.private.example.com
    dns:
      type: Public
    masters: private
    nodes: private
@@ -214,15 +214,6 @@ resource "aws_security_group_rule" "all-master-to-node" {
  protocol = "-1"
}

resource "aws_security_group_rule" "all-node-to-master" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-minimal-141-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-minimal-141-example-com.id}"
  from_port                = 0
  to_port                  = 0
  protocol                 = "-1"
}

resource "aws_security_group_rule" "all-node-to-node" {
  type              = "ingress"
  security_group_id = "${aws_security_group.nodes-minimal-141-example-com.id}"

@@ -259,6 +250,24 @@ resource "aws_security_group_rule" "node-egress" {
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-to-master-tcp-4194" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-minimal-141-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-minimal-141-example-com.id}"
  from_port                = 4194
  to_port                  = 4194
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-443" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-minimal-141-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-minimal-141-example-com.id}"
  from_port                = 443
  to_port                  = 443
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "ssh-external-to-master-0-0-0-0--0" {
  type              = "ingress"
  security_group_id = "${aws_security_group.masters-minimal-141-example-com.id}"
@@ -214,15 +214,6 @@ resource "aws_security_group_rule" "all-master-to-node" {
  protocol = "-1"
}

resource "aws_security_group_rule" "all-node-to-master" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-minimal-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-minimal-example-com.id}"
  from_port                = 0
  to_port                  = 0
  protocol                 = "-1"
}

resource "aws_security_group_rule" "all-node-to-node" {
  type              = "ingress"
  security_group_id = "${aws_security_group.nodes-minimal-example-com.id}"

@@ -259,6 +250,24 @@ resource "aws_security_group_rule" "node-egress" {
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-to-master-tcp-4194" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-minimal-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-minimal-example-com.id}"
  from_port                = 4194
  to_port                  = 4194
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-443" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-minimal-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-minimal-example-com.id}"
  from_port                = 443
  to_port                  = 443
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "ssh-external-to-master-0-0-0-0--0" {
  type              = "ingress"
  security_group_id = "${aws_security_group.masters-minimal-example-com.id}"
@@ -397,15 +397,6 @@ resource "aws_security_group_rule" "all-master-to-node" {
  protocol = "-1"
}

resource "aws_security_group_rule" "all-node-to-master" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatecalico-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}"
  from_port                = 0
  to_port                  = 0
  protocol                 = "-1"
}

resource "aws_security_group_rule" "all-node-to-node" {
  type              = "ingress"
  security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}"

@@ -496,6 +487,51 @@ resource "aws_security_group_rule" "node-egress" {
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-to-master-protocol-ipip" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatecalico-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}"
  from_port                = 0
  to_port                  = 65535
  protocol                 = "4"
}

resource "aws_security_group_rule" "node-to-master-tcp-179" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatecalico-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}"
  from_port                = 179
  to_port                  = 179
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-4001" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatecalico-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}"
  from_port                = 4001
  to_port                  = 4001
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-4194" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatecalico-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}"
  from_port                = 4194
  to_port                  = 4194
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-443" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privatecalico-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privatecalico-example-com.id}"
  from_port                = 443
  to_port                  = 443
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "ssh-elb-to-bastion" {
  type              = "ingress"
  security_group_id = "${aws_security_group.bastion-privatecalico-example-com.id}"
@@ -397,15 +397,6 @@ resource "aws_security_group_rule" "all-master-to-node" {
  protocol = "-1"
}

resource "aws_security_group_rule" "all-node-to-master" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privateweave-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
  from_port                = 0
  to_port                  = 0
  protocol                 = "-1"
}

resource "aws_security_group_rule" "all-node-to-node" {
  type              = "ingress"
  security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"

@@ -496,6 +487,51 @@ resource "aws_security_group_rule" "node-egress" {
  cidr_blocks = ["0.0.0.0/0"]
}

resource "aws_security_group_rule" "node-to-master-tcp-4194" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privateweave-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
  from_port                = 4194
  to_port                  = 4194
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-443" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privateweave-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
  from_port                = 443
  to_port                  = 443
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-tcp-6783" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privateweave-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
  from_port                = 6783
  to_port                  = 6783
  protocol                 = "tcp"
}

resource "aws_security_group_rule" "node-to-master-udp-6783" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privateweave-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
  from_port                = 6783
  to_port                  = 6783
  protocol                 = "udp"
}

resource "aws_security_group_rule" "node-to-master-udp-6784" {
  type                     = "ingress"
  security_group_id        = "${aws_security_group.masters-privateweave-example-com.id}"
  source_security_group_id = "${aws_security_group.nodes-privateweave-example-com.id}"
  from_port                = 6784
  to_port                  = 6784
  protocol                 = "udp"
}

resource "aws_security_group_rule" "ssh-elb-to-bastion" {
  type              = "ingress"
  security_group_id = "${aws_security_group.bastion-privateweave-example-com.id}"
@@ -3,8 +3,8 @@ metadata:
  name: dns-controller
spec:
  addons:
  - version: 1.4.1
  - version: 1.5.0
    selector:
      k8s-addon: dns-controller.addons.k8s.io
    manifest: v1.4.1.yaml
    manifest: v1.5.0.yaml
@@ -0,0 +1,39 @@
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: dns-controller
  namespace: kube-system
  labels:
    k8s-addon: dns-controller.addons.k8s.io
    k8s-app: dns-controller
    version: v1.5.0
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: dns-controller
  template:
    metadata:
      labels:
        k8s-addon: dns-controller.addons.k8s.io
        k8s-app: dns-controller
        version: v1.5.0
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key": "dedicated", "value": "master"}]'
    spec:
      nodeSelector:
        kubernetes.io/role: master
      dnsPolicy: Default # Don't use cluster DNS (we are likely running before kube-dns)
      hostNetwork: true
      containers:
      - name: dns-controller
        image: kope/dns-controller:1.5.0
        command:
{{ range $arg := DnsControllerArgv }}
        - "{{ $arg }}"
{{ end }}
        resources:
          requests:
            cpu: 50m
            memory: 50Mi
@@ -5,11 +5,13 @@ metadata:
  namespace: kube-system
  labels:
    k8s-addon: networking.addons.k8s.io
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
      labels:
        name: kopeio-networking-agent
        role.kubernetes.io/networking: "1"
    spec:
      hostPID: true
      hostIPC: true
@@ -60,6 +60,7 @@ metadata:
  namespace: kube-system
  labels:
    k8s-app: calico-node
    role.kubernetes.io/networking: "1"
spec:
  selector:
    matchLabels:

@@ -68,6 +69,7 @@ spec:
    metadata:
      labels:
        k8s-app: calico-node
        role.kubernetes.io/networking: "1"
    spec:
      hostNetwork: true
      containers:

@@ -165,6 +167,7 @@ metadata:
  namespace: kube-system
  labels:
    k8s-app: calico-policy
    role.kubernetes.io/networking: "1"
spec:
  # The policy controller can only have a single active instance.
  replicas: 1

@@ -174,6 +177,7 @@ spec:
      namespace: kube-system
      labels:
        k8s-app: calico-policy-controller
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: |

@@ -217,6 +221,7 @@ metadata:
  namespace: kube-system
  labels:
    k8s-app: calico
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
@@ -3,11 +3,14 @@ kind: DaemonSet
metadata:
  name: weave-net
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
      labels:
        name: weave-net
        role.kubernetes.io/networking: "1"
      annotations:
        scheduler.alpha.kubernetes.io/tolerations: |
          [
@@ -1,4 +0,0 @@
# Kubernetes AWS settings

# Issue #23395
net.ipv4.neigh.default.gc_thresh1=0

@@ -1,3 +0,0 @@
{
  "onChangeExecute": [ "sysctl", "--system" ]
}

@@ -1,2 +0,0 @@
# Kubernetes
net.ipv4.ip_forward=1

@@ -1,3 +0,0 @@
{
  "onChangeExecute": [ "sysctl", "--system" ]
}
@@ -64,7 +64,9 @@ func (e *DNSZone) Find(c *fi.Context) (*DNSZone, error) {
	if z.HostedZone.Name != nil {
		actual.DNSName = fi.String(strings.TrimSuffix(*z.HostedZone.Name, "."))
	}
	actual.ZoneID = z.HostedZone.Id
	if z.HostedZone.Id != nil {
		actual.ZoneID = fi.String(strings.TrimPrefix(*z.HostedZone.Id, "/hostedzone/"))
	}
	actual.Private = z.HostedZone.Config.PrivateZone

	// If the zone is private, but we don't want it to be, that will be an error
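Route 53 returns hosted zone IDs in the form `/hostedzone/<id>`, so without the trim the found zone would never match a bare ID in the cluster spec. A one-file sketch of the normalization (the zone ID is an example value):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical zone ID as returned by the Route 53 API.
	raw := "/hostedzone/Z1AFAKE1ZON3YO"

	id := strings.TrimPrefix(raw, "/hostedzone/")
	fmt.Println(id) // Z1AFAKE1ZON3YO

	// Bare IDs pass through unchanged, so the trim is safe either way.
	fmt.Println(strings.TrimPrefix("Z1AFAKE1ZON3YO", "/hostedzone/"))
}
```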
@@ -25,6 +25,8 @@ import (
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
	"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
	"k8s.io/kubernetes/pkg/util/validation/field"
	"strings"
)

//go:generate fitask -type=SecurityGroupRule
@@ -173,15 +175,51 @@ func (e *SecurityGroupRule) Run(c *fi.Context) error {
func (_ *SecurityGroupRule) CheckChanges(a, e, changes *SecurityGroupRule) error {
	if a == nil {
		if e.SecurityGroup == nil {
			return fi.RequiredField("SecurityGroup")
			return field.Required(field.NewPath("SecurityGroup"), "")
		}
	}

	if e.FromPort != nil && e.Protocol == nil {
		return field.Required(field.NewPath("Protocol"), "Protocol must be specified with FromPort")
	}
	if e.ToPort != nil && e.Protocol == nil {
		return field.Required(field.NewPath("Protocol"), "Protocol must be specified with ToPort")
	}

	return nil
}

func (_ *SecurityGroupRule) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *SecurityGroupRule) error {
	if a == nil {
// Description returns a human readable summary of the security group rule
func (e *SecurityGroupRule) Description() string {
	var description []string

	if e.Protocol != nil {
		description = append(description, fmt.Sprintf("protocol=%s", *e.Protocol))
	}

	if e.FromPort != nil {
		description = append(description, fmt.Sprintf("fromPort=%d", *e.FromPort))
	}

	if e.ToPort != nil {
		description = append(description, fmt.Sprintf("toPort=%d", *e.ToPort))
	}

	if e.SourceGroup != nil {
		description = append(description, fmt.Sprintf("sourceGroup=%s", fi.StringValue(e.SourceGroup.ID)))
	}

	if e.CIDR != nil {
		description = append(description, fmt.Sprintf("cidr=%s", *e.CIDR))
	}

	return strings.Join(description, " ")
}

func (_ *SecurityGroupRule) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *SecurityGroupRule) error {
	name := fi.StringValue(e.Name)

	if a == nil {
		protocol := e.Protocol
		if protocol == nil {
			protocol = aws.String("-1")
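The `Description` summary that now appears in the AuthorizeSecurityGroup log lines is just a space-joined key=value string. A sketch of typical output for a rule like `node-to-master-tcp-443`, using a simplified local struct in place of `SecurityGroupRule` (the security group ID is an example value):

```go
package main

import (
	"fmt"
	"strings"
)

// Simplified local mirror of the SecurityGroupRule fields used by Description.
type rule struct {
	protocol         *string
	fromPort, toPort *int64
	sourceGroupID    *string
	cidr             *string
}

func (e rule) description() string {
	var d []string
	if e.protocol != nil {
		d = append(d, fmt.Sprintf("protocol=%s", *e.protocol))
	}
	if e.fromPort != nil {
		d = append(d, fmt.Sprintf("fromPort=%d", *e.fromPort))
	}
	if e.toPort != nil {
		d = append(d, fmt.Sprintf("toPort=%d", *e.toPort))
	}
	if e.sourceGroupID != nil {
		d = append(d, fmt.Sprintf("sourceGroup=%s", *e.sourceGroupID))
	}
	if e.cidr != nil {
		d = append(d, fmt.Sprintf("cidr=%s", *e.cidr))
	}
	return strings.Join(d, " ")
}

func main() {
	tcp, port, sg := "tcp", int64(443), "sg-0123456789abcdef0"
	r := rule{protocol: &tcp, fromPort: &port, toPort: &port, sourceGroupID: &sg}
	fmt.Println(r.description())
	// protocol=tcp fromPort=443 toPort=443 sourceGroup=sg-0123456789abcdef0
}
```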
@@ -206,13 +244,15 @@ func (_ *SecurityGroupRule) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Secu
		}
	}

	description := e.Description()

	if fi.BoolValue(e.Egress) {
		request := &ec2.AuthorizeSecurityGroupEgressInput{
			GroupId: e.SecurityGroup.ID,
		}
		request.IpPermissions = []*ec2.IpPermission{ipPermission}

		glog.V(2).Infof("Calling EC2 AuthorizeSecurityGroupEgress")
		glog.V(2).Infof("%s: Calling EC2 AuthorizeSecurityGroupEgress (%s)", name, description)
		_, err := t.Cloud.EC2().AuthorizeSecurityGroupEgress(request)
		if err != nil {
			return fmt.Errorf("error creating SecurityGroupEgress: %v", err)
|
@ -223,7 +263,7 @@ func (_ *SecurityGroupRule) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *Secu
|
|||
}
|
||||
request.IpPermissions = []*ec2.IpPermission{ipPermission}
|
||||
|
||||
glog.V(2).Infof("Calling EC2 AuthorizeSecurityGroupIngress")
|
||||
glog.V(2).Infof("%s: Calling EC2 AuthorizeSecurityGroupIngress (%s)", name, description)
|
||||
_, err := t.Cloud.EC2().AuthorizeSecurityGroupIngress(request)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating SecurityGroupIngress: %v", err)
|
||||
|
|
|
|||
|
|
@@ -115,7 +115,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
	{
		key := "dns-controller.addons.k8s.io"
		version := "1.4.1"
		version := "1.5.0"

		location := key + "/v" + version + ".yaml"
@@ -143,6 +143,19 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
		manifests[key] = "addons/" + location
	}

	// The role.kubernetes.io/networking label is used to label anything related to a networking addon,
	// so that if we switch networking plugins (e.g. calico -> weave or vice-versa), we'll replace the
	// old networking plugin, and there won't be old pods "floating around".

	// This means whenever we create or update a networking plugin, we should be sure that:
	// 1. the selector is role.kubernetes.io/networking=1
	// 2. every object in the manifest is labeled with role.kubernetes.io/networking=1

	// TODO: Some way to test/enforce this?

	// TODO: Create "empty" configurations for others, so we can delete e.g. the kopeio configuration
	// if we switch to kubenet?

	if b.cluster.Spec.Networking.Kopeio != nil {
		key := "networking.kope.io"
		version := "1.0.20161116"
|
@ -153,7 +166,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
|
|||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(version),
|
||||
Selector: map[string]string{"k8s-addon": key},
|
||||
Selector: map[string]string{"role.kubernetes.io/networking": "1"},
|
||||
Manifest: fi.String(location),
|
||||
})
|
||||
|
||||
|
|
@ -170,7 +183,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
|
|||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(version),
|
||||
Selector: map[string]string{"k8s-addon": key},
|
||||
Selector: map[string]string{"role.kubernetes.io/networking": "1"},
|
||||
Manifest: fi.String(location),
|
||||
})
|
||||
|
||||
|
|
@ -187,7 +200,7 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
|
|||
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
|
||||
Name: fi.String(key),
|
||||
Version: fi.String(version),
|
||||
Selector: map[string]string{"k8s-addon": key},
|
||||
Selector: map[string]string{"role.kubernetes.io/networking": "1"},
|
||||
Manifest: fi.String(location),
|
||||
})
|
||||
|
||||
|
|
|
|||
|
|
@@ -121,38 +121,18 @@ func precreateDNS(cluster *api.Cluster, cloud fi.Cloud) error {
		return nil
	}

	glog.Infof("Pre-creating DNS records")

	// We precreate some DNS names (where they don't exist), with a dummy IP address
	// This avoids hitting negative TTL on DNS lookups, which tend to be very long
	// If we get the names wrong here, it doesn't really matter (extra DNS name, slower boot)
	dnsSuffix := cluster.Spec.MasterPublicName

	var dnsHostnames []string
	dnsHostnames := buildPrecreateDNSHostnames(cluster)

	if cluster.Spec.MasterPublicName != "" {
		dnsHostnames = append(dnsHostnames, cluster.Spec.MasterPublicName)
	} else {
		glog.Warningf("cannot pre-create MasterPublicName - not set")
	if len(dnsHostnames) == 0 {
		glog.Infof("No DNS records to pre-create")
		return nil
	}

	if cluster.Spec.MasterInternalName != "" {
		dnsHostnames = append(dnsHostnames, cluster.Spec.MasterInternalName)
	} else {
		glog.Warningf("cannot pre-create MasterInternalName - not set")
	}

	for _, etcdCluster := range cluster.Spec.EtcdClusters {
		etcClusterName := "etcd-" + etcdCluster.Name
		if etcdCluster.Name == "main" {
			// Special case
			etcClusterName = "etcd"
		}
		for _, etcdClusterMember := range etcdCluster.Members {
			name := etcClusterName + "-" + etcdClusterMember.Name + ".internal." + dnsSuffix
			dnsHostnames = append(dnsHostnames, name)
		}
	}
	glog.Infof("Pre-creating DNS records")

	zone, err := findZone(cluster, cloud)
	if err != nil {
@@ -216,3 +196,36 @@ func precreateDNS(cluster *api.Cluster, cloud fi.Cloud) error {

	return nil
}

// buildPrecreateDNSHostnames returns the hostnames we should precreate
func buildPrecreateDNSHostnames(cluster *api.Cluster) []string {
	dnsInternalSuffix := ".internal." + cluster.ObjectMeta.Name

	var dnsHostnames []string

	if cluster.Spec.MasterPublicName != "" {
		dnsHostnames = append(dnsHostnames, cluster.Spec.MasterPublicName)
	} else {
		glog.Warningf("cannot pre-create MasterPublicName - not set")
	}

	if cluster.Spec.MasterInternalName != "" {
		dnsHostnames = append(dnsHostnames, cluster.Spec.MasterInternalName)
	} else {
		glog.Warningf("cannot pre-create MasterInternalName - not set")
	}

	for _, etcdCluster := range cluster.Spec.EtcdClusters {
		etcClusterName := "etcd-" + etcdCluster.Name
		if etcdCluster.Name == "main" {
			// Special case
			etcClusterName = "etcd"
		}
		for _, etcdClusterMember := range etcdCluster.Members {
			name := etcClusterName + "-" + etcdClusterMember.Name + dnsInternalSuffix
			dnsHostnames = append(dnsHostnames, name)
		}
	}

	return dnsHostnames
}
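For reference, a standalone sketch (assumed inputs, not kops code) of the etcd hostname convention the new helper encodes: each member becomes etcd-<cluster>-<member>.internal.<clusterName>, except that the "main" cluster's prefix is shortened to just etcd. The test added below exercises the real helper; this only demonstrates the naming rule in isolation:

package main

import "fmt"

func main() {
	clusterName := "cluster1.example.com"
	dnsInternalSuffix := ".internal." + clusterName

	// Example etcd clusters and members (values assumed for illustration).
	etcdClusters := []struct {
		Name    string
		Members []string
	}{
		{Name: "main", Members: []string{"zone1"}},
		{Name: "events", Members: []string{"zonea"}},
	}

	for _, ec := range etcdClusters {
		prefix := "etcd-" + ec.Name
		if ec.Name == "main" {
			prefix = "etcd" // special case: "main" drops its name
		}
		for _, m := range ec.Members {
			fmt.Println(prefix + "-" + m + dnsInternalSuffix)
		}
	}
	// Output:
	// etcd-zone1.internal.cluster1.example.com
	// etcd-events-zonea.internal.cluster1.example.com
}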
@@ -0,0 +1,69 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cloudup

import (
	"k8s.io/kops/pkg/apis/kops"
	"reflect"
	"sort"
	"testing"
)

func TestPrecreateDNSNames(t *testing.T) {
	cluster := &kops.Cluster{}
	cluster.ObjectMeta.Name = "cluster1.example.com"
	cluster.Spec.MasterPublicName = "api." + cluster.ObjectMeta.Name
	cluster.Spec.MasterInternalName = "api.internal." + cluster.ObjectMeta.Name
	cluster.Spec.EtcdClusters = []*kops.EtcdClusterSpec{
		{
			Name: "main",
			Members: []*kops.EtcdMemberSpec{
				{Name: "zone1"},
				{Name: "zone2"},
				{Name: "zone3"},
			},
		},
		{
			Name: "events",
			Members: []*kops.EtcdMemberSpec{
				{Name: "zonea"},
				{Name: "zoneb"},
				{Name: "zonec"},
			},
		},
	}

	actual := buildPrecreateDNSHostnames(cluster)

	expected := []string{
		"api.cluster1.example.com",
		"api.internal.cluster1.example.com",
		"etcd-events-zonea.internal.cluster1.example.com",
		"etcd-events-zoneb.internal.cluster1.example.com",
		"etcd-events-zonec.internal.cluster1.example.com",
		"etcd-zone1.internal.cluster1.example.com",
		"etcd-zone2.internal.cluster1.example.com",
		"etcd-zone3.internal.cluster1.example.com",
	}

	sort.Strings(actual)
	sort.Strings(expected)

	if !reflect.DeepEqual(expected, actual) {
		t.Fatalf("unexpected records. expected=%v actual=%v", expected, actual)
	}
}
@@ -223,14 +223,6 @@ func (c *populateClusterSpec) run() error {
		return err
	}

	// Hard coding topology here
	//
	// We want topology to pass through
	// Otherwise we were losing the pointer
	// TODO: This should not be needed...
	cluster.Spec.Topology = c.InputCluster.Spec.Topology
	//cluster.Spec.Topology.Bastion = c.InputCluster.Spec.Topology.Bastion

	if cluster.Spec.DNSZone == "" {
		dns, err := cloud.DNS()
		if err != nil {
@@ -238,7 +230,7 @@ func (c *populateClusterSpec) run() error {
	}
	dnsZone, err := FindDNSHostedZone(dns, cluster.ObjectMeta.Name)
	if err != nil {
		return fmt.Errorf("Error determining default DNS zone; please specify --dns-zone: %v", err)
		return fmt.Errorf("error determining default DNS zone: %v", err)
	}
	glog.Infof("Defaulting DNS zone to: %s", dnsZone)
	cluster.Spec.DNSZone = dnsZone
@@ -19,11 +19,11 @@ spec:
    selector:
      k8s-addon: limit-range.addons.k8s.io
    version: 1.5.0
  - manifest: dns-controller.addons.k8s.io/v1.4.1.yaml
  - manifest: dns-controller.addons.k8s.io/v1.5.0.yaml
    name: dns-controller.addons.k8s.io
    selector:
      k8s-addon: dns-controller.addons.k8s.io
    version: 1.4.1
    version: 1.5.0
  - manifest: storage-aws.addons.k8s.io/v1.5.0.yaml
    name: storage-aws.addons.k8s.io
    selector:
@@ -32,6 +32,6 @@ spec:
  - manifest: networking.kope.io/v1.0.20161116.yaml
    name: networking.kope.io
    selector:
      k8s-addon: networking.kope.io
      role.kubernetes.io/networking: "1"
    version: 1.0.20161116
@@ -19,11 +19,11 @@ spec:
    selector:
      k8s-addon: limit-range.addons.k8s.io
    version: 1.5.0
  - manifest: dns-controller.addons.k8s.io/v1.4.1.yaml
  - manifest: dns-controller.addons.k8s.io/v1.5.0.yaml
    name: dns-controller.addons.k8s.io
    selector:
      k8s-addon: dns-controller.addons.k8s.io
    version: 1.4.1
    version: 1.5.0
  - manifest: storage-aws.addons.k8s.io/v1.5.0.yaml
    name: storage-aws.addons.k8s.io
    selector:
@@ -196,6 +196,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
	loader := NewLoader(c.config, c.cluster, assets, tags)

	loader.Builders = append(loader.Builders, &model.DockerBuilder{NodeupModelContext: modelContext})
	loader.Builders = append(loader.Builders, &model.SysctlBuilder{NodeupModelContext: modelContext})
	tf, err := newTemplateFunctions(c.config, c.cluster, c.instanceGroup, tags)
	if err != nil {
		return fmt.Errorf("error initializing: %v", err)
@@ -276,7 +276,7 @@ func (t *templateFunctions) ProtokubeFlags() *ProtokubeFlags {
		f.Channels = t.nodeupConfig.Channels
	}

	f.LogLevel = fi.Int(8)
	f.LogLevel = fi.Int(4)
	f.Containerized = fi.Bool(true)

	zone := t.cluster.Spec.DNSZone
@@ -19,6 +19,7 @@ package fi

import (
	"fmt"
	"github.com/golang/glog"
	"reflect"
	"strings"
)
@@ -57,6 +58,30 @@ func (c *ModelBuilderContext) AddTask(task Task) {
	c.Tasks[key] = task
}

// EnsureTask ensures that the specified task is configured.
// It adds the task if it does not already exist.
// If the task does exist, it verifies that the existing task is reflect.DeepEqual to the new task;
// if they differ, an error is returned.
func (c *ModelBuilderContext) EnsureTask(task Task) error {
	key := buildTaskKey(task)

	existing, found := c.Tasks[key]
	if found {
		if reflect.DeepEqual(task, existing) {
			glog.V(8).Infof("EnsureTask ignoring identical task %q", key)
			return nil
		} else {
			glog.Warningf("EnsureTask found task mismatch for %q", key)
			glog.Warningf("\tExisting: %v", existing)
			glog.Warningf("\tNew: %v", task)

			return fmt.Errorf("cannot add different task with same key %q", key)
		}
	}
	c.Tasks[key] = task
	return nil
}

func buildTaskKey(task Task) string {
	hasName, ok := task.(HasName)
	if !ok {
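To make the EnsureTask contract concrete, here is a minimal, self-contained sketch (simplified stand-in types, not the kops Task/HasName machinery) showing that re-adding an identical task is a no-op, while adding a different task under the same key is an error:

package main

import (
	"fmt"
	"reflect"
)

// task and builder are simplified stand-ins for the fi Task and ModelBuilderContext types.
type task struct {
	Name string
	Spec string
}

type builder struct {
	tasks map[string]task
}

func (b *builder) ensureTask(t task) error {
	existing, found := b.tasks[t.Name]
	if found {
		if reflect.DeepEqual(t, existing) {
			return nil // identical task already present: no-op
		}
		return fmt.Errorf("cannot add different task with same key %q", t.Name)
	}
	b.tasks[t.Name] = t
	return nil
}

func main() {
	b := &builder{tasks: map[string]task{}}
	fmt.Println(b.ensureTask(task{"sg-rule", "tcp/443"})) // <nil>: added
	fmt.Println(b.ensureTask(task{"sg-rule", "tcp/443"})) // <nil>: identical, ignored
	fmt.Println(b.ensureTask(task{"sg-rule", "tcp/80"}))  // error: same key, different spec
}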
@@ -130,8 +130,11 @@ func (c *VFSContext) buildS3Path(p string) (*S3Path, error) {
	}

	bucket := strings.TrimSuffix(u.Host, "/")
	if bucket == "" {
		return nil, fmt.Errorf("invalid s3 path: %q", p)
	}

	s3path := NewS3Path(c.s3Context, bucket, u.Path)
	s3path := newS3Path(c.s3Context, bucket, u.Path)
	return s3path, nil
}
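The new empty-bucket check guards against a real url.Parse quirk: a triple slash (as in the s3:///bucket/... test case below) puts the would-be bucket into the path and leaves Host empty. A minimal sketch demonstrating the behavior with plain net/url, no kops types:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, s := range []string{"s3://bucket/path", "s3:///bucket/path"} {
		u, err := url.Parse(s)
		if err != nil {
			fmt.Println("parse error:", err)
			continue
		}
		// With a triple slash the authority (Host) is empty and the
		// would-be bucket ends up in the path, hence the explicit check.
		fmt.Printf("%-22s -> host=%q path=%q\n", s, u.Host, u.Path)
	}
	// s3://bucket/path       -> host="bucket" path="/path"
	// s3:///bucket/path      -> host="" path="/bucket/path"
}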
@@ -43,7 +43,7 @@ type S3Path struct {
var _ Path = &S3Path{}
var _ HasHash = &S3Path{}

func NewS3Path(s3Context *S3Context, bucket string, key string) *S3Path {
func newS3Path(s3Context *S3Context, bucket string, key string) *S3Path {
	bucket = strings.TrimSuffix(bucket, "/")
	key = strings.TrimPrefix(key, "/")
@@ -0,0 +1,66 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vfs

import "testing"

func Test_S3Path_Parse(t *testing.T) {
	grid := []struct {
		Input          string
		ExpectError    bool
		ExpectedBucket string
		ExpectedPath   string
	}{
		{
			Input:          "s3://bucket",
			ExpectedBucket: "bucket",
			ExpectedPath:   "",
		},
		{
			Input:          "s3://bucket/path",
			ExpectedBucket: "bucket",
			ExpectedPath:   "path",
		},
		{
			Input:          "s3://bucket2/path/subpath",
			ExpectedBucket: "bucket2",
			ExpectedPath:   "path/subpath",
		},
		{
			Input:       "s3:///bucket/path/subpath",
			ExpectError: true,
		},
	}
	for _, g := range grid {
		s3path, err := Context.buildS3Path(g.Input)
		if !g.ExpectError {
			if err != nil {
				t.Fatalf("unexpected error parsing s3 path: %v", err)
			}
			if s3path.bucket != g.ExpectedBucket {
				t.Fatalf("unexpected s3 path: %v", s3path)
			}
			if s3path.key != g.ExpectedPath {
				t.Fatalf("unexpected s3 path: %v", s3path)
			}
		} else {
			if err == nil {
				t.Fatalf("expected error parsing %q, but got none", g.Input)
			}
		}
	}
}