mirror of https://github.com/kubernetes/kops.git
Merge pull request #1366 from reactiveops/kris-and-eric-1282
Specify Existing NAT Gateways to Use in Cluster Creation
This commit is contained in: commit a60e10eacd
@@ -18,6 +18,7 @@ package main

import (
	"bytes"
	//"fmt"
	"github.com/golang/glog"
	"io/ioutil"
	"k8s.io/kops/cmd/kops/util"
@@ -51,6 +52,12 @@ func TestCreateClusterPrivate(t *testing.T) {
	runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/private", "v1alpha2")
}

// TestCreateClusterWithNGWSpecified runs kops create cluster private.example.com --zones us-test-1a --master-zones us-test-1a
//func TestCreateClusterWithNGWSpecified(t *testing.T) {
//	runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/ngwspecified", "v1alpha1")
//	runCreateClusterIntegrationTest(t, "../../tests/integration/create_cluster/ngwspecified", "v1alpha2")
//}

func runCreateClusterIntegrationTest(t *testing.T, srcDir string, version string) {
	var stdout bytes.Buffer
@@ -156,7 +156,9 @@ func RunUpdateCluster(f *util.Factory, clusterName string, out io.Writer, c *Upd
		DryRun:          isDryrun,
		MaxTaskDuration: c.MaxTaskDuration,
	}

	err = applyCmd.Run()

	if err != nil {
		return err
	}
@@ -74,8 +74,6 @@ aws configure # Input your credentials here
aws iam list-users
```

PyPI is the officially supported `aws cli` download avenue, and kops suggests using it. [More information](https://pypi.python.org/pypi/awscli) on the package.
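For reference, the PyPI route is a one-line install (a minimal sketch, assuming Python and `pip` are already on your PATH):

```
pip install awscli
```

After installing, run `aws configure` as shown above to set up your credentials.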
#### Other Platforms

Official documentation [here](http://docs.aws.amazon.com/cli/latest/userguide/installing.html)
@@ -241,7 +239,9 @@ kubectl -n kube-system get po

## What's next?

Kops has a ton of great features, and an amazing support team. We recommend researching [other interesting modes](commands.md#other-interesting-modes) to learn more about generating Terraform configurations, or running your cluster in HA (Highly Available). Also be sure to check out how to run a [private network topology](topology.md) in AWS.
Kops has a ton of great features, and an amazing support team. We recommend researching [other interesting modes](commands.md#other-interesting-modes) to learn more about generating Terraform configurations, or running your cluster in HA (Highly Available). You might want to take a peek at the [cluster spec docs](cluster_spec.md) to help configure these "other interesting modes". Also be sure to check out how to run a [private network topology](topology.md) in AWS.

Explore the program, and work on getting your `cluster config` hammered out!
@@ -55,6 +55,31 @@ spec:
  - 12.34.56.78/32
```

### cluster.spec Subnet Keys

#### id
ID of a subnet to share in an existing VPC.

#### egress
The resource identifier (ID) of something in your existing VPC that you would like to use as "egress" to the outside world. This feature was originally envisioned to allow re-use of NAT Gateways. In this case, the correct usage is as follows:

```
spec:
  subnets:
  - cidr: 10.20.64.0/21
    name: us-east-1a
    egress: nat-987654321
    type: Private
    zone: us-east-1a
  - cidr: 10.20.32.0/21
    name: utility-us-east-1a
    id: subnet-12345
    type: Utility
    zone: us-east-1a
```

### kubeAPIServer

This block contains configuration for the `kube-apiserver`.
@@ -9,12 +9,14 @@ Use kops create cluster with the `--vpc` and `--network-cidr` arguments for your
```
export KOPS_STATE_STORE=s3://<somes3bucket>
export CLUSTER_NAME=<sharedvpc.mydomain.com>
export VPC_ID=vpc-12345678 # replace with your VPC id
export NETWORK_CIDR=10.100.0.0/16 # replace with the cidr for the VPC ${VPC_ID}

kops create cluster --zones=us-east-1b --name=${CLUSTER_NAME} \
  --vpc=vpc-a80734c1 --network-cidr=10.100.0.0/16
  --vpc=${VPC_ID} --network-cidr=${NETWORK_CIDR}
```

Then `kops edit cluster ${CLUSTER_NAME}` should show you something like:
Then `kops edit cluster ${CLUSTER_NAME}` will show you something like:

```
metadata:
@@ -22,12 +24,14 @@ metadata:
  name: ${CLUSTER_NAME}
spec:
  cloudProvider: aws
  networkCIDR: 10.100.0.0/16
  networkID: vpc-a80734c1
  networkCIDR: ${NETWORK_CIDR}
  networkID: ${VPC_ID}
  nonMasqueradeCIDR: 100.64.0.0/10
  zones:
  - cidr: 10.100.32.0/19
    name: eu-central-1a
  subnets:
  - cidr: 172.20.32.0/19
    name: us-east-1b
    type: Public
    zone: us-east-1b
```
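If you are unsure of the VPC's CIDR, it can be read back from AWS before you set `NETWORK_CIDR` (a quick check, assuming the AWS CLI is configured and `VPC_ID` is already exported):

```
aws ec2 describe-vpcs --vpc-ids ${VPC_ID} --query 'Vpcs[0].CidrBlock'
```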
@@ -74,11 +78,13 @@ probably remove that tag to indicate that the resources are not owned by that cl
deleting the cluster won't try to delete the VPC. (Deleting the VPC won't succeed anyway, because it's in use,
but it's better to avoid the later confusion!)

## Running in a shared subnet
## Advanced Options for Creating Clusters in Existing VPCs

You can also use a shared subnet. Doing so is not recommended unless you are using external networking ([kope-routing](https://github.com/kopeio/kope-routing)).
### Shared Subnets

Edit your cluster to add the ID of the subnet:
`kops` can create a cluster in shared subnets in both public and private network [topologies](docs/topology.md). Doing so is not recommended unless you are using [external networking](docs/networking.md#supported-cni-networking).

After creating a basic cluster spec, edit your cluster to add the ID of the subnet:

`kops edit cluster ${CLUSTER_NAME}`
@@ -88,13 +94,15 @@ metadata:
  name: ${CLUSTER_NAME}
spec:
  cloudProvider: aws
  networkCIDR: 10.100.0.0/16
  networkID: vpc-a80734c1
  networkCIDR: ${NETWORK_CIDR}
  networkID: ${VPC_ID}
  nonMasqueradeCIDR: 100.64.0.0/10
  zones:
  - cidr: 10.100.32.0/19
    name: eu-central-1a
    id: subnet-1234567 # Replace this with the ID of your subnet
  subnets:
  - cidr: 172.20.32.0/19
    name: us-east-1b
    id: subnet-id123
    type: Public
    zone: us-east-1b
```
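One way to read the subnet's CIDR back from AWS (assuming the AWS CLI is configured; `subnet-id123` is the placeholder ID from the example above):

```
aws ec2 describe-subnets --subnet-ids subnet-id123 --query 'Subnets[0].CidrBlock'
```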
Make sure that the CIDR matches the CIDR of your subnet. Then update your cluster through the normal update procedure:
@@ -104,3 +112,25 @@ kops update cluster ${CLUSTER_NAME}
# Review changes
kops update cluster ${CLUSTER_NAME} --yes
```

### Shared NAT Gateways

On AWS in private [topology](docs/topology.md), `kops` creates one NAT Gateway (NGW) per AZ. If your shared VPC is already set up with an NGW in the subnet that `kops` deploys private resources to, it is possible to specify its ID and have `kops`/`kubernetes` use it.

After creating a basic cluster spec, edit your cluster to specify the NGW:

`kops edit cluster ${CLUSTER_NAME}`

```yaml
spec:
  subnets:
  - cidr: 10.20.64.0/21
    name: us-east-1a
    egress: nat-987654321
    type: Private
    zone: us-east-1a
  - cidr: 10.20.32.0/21
    name: utility-us-east-1a
    type: Utility
    zone: us-east-1a
```
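Before running the update, you can confirm that the gateway exists and is available (assuming the AWS CLI is configured; `nat-987654321` is the placeholder ID from the example above):

```
aws ec2 describe-nat-gateways --nat-gateway-ids nat-987654321 --query 'NatGateways[0].State'
```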
@@ -322,6 +322,8 @@ type ClusterSubnetSpec struct {
	// ProviderID is the cloud provider id for the objects associated with the zone (the subnet on AWS)
	ProviderID string `json:"id,omitempty"`

	Egress string `json:"egress,omitempty"`

	Type SubnetType `json:"type,omitempty"`
}
@@ -46,6 +46,7 @@ func ParseInstanceGroupRole(input string, lenient bool) (InstanceGroupRole, bool
// ParseRawYaml parses an object just using yaml, without the full api machinery
// Deprecated: prefer using the API machinery
func ParseRawYaml(data []byte, dest interface{}) error {

	// Yaml can't parse empty strings
	configString := string(data)
	configString = strings.TrimSpace(configString)
@@ -228,5 +228,6 @@ type ClusterSubnetSpec struct {
	// ProviderID is the cloud provider id for the objects associated with the zone (the subnet on AWS)
	ProviderID string `json:"id,omitempty"`

	Type SubnetType `json:"type,omitempty"`
	Egress string `json:"egress,omitempty"`
	Type SubnetType `json:"type,omitempty"`
}
@@ -332,6 +332,14 @@ func (c *Cluster) Validate(strict bool) error {

		}
	}
	// Egress specification support
	{
		for _, s := range c.Spec.Subnets {
			if s.Egress != "" && !strings.Contains(s.Egress, "nat-") {
				return fmt.Errorf("egress must be of type NAT Gateway")
			}
		}
	}

	// Etcd
	{
@@ -22,6 +22,7 @@ import (
	"k8s.io/kops/upup/pkg/fi"
	"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
	"k8s.io/kubernetes/pkg/util/sets"
	"strings"
)

// NetworkModelBuilder configures network objects
@@ -32,6 +33,7 @@ type NetworkModelBuilder struct {

var _ fi.ModelBuilder = &NetworkModelBuilder{}

func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {

	sharedVPC := b.Cluster.SharedVPC()

	// VPC that holds everything for the cluster
@@ -112,6 +114,7 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
		CIDR:   s(subnetSpec.CIDR),
		Shared: fi.Bool(sharedSubnet),
	}

	if subnetSpec.ProviderID != "" {
		subnet.ID = s(subnetSpec.ProviderID)
	}
@@ -148,36 +151,63 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
		}
	}

	for _, zone := range privateZones.List() {
	// Loop over zones
	for i, zone := range privateZones.List() {

		utilitySubnet, err := b.LinkToUtilitySubnetInZone(zone)
		if err != nil {
			return err
		}

		// Every NGW needs a public (Elastic) IP address, every private
		// subnet needs a NGW, let's create it. We tie it to a subnet
		// so we can track it in AWS
		eip := &awstasks.ElasticIP{
			Name: s(zone + "." + b.ClusterName()),
			AssociatedNatGatewayRouteTable: b.LinkToPrivateRouteTableInZone(zone),
		}
		c.AddTask(eip)
		var ngw = &awstasks.NatGateway{}
		if b.Cluster.Spec.Subnets[i].Egress != "" {
			if strings.Contains(b.Cluster.Spec.Subnets[i].Egress, "nat-") {

		// NAT Gateway
		//
		// All private subnets will need a NGW, one per zone
		//
		// The instances in the private subnet can access the Internet by
		// using a network address translation (NAT) gateway that resides
		// in the public subnet.
		ngw := &awstasks.NatGateway{
			Name:      s(zone + "." + b.ClusterName()),
			Subnet:    utilitySubnet,
			ElasticIP: eip,
				ngw = &awstasks.NatGateway{
					Name:   s(zone + "." + b.ClusterName()),
					Subnet: utilitySubnet,
					ID:     s(b.Cluster.Spec.Subnets[i].Egress),
					AssociatedRouteTable: b.LinkToPrivateRouteTableInZone(zone),
					// If we're here, it means this NatGateway was specified, so we are Shared
					Shared: fi.Bool(true),
				}

			AssociatedRouteTable: b.LinkToPrivateRouteTableInZone(zone),
				c.AddTask(ngw)

			} else {
				return fmt.Errorf("kops currently only supports re-use of NAT Gateways. We will support more eventually! Please see https://github.com/kubernetes/kops/issues/1530")
			}

		} else {

			// Every NGW needs a public (Elastic) IP address, every private
			// subnet needs a NGW, let's create it. We tie it to a subnet
			// so we can track it in AWS
			var eip = &awstasks.ElasticIP{}

			eip = &awstasks.ElasticIP{
				Name: s(zone + "." + b.ClusterName()),
				AssociatedNatGatewayRouteTable: b.LinkToPrivateRouteTableInZone(zone),
			}

			c.AddTask(eip)
			// NAT Gateway
			//
			// All private subnets will need a NGW, one per zone
			//
			// The instances in the private subnet can access the Internet by
			// using a network address translation (NAT) gateway that resides
			// in the public subnet.

			//var ngw = &awstasks.NatGateway{}
			ngw = &awstasks.NatGateway{
				Name:      s(zone + "." + b.ClusterName()),
				Subnet:    utilitySubnet,
				ElasticIP: eip,
				AssociatedRouteTable: b.LinkToPrivateRouteTableInZone(zone), // Unsure about this?
			}
			c.AddTask(ngw)
		}
		c.AddTask(ngw)

		// Private Route Table
		//
@@ -0,0 +1,90 @@
apiVersion: kops/v1alpha1
kind: Cluster
metadata:
  creationTimestamp: "2017-01-01T00:00:00Z"
  name: private.example.com
spec:
  api:
    loadBalancer:
      type: Public
  channel: stable
  cloudProvider: aws
  configBase: memfs://tests/private.example.com
  etcdClusters:
  - etcdMembers:
    - name: us-test-1a
      zone: us-test-1a
    name: main
  - etcdMembers:
    - name: us-test-1a
      zone: us-test-1a
    name: events
  kubernetesVersion: v1.4.7
  masterPublicName: api.private.example.com
  networkCIDR: 172.20.0.0/16
  networking:
    kopeio: {}
  nonMasqueradeCIDR: 100.64.0.0/10
  topology:
    bastion:
      enable: true
      name: bastion.private.example.com
    dns:
      type: Public
    masters: private
    nodes: private
  zones:
  - cidr: 172.20.0.0/22
    name: us-test-1a
    privateCIDR: 172.20.32.0/19

---

apiVersion: kops/v1alpha1
kind: InstanceGroup
metadata:
  creationTimestamp: "2017-01-01T00:00:00Z"
  name: bastions
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: t2.micro
  maxSize: 1
  minSize: 1
  role: Bastion
  zones:
  - utility-us-test-1a

---

apiVersion: kops/v1alpha1
kind: InstanceGroup
metadata:
  creationTimestamp: "2017-01-01T00:00:00Z"
  name: master-us-test-1a
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: m3.medium
  maxSize: 1
  minSize: 1
  role: Master
  zones:
  - us-test-1a

---

apiVersion: kops/v1alpha1
kind: InstanceGroup
metadata:
  creationTimestamp: "2017-01-01T00:00:00Z"
  name: nodes
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: t2.medium
  maxSize: 2
  minSize: 2
  role: Node
  zones:
  - us-test-1a
@@ -0,0 +1,96 @@
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
  creationTimestamp: "2017-01-01T00:00:00Z"
  name: private.example.com
spec:
  api:
    loadBalancer:
      type: Public
  channel: stable
  cloudProvider: aws
  configBase: memfs://tests/private.example.com
  etcdClusters:
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: us-test-1a
    name: main
  - etcdMembers:
    - instanceGroup: master-us-test-1a
      name: us-test-1a
    name: events
  kubernetesVersion: v1.4.7
  masterPublicName: api.private.example.com
  networkCIDR: 172.20.0.0/16
  networking:
    kopeio: {}
  nonMasqueradeCIDR: 100.64.0.0/10
  subnets:
  - cidr: 172.20.32.0/19
    name: us-test-1a
    ngwEip: eipalloc-e12345
    ngwId: nat-09123456
    type: Private
    zone: us-test-1a
  - cidr: 172.20.0.0/22
    name: utility-us-test-1a
    type: Utility
    zone: us-test-1a
  topology:
    bastion:
      bastionPublicName: bastion.private.example.com
    dns:
      type: Public
    masters: private
    nodes: private

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2017-01-01T00:00:00Z"
  name: bastions
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: t2.micro
  maxSize: 1
  minSize: 1
  role: Bastion
  subnets:
  - utility-us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2017-01-01T00:00:00Z"
  name: master-us-test-1a
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: m3.medium
  maxSize: 1
  minSize: 1
  role: Master
  subnets:
  - us-test-1a

---

apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  creationTimestamp: "2017-01-01T00:00:00Z"
  name: nodes
spec:
  associatePublicIp: true
  image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
  machineType: t2.medium
  maxSize: 2
  minSize: 2
  role: Node
  subnets:
  - us-test-1a
@@ -0,0 +1,8 @@
ClusterName: private.example.com
Zones: us-test-1a
Cloud: aws
Topology: private
Networking: kopeio-vxlan
Bastion: true
NgwEips: eipalloc-e12345
NgwIds: nat-09123456
@@ -34,6 +34,11 @@ type NatGateway struct {
	Subnet *Subnet
	ID     *string

	EgressId *string

	// Shared is set if this is a shared NatGateway
	Shared *bool

	// We can't tag NatGateways, so we have to find through a surrogate
	AssociatedRouteTable *RouteTable
}
@@ -45,17 +50,54 @@ func (e *NatGateway) CompareWithID() *string {
}

func (e *NatGateway) Find(c *fi.Context) (*NatGateway, error) {
	ngw, err := e.findNatGateway(c)
	if err != nil {
		return nil, err
	}
	if ngw == nil {
		return nil, nil

	cloud := c.Cloud.(awsup.AWSCloud)
	var ngw *ec2.NatGateway
	actual := &NatGateway{}

	if e.ID != nil && *e.ID != "" {
		// We have an existing NGW, let's look up the EIP
		var ngwIds []*string
		ngwIds = append(ngwIds, e.ID)

		request := &ec2.DescribeNatGatewaysInput{
			NatGatewayIds: ngwIds,
		}

		response, err := cloud.EC2().DescribeNatGateways(request)

		if err != nil {
			return nil, fmt.Errorf("error listing NAT Gateways: %v", err)
		}

		if len(response.NatGateways) != 1 {
			return nil, fmt.Errorf("found %d NAT Gateways, expected 1", len(response.NatGateways))
		}
		if len(response.NatGateways) == 1 {
			ngw = response.NatGateways[0]
		}

		if len(response.NatGateways[0].NatGatewayAddresses) != 1 {
			return nil, fmt.Errorf("found %d EIP addresses for 1 NAT Gateway, expected 1", len(response.NatGateways[0].NatGatewayAddresses))
		}
		if len(response.NatGateways[0].NatGatewayAddresses) == 1 {
			actual.ElasticIP = &ElasticIP{ID: response.NatGateways[0].NatGatewayAddresses[0].AllocationId}
		}
	} else {
		// This is the normal/default path
		ngw, err := e.findNatGateway(c)
		if err != nil {
			return nil, err
		}
		if ngw == nil {
			return nil, nil
		}
	}

	actual := &NatGateway{
		ID: ngw.NatGatewayId,
	}
	actual.ID = ngw.NatGatewayId

	actual.Subnet = e.Subnet
	if len(ngw.NatGatewayAddresses) == 0 {
		// Not sure if this ever happens
@@ -233,6 +275,7 @@ func (e *NatGateway) waitAvailable(cloud awsup.AWSCloud) error {

func (_ *NatGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *NatGateway) error {
	// New NGW

	var id *string
	if a == nil {
		glog.V(2).Infof("Creating Nat Gateway")
@@ -262,8 +305,19 @@ func (_ *NatGateway) RenderAWS(t *awsup.AWSAPITarget, a, e, changes *NatGateway)
	tags["AssociatedNatgateway"] = *id
	err := t.AddAWSTags(*e.Subnet.ID, tags)
	if err != nil {
		return fmt.Errorf("Unable to tag subnet %v", err)
		return fmt.Errorf("unable to tag subnet %v", err)
	}

	// If this is a shared NGW, we need to tag it
	// The tag that implies "shared" is `AssociatedNatgateway` => NGW-ID
	// This is better than just a tag that's shared because this lets us create a whitelist of these NGWs
	// without doing a bunch more work in `kutil/delete_cluster.go`

	if *e.Shared == true {
		glog.V(2).Infof("tagging route table %s to track shared NGW", *e.AssociatedRouteTable.ID)
		err = t.AddAWSTags(*e.AssociatedRouteTable.ID, tags)
		if err != nil {
			return fmt.Errorf("unable to tag route table %v", err)
		}
	}

	return nil
}
@@ -28,6 +28,11 @@ import (
// For example, it assigns stable Keys to InstanceGroups & Masters, and
// it assigns CIDRs to subnets
// We also assign KubernetesVersion, because we want it to be explicit
//
// PerformAssignments is called on create, as well as on update. In fact,
// any time Run() is called in apply_cluster.go we will reach this function.
// Please do all after-market logic here.
//
func PerformAssignments(c *kops.Cluster) error {

	// Topology support
|
@@ -921,10 +921,33 @@ func ListSubnets(cloud fi.Cloud, clusterName string) ([]*ResourceTracker, error)
	}

	// Associated NAT Gateways
	// Note: Jan 2017 @geojaz we mustn't delete any shared NAT Gateways here.
	// Since we don't have tagging on the NGWs, we have to read the route tables

	if len(ngws) != 0 {

		rtRequest := &ec2.DescribeRouteTablesInput{}
		rtResponse, err := c.EC2().DescribeRouteTables(rtRequest)
		if err != nil {
			return nil, fmt.Errorf("error describing RouteTables: %v", err)
		}

		// sharedNgwIds is like a whitelist of shared NGWs that we can ensure are not deleted
		sharedNgwIds := sets.NewString()
		{
			for _, rt := range rtResponse.RouteTables {
				for _, t := range rt.Tags {
					k := *t.Key
					v := *t.Value
					if k == "AssociatedNatgateway" {
						sharedNgwIds.Insert(v)
					}
				}
			}
		}

		glog.V(2).Infof("Querying Nat Gateways")
		request := &ec2.DescribeNatGatewaysInput{}
		response, err := c.EC2().DescribeNatGateways(request)

		if err != nil {
			return nil, fmt.Errorf("error describing NatGateways: %v", err)
		}
@@ -934,6 +957,10 @@ func ListSubnets(cloud fi.Cloud, clusterName string) ([]*ResourceTracker, error)
		if !ngws[id] {
			continue
		}
		if sharedNgwIds.Has(id) {
			// If we find this NGW in our whitelist, skip it (don't delete!)
			continue
		}

		tracker := &ResourceTracker{
			Name: id,
@@ -1431,7 +1458,6 @@ func FindNatGateways(cloud fi.Cloud, routeTableIds sets.String) ([]*ResourceTrac
	c := cloud.(awsup.AWSCloud)

	natGatewayIds := sets.NewString()

	{
		request := &ec2.DescribeRouteTablesInput{}
		for routeTableId := range routeTableIds {
@@ -1441,33 +1467,43 @@ func FindNatGateways(cloud fi.Cloud, routeTableIds sets.String) ([]*ResourceTrac
		if err != nil {
			return nil, fmt.Errorf("error from DescribeRouteTables: %v", err)
		}

		shared := false
		for _, rt := range response.RouteTables {
			for _, route := range rt.Routes {
				if route.NatGatewayId != nil {
					natGatewayIds.Insert(*route.NatGatewayId)
			shared = false
			for _, t := range rt.Tags {
				k := *t.Key
				// v := *t.Value
				if k == "AssociatedNatgateway" {
					shared = true
				}
			}
			if !shared {
				for _, route := range rt.Routes {
					if route.NatGatewayId != nil {
						natGatewayIds.Insert(*route.NatGatewayId)
						glog.V(2).Infof("marking NatGateway %s for deletion", *route.NatGatewayId)
					}
				}
			}
		}
	}

	{
		// We could do this to find if a NAT gateway is shared

		//request := &ec2.DescribeRouteTablesInput{}
		//request.Filters = append(request.Filters, awsup.NewEC2Filter("route.nat-gateway-id", natGatewayId))
		//response, err := c.EC2().DescribeRouteTables(request)
		//if err != nil {
		//	return fmt.Errorf("error from DescribeRouteTables: %v", err)
		//}
		//
		//for _, rt := range response.RouteTables {
		//	routeTableId := aws.StringValue(rt.RouteTableId)
		//}
	}
	// I think this is now out of date. @geojaz 1/2017
	//{
	//	// We could do this to find if a NAT gateway is shared
	//
	//	request := &ec2.DescribeRouteTablesInput{}
	//	request.Filters = append(request.Filters, awsup.NewEC2Filter("route.nat-gateway-id", natGatewayIds[0]))
	//	response, err := c.EC2().DescribeRouteTables(request)
	//	if err != nil {
	//		return fmt.Errorf("error from DescribeRouteTables: %v", err)
	//	}
	//
	//	for _, rt := range response.RouteTables {
	//		routeTableId := aws.StringValue(rt.RouteTableId)
	//	}
	//}

	var trackers []*ResourceTracker

	if len(natGatewayIds) != 0 {
		request := &ec2.DescribeNatGatewaysInput{}
		for natGatewayId := range natGatewayIds {