Support multiple admin access CIDRs

This modifies the templates to create the appropriate resources for each
of the admin access CIDRs specified in the cluster configuration.

On AWS this leads to the creation of multiple security group rules, which
are currently not cleaned up if a CIDR is later removed from the configuration.

This issue is tracked in kubernetes/kops#145

Changes:
* change AdminCIDR() to return a slice of the configured CIDRs (see the rendering sketch below)
* aws: change templates to create one security group rule per CIDR
* gce: set 'sourceRanges' on the firewall rule to the configured CIDRs
Vincent Ambo 2016-09-09 17:51:58 +02:00
parent 6ceeb25d10
commit c0dad70d1f
6 changed files with 28 additions and 21 deletions
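
All of the template changes below rely on the same mechanism: AdminCIDR now returns a slice, and the templates range over it to emit one resource per CIDR. The following is a minimal, self-contained sketch of that rendering path, not the actual kops template machinery; the function map, cluster name and CIDR values are invented for illustration.

package main

import (
	"os"
	"text/template"
)

func main() {
	// Stand-in for the cluster configuration's AdminAccess list (illustrative values).
	adminAccess := []string{"203.0.113.0/24", "198.51.100.0/24"}

	funcs := template.FuncMap{
		// Mirrors the new AdminCIDR behaviour: default to 0.0.0.0/0 when nothing is configured.
		"AdminCIDR": func() []string {
			if len(adminAccess) == 0 {
				return []string{"0.0.0.0/0"}
			}
			return adminAccess
		},
		"ClusterName": func() string { return "example.k8s.local" },
	}

	// Template fragment in the same style as the AWS security group templates below.
	const text = `{{ range $index, $cidr := AdminCIDR }}
securityGroupRule/https-external-to-api-{{ $index }}:
  securityGroup: securityGroup/api.{{ ClusterName }}
  cidr: {{ $cidr }}
  protocol: tcp
  fromPort: 443
  toPort: 443
{{ end }}`

	tmpl := template.Must(template.New("rules").Funcs(funcs).Parse(text))
	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}

Each configured CIDR produces its own uniquely named rule, which is why the rule keys in the diffs below gain a -{{ $index }} suffix.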

View File

@@ -33,12 +33,15 @@ securityGroupRule/egress-api-lb:
   cidr: 0.0.0.0/0
 
 # HTTPS to the master ELB is allowed (for API access)
-securityGroupRule/https-external-to-api:
+# One security group rule is necessary per admin CIDR
+{{ range $index, $cidr := AdminCIDR }}
+securityGroupRule/https-external-to-api-{{ $index }}:
   securityGroup: securityGroup/api.{{ ClusterName }}
-  cidr: {{ AdminCIDR }}
+  cidr: {{ $cidr }}
   protocol: tcp
   fromPort: 443
   toPort: 443
+{{ end }}
 
 # Allow HTTPS to the master from the master ELB
 securityGroupRule/https-elb-to-master:
@@ -54,5 +57,3 @@ dnsName/{{ .MasterPublicName }}:
   Zone: dnsZone/{{ .DNSZone }}
   ResourceType: "A"
   TargetLoadBalancer: loadBalancer/api.{{ ClusterName }}

View File

@@ -3,9 +3,11 @@
 # We need to open security groups directly to the master nodes (instead of via the ELB)
 # HTTPS to the master is allowed (for API access)
-securityGroupRule/https-external-to-master:
+{{ range $index, $cidr := AdminCIDR }}
+securityGroupRule/https-external-to-master-{{ $index }}:
   securityGroup: securityGroup/masters.{{ ClusterName }}
-  cidr: {{ AdminCIDR }}
+  cidr: {{ $cidr }}
   protocol: tcp
   fromPort: 443
   toPort: 443
+{{ end }}
 

View File

@@ -25,13 +25,15 @@ securityGroupRule/master-egress:
   egress: true
   cidr: 0.0.0.0/0
 
-# SSH is open to AdminCIDR
-securityGroupRule/ssh-external-to-master:
+# SSH is open to AdminCIDR set
+{{ range $index, $cidr := AdminCIDR }}
+securityGroupRule/ssh-external-to-master-{{ $index }}:
   securityGroup: securityGroup/masters.{{ ClusterName }}
-  cidr: {{ AdminCIDR }}
+  cidr: {{ $cidr }}
   protocol: tcp
   fromPort: 22
   toPort: 22
+{{ end }}
 
 # Masters can talk to masters
 securityGroupRule/all-master-to-master:

View File

@@ -25,13 +25,15 @@ securityGroupRule/node-egress:
   egress: true
   cidr: 0.0.0.0/0
 
-# SSH is open to the world
-securityGroupRule/ssh-external-to-node:
+# SSH is open to CIDRs defined in the cluster configuration
+{{ range $index, $cidr := AdminCIDR }}
+securityGroupRule/ssh-external-to-node-{{ $index }}:
   securityGroup: securityGroup/nodes.{{ ClusterName }}
-  cidr: {{ AdminCIDR }}
+  cidr: {{ $cidr }}
   protocol: tcp
   fromPort: 22
   toPort: 22
+{{ end }}
 
 # Nodes can talk to nodes
 securityGroupRule/all-node-to-node:

View File

@@ -15,6 +15,8 @@ firewallRule/{{ $networkName }}-default-internal:
 # SSH is open to the world
 firewallRule/{{ $networkName }}-default-ssh-{{ AdminCIDR }}:
   network: network/default
-  sourceRanges: {{ AdminCIDR }}
   allowed: tcp:22
+  sourceRanges:
+{{ range $cidr := AdminCIDR }}
+  - {{ $cidr }}
+{{ end }}
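
On GCE, the same slice is rendered as a YAML list under sourceRanges rather than as separate rules. A rough sketch of just that fragment, using the same stand-in rendering approach as above (CIDR values again invented for illustration):

package main

import (
	"os"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// Stand-in for the real template function; returns illustrative CIDRs.
		"AdminCIDR": func() []string { return []string{"203.0.113.0/24", "198.51.100.0/24"} },
	}

	// Fragment in the style of the GCE firewall rule above.
	const text = `  allowed: tcp:22
  sourceRanges:
{{ range $cidr := AdminCIDR }}  - {{ $cidr }}
{{ end }}`

	// Renders as:
	//   allowed: tcp:22
	//   sourceRanges:
	//   - 203.0.113.0/24
	//   - 198.51.100.0/24
	t := template.Must(template.New("gce").Funcs(funcs).Parse(text))
	if err := t.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}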

View File

@@ -155,15 +155,13 @@ func (tf *TemplateFunctions) SharedZone(zone *api.ClusterZoneSpec) bool {
 	return zone.ProviderID != ""
 }
 
-// AdminCIDR returns the single CIDR that is allowed access to the admin ports of the cluster (22, 443 on master)
-func (tf *TemplateFunctions) AdminCIDR() (string, error) {
+// AdminCIDR returns the CIDRs that are allowed to access the admin ports of the cluster
+// (22, 443 on master and 22 on nodes)
+func (tf *TemplateFunctions) AdminCIDR() ([]string, error) {
 	if len(tf.cluster.Spec.AdminAccess) == 0 {
-		return "0.0.0.0/0", nil
+		return []string{"0.0.0.0/0"}, nil
 	}
-	if len(tf.cluster.Spec.AdminAccess) == 1 {
-		return tf.cluster.Spec.AdminAccess[0], nil
-	}
-	return "", fmt.Errorf("Multiple AdminAccess rules are not (currently) supported")
+	return tf.cluster.Spec.AdminAccess, nil
 }
 
 // IAMServiceEC2 returns the name of the IAM service for EC2 in the current region
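
For completeness, the behaviour of the new function in isolation: fall back to 0.0.0.0/0 when no AdminAccess CIDRs are configured, otherwise return all of them. A toy re-implementation against a stand-in struct (not the real kops api types), just to show the two branches:

package main

import "fmt"

// clusterSpec stands in for the relevant part of the kops cluster spec.
type clusterSpec struct {
	AdminAccess []string
}

// adminCIDR mirrors the logic of the new AdminCIDR method above.
func adminCIDR(spec clusterSpec) []string {
	if len(spec.AdminAccess) == 0 {
		// Nothing configured: keep the old world-open default.
		return []string{"0.0.0.0/0"}
	}
	return spec.AdminAccess
}

func main() {
	fmt.Println(adminCIDR(clusterSpec{}))
	// [0.0.0.0/0]
	fmt.Println(adminCIDR(clusterSpec{AdminAccess: []string{"203.0.113.0/24", "10.0.0.0/8"}}))
	// [203.0.113.0/24 10.0.0.0/8]
}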