Add basic flannel support

This commit is contained in:
Matthew Mihok 2017-02-10 11:22:33 -05:00
parent 05eb1d5e8e
commit bc235765d1
23 changed files with 1047 additions and 14 deletions

View File

@ -151,7 +151,7 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
cmd.Flags().StringVar(&options.Image, "image", options.Image, "Image to use")
cmd.Flags().StringVar(&options.Networking, "networking", "kubenet", "Networking mode to use. kubenet (default), classic, external, kopeio-vxlan, weave, calico, canal.")
cmd.Flags().StringVar(&options.Networking, "networking", "kubenet", "Networking mode to use. kubenet (default), classic, external, kopeio-vxlan, weave, flannel, calico, canal.")
cmd.Flags().StringVar(&options.DNSZone, "dns-zone", options.DNSZone, "DNS hosted zone to use (defaults to longest matching zone)")
cmd.Flags().StringVar(&options.OutDir, "out", options.OutDir, "Path to write any local output")
@ -257,6 +257,8 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
cluster.Spec.Networking.Kopeio = &api.KopeioNetworkingSpec{}
case "weave":
cluster.Spec.Networking.Weave = &api.WeaveNetworkingSpec{}
case "flannel":
cluster.Spec.Networking.Flannel = &api.FlannelNetworkingSpec{}
case "calico":
cluster.Spec.Networking.Calico = &api.CalicoNetworkingSpec{}
case "canal":
@ -517,7 +519,7 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
case api.TopologyPrivate:
if !supportsPrivateTopology(cluster.Spec.Networking) {
return fmt.Errorf("Invalid networking option %s. Currently only '--networking kopeio-vxlan', '--networking weave', '--networking calico', '--networking canal' are supported for private topologies", c.Networking)
return fmt.Errorf("Invalid networking option %s. Currently only '--networking kopeio-vxlan', '--networking weave', '--networking flannel', '--networking calico', '--networking canal' are supported for private topologies", c.Networking)
}
cluster.Spec.Topology = &api.TopologySpec{
Masters: api.TopologyPrivate,
@ -730,7 +732,7 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
func supportsPrivateTopology(n *api.NetworkingSpec) bool {
if n.CNI != nil || n.Kopeio != nil || n.Weave != nil || n.Calico != nil || n.Canal != nil {
if n.CNI != nil || n.Kopeio != nil || n.Weave != nil || n.Flannel != nil || n.Calico != nil || n.Canal != nil {
return true
}
return false

View File

@ -61,6 +61,12 @@ func TestPrivateWeave(t *testing.T) {
runTest(t, "privateweave.example.com", "../../tests/integration/privateweave", "v1alpha2", true)
}
// TestPrivateFlannel runs the test on a configuration with private topology, flannel networking
func TestPrivateFlannel(t *testing.T) {
	// Exercise both API versions against the same fixture directory.
	for _, version := range []string{"v1alpha1", "v1alpha2"} {
		runTest(t, "privateflannel.example.com", "../../tests/integration/privateflannel", version, true)
	}
}
// TestPrivateCalico runs the test on a configuration with private topology, calico networking
func TestPrivateCalico(t *testing.T) {
runTest(t, "privatecalico.example.com", "../../tests/integration/privatecalico", "v1alpha1", true)

View File

@ -29,7 +29,7 @@ kops create cluster
--master-zones stringSlice Zones in which to run masters (must be an odd number)
--model string Models to apply (separate multiple models with commas) (default "config,proto,cloudup")
--network-cidr string Set to override the default network CIDR
--networking string Networking mode to use. kubenet (default), classic, external, cni, kopeio-vxlan, weave, calico. (default "kubenet")
--networking string Networking mode to use. kubenet (default), classic, external, cni, kopeio-vxlan, weave, flannel, calico. (default "kubenet")
--node-count int32 Set the number of nodes
--node-security-groups stringSlice Add precreated additional security groups to nodes.
--node-size string Set instance size for nodes

View File

@ -28,8 +28,9 @@ Several different providers are currently built into kops:
1. kopeio-vxlan
2. [weave](https://github.com/weaveworks/weave-kube)
3. [Calico](http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/)
4. [Canal (Flannel + Calico)](https://github.com/projectcalico/canal)
3. [flannel](https://github.com/coreos/flannel)
4. [Calico](http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/)
5. [Canal (Flannel + Calico)](https://github.com/projectcalico/canal)
The manifests for the providers are included with kops, and you simply use `--networking provider-name`.
Replace the provider name with the names listed above with you `kops cluster create`. For instance
@ -37,7 +38,7 @@ to install `kopeio-vxlan` execute the following:
```console
$ kops create cluster --networking kopeio-vxlan
```
```
### CNI Networking
@ -114,7 +115,7 @@ The above will deploy a daemonset installation which requires K8s 1.4.x or above
#### More information about Calico
For Calico specific documentation please visit the [Calico Docs](http://docs.projectcalico.org/v2.0/getting-started/kubernetes/).
For Calico specific documentation please visit the [Calico Docs](http://docs.projectcalico.org/v2.0/getting-started/kubernetes/).
#### Getting help with Calico
@ -133,7 +134,7 @@ Canal is a project that combines [Flannel](https://github.com/coreos/flannel) an
#### Installing Canal on a new Cluster
The following command sets up a cluster, in HA mode, with Canal as the CNI and networking policy provider
The following command sets up a cluster, in HA mode, with Canal as the CNI and networking policy provider
```console
$ export $ZONES=mylistofzones

View File

@ -362,6 +362,8 @@ func (c *Cluster) FillDefaults() error {
// OK
} else if c.Spec.Networking.Weave != nil {
// OK
} else if c.Spec.Networking.Flannel != nil {
// OK
} else if c.Spec.Networking.Calico != nil {
// OK
} else if c.Spec.Networking.Canal != nil {

View File

@ -24,6 +24,7 @@ type NetworkingSpec struct {
CNI *CNINetworkingSpec `json:"cni,omitempty"`
Kopeio *KopeioNetworkingSpec `json:"kopeio,omitempty"`
Weave *WeaveNetworkingSpec `json:"weave,omitempty"`
Flannel *FlannelNetworkingSpec `json:"flannel,omitempty"`
Calico *CalicoNetworkingSpec `json:"calico,omitempty"`
Canal *CanalNetworkingSpec `json:"canal,omitempty"`
}
@ -55,6 +56,10 @@ type KopeioNetworkingSpec struct {
type WeaveNetworkingSpec struct {
}
// FlannelNetworkingSpec declares that we want Flannel networking.
type FlannelNetworkingSpec struct {
}
// CalicoNetworkingSpec declares that we want Calico networking.
type CalicoNetworkingSpec struct {
}

View File

@ -24,6 +24,7 @@ type NetworkingSpec struct {
CNI *CNINetworkingSpec `json:"cni,omitempty"`
Kopeio *KopeioNetworkingSpec `json:"kopeio,omitempty"`
Weave *WeaveNetworkingSpec `json:"weave,omitempty"`
Flannel *FlannelNetworkingSpec `json:"flannel,omitempty"`
Calico *CalicoNetworkingSpec `json:"calico,omitempty"`
Canal *CanalNetworkingSpec `json:"canal,omitempty"`
}
@ -55,6 +56,10 @@ type KopeioNetworkingSpec struct {
type WeaveNetworkingSpec struct {
}
// FlannelNetworkingSpec declares that we want Flannel networking.
type FlannelNetworkingSpec struct {
}
// CalicoNetworkingSpec declares that we want Calico networking.
type CalicoNetworkingSpec struct {
}

View File

@ -69,6 +69,8 @@ func RegisterConversions(scheme *runtime.Scheme) error {
Convert_kops_FederationList_To_v1alpha1_FederationList,
Convert_v1alpha1_FederationSpec_To_kops_FederationSpec,
Convert_kops_FederationSpec_To_v1alpha1_FederationSpec,
Convert_v1alpha1_FlannelNetworkingSpec_To_kops_FlannelNetworkingSpec,
Convert_kops_FlannelNetworkingSpec_To_v1alpha1_FlannelNetworkingSpec,
Convert_v1alpha1_InstanceGroup_To_kops_InstanceGroup,
Convert_kops_InstanceGroup_To_v1alpha1_InstanceGroup,
Convert_v1alpha1_InstanceGroupList_To_kops_InstanceGroupList,
@ -824,6 +826,22 @@ func Convert_kops_FederationSpec_To_v1alpha1_FederationSpec(in *kops.FederationS
return autoConvert_kops_FederationSpec_To_v1alpha1_FederationSpec(in, out, s)
}
// autoConvert_v1alpha1_FlannelNetworkingSpec_To_kops_FlannelNetworkingSpec performs the
// field-by-field conversion; the spec has no fields, so there is nothing to copy.
func autoConvert_v1alpha1_FlannelNetworkingSpec_To_kops_FlannelNetworkingSpec(in *FlannelNetworkingSpec, out *kops.FlannelNetworkingSpec, s conversion.Scope) error {
return nil
}
// Convert_v1alpha1_FlannelNetworkingSpec_To_kops_FlannelNetworkingSpec is an autogenerated conversion function.
func Convert_v1alpha1_FlannelNetworkingSpec_To_kops_FlannelNetworkingSpec(in *FlannelNetworkingSpec, out *kops.FlannelNetworkingSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_FlannelNetworkingSpec_To_kops_FlannelNetworkingSpec(in, out, s)
}
// autoConvert_kops_FlannelNetworkingSpec_To_v1alpha1_FlannelNetworkingSpec performs the
// field-by-field conversion; the spec has no fields, so there is nothing to copy.
func autoConvert_kops_FlannelNetworkingSpec_To_v1alpha1_FlannelNetworkingSpec(in *kops.FlannelNetworkingSpec, out *FlannelNetworkingSpec, s conversion.Scope) error {
return nil
}
// Convert_kops_FlannelNetworkingSpec_To_v1alpha1_FlannelNetworkingSpec is an autogenerated conversion function.
func Convert_kops_FlannelNetworkingSpec_To_v1alpha1_FlannelNetworkingSpec(in *kops.FlannelNetworkingSpec, out *FlannelNetworkingSpec, s conversion.Scope) error {
return autoConvert_kops_FlannelNetworkingSpec_To_v1alpha1_FlannelNetworkingSpec(in, out, s)
}
func autoConvert_v1alpha1_InstanceGroup_To_kops_InstanceGroup(in *InstanceGroup, out *kops.InstanceGroup, s conversion.Scope) error {
if err := api.Convert_v1_TypeMeta_To_v1_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
return err
@ -1371,6 +1389,15 @@ func autoConvert_v1alpha1_NetworkingSpec_To_kops_NetworkingSpec(in *NetworkingSp
} else {
out.Weave = nil
}
if in.Flannel != nil {
in, out := &in.Flannel, &out.Flannel
*out = new(kops.FlannelNetworkingSpec)
if err := Convert_v1alpha1_FlannelNetworkingSpec_To_kops_FlannelNetworkingSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Flannel = nil
}
if in.Calico != nil {
in, out := &in.Calico, &out.Calico
*out = new(kops.CalicoNetworkingSpec)
@ -1451,6 +1478,15 @@ func autoConvert_kops_NetworkingSpec_To_v1alpha1_NetworkingSpec(in *kops.Network
} else {
out.Weave = nil
}
if in.Flannel != nil {
in, out := &in.Flannel, &out.Flannel
*out = new(FlannelNetworkingSpec)
if err := Convert_kops_FlannelNetworkingSpec_To_v1alpha1_FlannelNetworkingSpec(*in, *out, s); err != nil {
return err
}
} else {
out.Flannel = nil
}
if in.Calico != nil {
in, out := &in.Calico, &out.Calico
*out = new(CalicoNetworkingSpec)

View File

@ -24,6 +24,7 @@ type NetworkingSpec struct {
CNI *CNINetworkingSpec `json:"cni,omitempty"`
Kopeio *KopeioNetworkingSpec `json:"kopeio,omitempty"`
Weave *WeaveNetworkingSpec `json:"weave,omitempty"`
Flannel *FlannelNetworkingSpec `json:"flannel,omitempty"`
Calico *CalicoNetworkingSpec `json:"calico,omitempty"`
Canal *CanalNetworkingSpec `json:"canal,omitempty"`
}
@ -55,6 +56,10 @@ type KopeioNetworkingSpec struct {
type WeaveNetworkingSpec struct {
}
// FlannelNetworkingSpec declares that we want Flannel networking.
type FlannelNetworkingSpec struct {
}
// CalicoNetworkingSpec declares that we want Calico networking.
type CalicoNetworkingSpec struct {
}

View File

@ -54,7 +54,7 @@ func (c *OptionsContext) UsesKubenet() (bool, error) {
} else if networking.External != nil {
// external is based on kubenet
return true, nil
} else if networking.CNI != nil || networking.Weave != nil || networking.Calico != nil || networking.Canal != nil {
} else if networking.CNI != nil || networking.Weave != nil || networking.Flannel != nil || networking.Calico != nil || networking.Canal != nil {
return false, nil
} else if networking.Kopeio != nil {
// Kopeio is based on kubenet / external

View File

@ -45,7 +45,7 @@ func (b *NetworkingOptionsBuilder) BuildOptions(o interface{}) error {
if networking == nil {
return fmt.Errorf("networking not set")
}
if networking.CNI != nil || networking.Weave != nil || networking.Calico != nil || networking.Canal != nil {
if networking.CNI != nil || networking.Weave != nil || networking.Flannel != nil || networking.Calico != nil || networking.Canal != nil {
options.Kubelet.NetworkPluginName = "cni"
if k8sVersion.Major == 1 && k8sVersion.Minor <= 4 {

View File

@ -108,6 +108,10 @@ func (b *FirewallModelBuilder) buildNodeRules(c *fi.ModelBuilderContext) error {
udpPorts = append(udpPorts, 6784)
}
if b.Cluster.Spec.Networking.Flannel != nil {
udpPorts = append(udpPorts, 8285)
}
if b.Cluster.Spec.Networking.Calico != nil {
// Calico needs to access etcd
// TODO: Remove, replace with etcd in calico manifest

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -0,0 +1,96 @@
apiVersion: kops/v1alpha1
kind: Cluster
metadata:
creationTimestamp: "2016-12-12T04:13:14Z"
name: privateflannel.example.com
spec:
adminAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/privateflannel.example.com
etcdClusters:
- etcdMembers:
- name: us-test-1a
zone: us-test-1a
name: main
- etcdMembers:
- name: us-test-1a
zone: us-test-1a
name: events
kubernetesVersion: v1.4.6
masterInternalName: api.internal.privateflannel.example.com
masterPublicName: api.privateflannel.example.com
networkCIDR: 172.20.0.0/16
networking:
flannel: {}
nonMasqueradeCIDR: 100.64.0.0/10
topology:
bastion:
enable: true
idleTimeout: 300
machineType: t2.medium
masters: private
nodes: private
zones:
- cidr: 172.20.4.0/22
name: us-test-1a
privateCIDR: 172.20.32.0/19
---
apiVersion: kops/v1alpha1
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-12T04:13:15Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: privateflannel.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
zones:
- us-test-1a
---
apiVersion: kops/v1alpha1
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-12T04:13:15Z"
name: nodes
labels:
kops.k8s.io/cluster: privateflannel.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
zones:
- us-test-1a
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-14T15:32:41Z"
name: bastion
labels:
kops.k8s.io/cluster: privateflannel.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.micro
maxSize: 1
minSize: 1
role: Bastion
subnets:
- utility-us-test-1a

View File

@ -0,0 +1,99 @@
apiVersion: kops/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2016-12-12T04:13:14Z"
name: privateflannel.example.com
spec:
kubernetesApiAccess:
- 0.0.0.0/0
channel: stable
cloudProvider: aws
configBase: memfs://clusters.example.com/privateflannel.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: main
- etcdMembers:
- instanceGroup: master-us-test-1a
name: us-test-1a
name: events
kubernetesVersion: v1.4.6
masterInternalName: api.internal.privateflannel.example.com
masterPublicName: api.privateflannel.example.com
networkCIDR: 172.20.0.0/16
networking:
flannel: {}
nonMasqueradeCIDR: 100.64.0.0/10
sshAccess:
- 0.0.0.0/0
topology:
masters: private
nodes: private
subnets:
- cidr: 172.20.32.0/19
name: us-test-1a
type: Private
zone: us-test-1a
- cidr: 172.20.4.0/22
name: utility-us-test-1a
type: Utility
zone: us-test-1a
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-12T04:13:15Z"
name: master-us-test-1a
labels:
kops.k8s.io/cluster: privateflannel.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: m3.medium
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test-1a
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-12T04:13:15Z"
name: nodes
labels:
kops.k8s.io/cluster: privateflannel.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.medium
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test-1a
---
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2016-12-14T15:32:41Z"
name: bastion
labels:
kops.k8s.io/cluster: privateflannel.example.com
spec:
associatePublicIp: true
image: kope.io/k8s-1.4-debian-jessie-amd64-hvm-ebs-2016-10-21
machineType: t2.micro
maxSize: 1
minSize: 1
role: Bastion
subnets:
- utility-us-test-1a

View File

@ -0,0 +1,618 @@
resource "aws_autoscaling_attachment" "bastion-privateflannel-example-com" {
elb = "${aws_elb.bastion-privateflannel-example-com.id}"
autoscaling_group_name = "${aws_autoscaling_group.bastion-privateflannel-example-com.id}"
}
resource "aws_autoscaling_attachment" "master-us-test-1a-masters-privateflannel-example-com" {
elb = "${aws_elb.api-privateflannel-example-com.id}"
autoscaling_group_name = "${aws_autoscaling_group.master-us-test-1a-masters-privateflannel-example-com.id}"
}
resource "aws_autoscaling_group" "bastion-privateflannel-example-com" {
name = "bastion.privateflannel.example.com"
launch_configuration = "${aws_launch_configuration.bastion-privateflannel-example-com.id}"
max_size = 1
min_size = 1
vpc_zone_identifier = ["${aws_subnet.utility-us-test-1a-privateflannel-example-com.id}"]
tag = {
key = "KubernetesCluster"
value = "privateflannel.example.com"
propagate_at_launch = true
}
tag = {
key = "Name"
value = "bastion.privateflannel.example.com"
propagate_at_launch = true
}
tag = {
key = "k8s.io/role/bastion"
value = "1"
propagate_at_launch = true
}
}
resource "aws_autoscaling_group" "master-us-test-1a-masters-privateflannel-example-com" {
name = "master-us-test-1a.masters.privateflannel.example.com"
launch_configuration = "${aws_launch_configuration.master-us-test-1a-masters-privateflannel-example-com.id}"
max_size = 1
min_size = 1
vpc_zone_identifier = ["${aws_subnet.us-test-1a-privateflannel-example-com.id}"]
tag = {
key = "KubernetesCluster"
value = "privateflannel.example.com"
propagate_at_launch = true
}
tag = {
key = "Name"
value = "master-us-test-1a.masters.privateflannel.example.com"
propagate_at_launch = true
}
tag = {
key = "k8s.io/role/master"
value = "1"
propagate_at_launch = true
}
}
resource "aws_autoscaling_group" "nodes-privateflannel-example-com" {
name = "nodes.privateflannel.example.com"
launch_configuration = "${aws_launch_configuration.nodes-privateflannel-example-com.id}"
max_size = 2
min_size = 2
vpc_zone_identifier = ["${aws_subnet.us-test-1a-privateflannel-example-com.id}"]
tag = {
key = "KubernetesCluster"
value = "privateflannel.example.com"
propagate_at_launch = true
}
tag = {
key = "Name"
value = "nodes.privateflannel.example.com"
propagate_at_launch = true
}
tag = {
key = "k8s.io/role/node"
value = "1"
propagate_at_launch = true
}
}
resource "aws_ebs_volume" "us-test-1a-etcd-events-privateflannel-example-com" {
availability_zone = "us-test-1a"
size = 20
type = "gp2"
encrypted = false
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "us-test-1a.etcd-events.privateflannel.example.com"
"k8s.io/etcd/events" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
}
}
resource "aws_ebs_volume" "us-test-1a-etcd-main-privateflannel-example-com" {
availability_zone = "us-test-1a"
size = 20
type = "gp2"
encrypted = false
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "us-test-1a.etcd-main.privateflannel.example.com"
"k8s.io/etcd/main" = "us-test-1a/us-test-1a"
"k8s.io/role/master" = "1"
}
}
resource "aws_eip" "us-test-1a-privateflannel-example-com" {
vpc = true
}
resource "aws_elb" "api-privateflannel-example-com" {
name = "api-privateflannel"
listener = {
instance_port = 443
instance_protocol = "TCP"
lb_port = 443
lb_protocol = "TCP"
}
security_groups = ["${aws_security_group.api-elb-privateflannel-example-com.id}"]
subnets = ["${aws_subnet.utility-us-test-1a-privateflannel-example-com.id}"]
health_check = {
target = "TCP:443"
healthy_threshold = 2
unhealthy_threshold = 2
interval = 10
timeout = 5
}
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "api.privateflannel.example.com"
}
}
resource "aws_elb" "bastion-privateflannel-example-com" {
name = "bastion-privateflannel"
listener = {
instance_port = 22
instance_protocol = "TCP"
lb_port = 22
lb_protocol = "TCP"
}
security_groups = ["${aws_security_group.bastion-elb-privateflannel-example-com.id}"]
subnets = ["${aws_subnet.utility-us-test-1a-privateflannel-example-com.id}"]
health_check = {
target = "TCP:22"
healthy_threshold = 2
unhealthy_threshold = 2
interval = 10
timeout = 5
}
idle_timeout = 300
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "bastion.privateflannel.example.com"
}
}
resource "aws_iam_instance_profile" "bastions-privateflannel-example-com" {
name = "bastions.privateflannel.example.com"
roles = ["${aws_iam_role.bastions-privateflannel-example-com.name}"]
}
resource "aws_iam_instance_profile" "masters-privateflannel-example-com" {
name = "masters.privateflannel.example.com"
roles = ["${aws_iam_role.masters-privateflannel-example-com.name}"]
}
resource "aws_iam_instance_profile" "nodes-privateflannel-example-com" {
name = "nodes.privateflannel.example.com"
roles = ["${aws_iam_role.nodes-privateflannel-example-com.name}"]
}
resource "aws_iam_role" "bastions-privateflannel-example-com" {
name = "bastions.privateflannel.example.com"
assume_role_policy = "${file("${path.module}/data/aws_iam_role_bastions.privateflannel.example.com_policy")}"
}
resource "aws_iam_role" "masters-privateflannel-example-com" {
name = "masters.privateflannel.example.com"
assume_role_policy = "${file("${path.module}/data/aws_iam_role_masters.privateflannel.example.com_policy")}"
}
resource "aws_iam_role" "nodes-privateflannel-example-com" {
name = "nodes.privateflannel.example.com"
assume_role_policy = "${file("${path.module}/data/aws_iam_role_nodes.privateflannel.example.com_policy")}"
}
resource "aws_iam_role_policy" "bastions-privateflannel-example-com" {
name = "bastions.privateflannel.example.com"
role = "${aws_iam_role.bastions-privateflannel-example-com.name}"
policy = "${file("${path.module}/data/aws_iam_role_policy_bastions.privateflannel.example.com_policy")}"
}
resource "aws_iam_role_policy" "masters-privateflannel-example-com" {
name = "masters.privateflannel.example.com"
role = "${aws_iam_role.masters-privateflannel-example-com.name}"
policy = "${file("${path.module}/data/aws_iam_role_policy_masters.privateflannel.example.com_policy")}"
}
resource "aws_iam_role_policy" "nodes-privateflannel-example-com" {
name = "nodes.privateflannel.example.com"
role = "${aws_iam_role.nodes-privateflannel-example-com.name}"
policy = "${file("${path.module}/data/aws_iam_role_policy_nodes.privateflannel.example.com_policy")}"
}
resource "aws_internet_gateway" "privateflannel-example-com" {
vpc_id = "${aws_vpc.privateflannel-example-com.id}"
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "privateflannel.example.com"
}
}
resource "aws_key_pair" "kubernetes-privateflannel-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157" {
key_name = "kubernetes.privateflannel.example.com-c4:a6:ed:9a:a8:89:b9:e2:c3:9c:d6:63:eb:9c:71:57"
public_key = "${file("${path.module}/data/aws_key_pair_kubernetes.privateflannel.example.com-c4a6ed9aa889b9e2c39cd663eb9c7157_public_key")}"
}
resource "aws_launch_configuration" "bastion-privateflannel-example-com" {
name_prefix = "bastion.privateflannel.example.com-"
image_id = "ami-12345678"
instance_type = "t2.micro"
key_name = "${aws_key_pair.kubernetes-privateflannel-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
iam_instance_profile = "${aws_iam_instance_profile.bastions-privateflannel-example-com.id}"
security_groups = ["${aws_security_group.bastion-privateflannel-example-com.id}"]
associate_public_ip_address = true
root_block_device = {
volume_type = "gp2"
volume_size = 20
delete_on_termination = true
}
lifecycle = {
create_before_destroy = true
}
}
resource "aws_launch_configuration" "master-us-test-1a-masters-privateflannel-example-com" {
name_prefix = "master-us-test-1a.masters.privateflannel.example.com-"
image_id = "ami-12345678"
instance_type = "m3.medium"
key_name = "${aws_key_pair.kubernetes-privateflannel-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
iam_instance_profile = "${aws_iam_instance_profile.masters-privateflannel-example-com.id}"
security_groups = ["${aws_security_group.masters-privateflannel-example-com.id}"]
associate_public_ip_address = false
user_data = "${file("${path.module}/data/aws_launch_configuration_master-us-test-1a.masters.privateflannel.example.com_user_data")}"
root_block_device = {
volume_type = "gp2"
volume_size = 20
delete_on_termination = true
}
ephemeral_block_device = {
device_name = "/dev/sdc"
virtual_name = "ephemeral0"
}
lifecycle = {
create_before_destroy = true
}
}
resource "aws_launch_configuration" "nodes-privateflannel-example-com" {
name_prefix = "nodes.privateflannel.example.com-"
image_id = "ami-12345678"
instance_type = "t2.medium"
key_name = "${aws_key_pair.kubernetes-privateflannel-example-com-c4a6ed9aa889b9e2c39cd663eb9c7157.id}"
iam_instance_profile = "${aws_iam_instance_profile.nodes-privateflannel-example-com.id}"
security_groups = ["${aws_security_group.nodes-privateflannel-example-com.id}"]
associate_public_ip_address = false
user_data = "${file("${path.module}/data/aws_launch_configuration_nodes.privateflannel.example.com_user_data")}"
root_block_device = {
volume_type = "gp2"
volume_size = 20
delete_on_termination = true
}
lifecycle = {
create_before_destroy = true
}
}
resource "aws_nat_gateway" "us-test-1a-privateflannel-example-com" {
allocation_id = "${aws_eip.us-test-1a-privateflannel-example-com.id}"
subnet_id = "${aws_subnet.utility-us-test-1a-privateflannel-example-com.id}"
}
resource "aws_route" "0-0-0-0--0" {
route_table_id = "${aws_route_table.privateflannel-example-com.id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.privateflannel-example-com.id}"
}
resource "aws_route" "private-us-test-1a-0-0-0-0--0" {
route_table_id = "${aws_route_table.private-us-test-1a-privateflannel-example-com.id}"
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = "${aws_nat_gateway.us-test-1a-privateflannel-example-com.id}"
}
resource "aws_route53_record" "api-privateflannel-example-com" {
name = "api.privateflannel.example.com"
type = "A"
alias = {
name = "${aws_elb.api-privateflannel-example-com.dns_name}"
zone_id = "${aws_elb.api-privateflannel-example-com.zone_id}"
evaluate_target_health = false
}
zone_id = "/hostedzone/Z1AFAKE1ZON3YO"
}
resource "aws_route_table" "private-us-test-1a-privateflannel-example-com" {
vpc_id = "${aws_vpc.privateflannel-example-com.id}"
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "private-us-test-1a.privateflannel.example.com"
}
}
resource "aws_route_table" "privateflannel-example-com" {
vpc_id = "${aws_vpc.privateflannel-example-com.id}"
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "privateflannel.example.com"
}
}
resource "aws_route_table_association" "private-us-test-1a-privateflannel-example-com" {
subnet_id = "${aws_subnet.us-test-1a-privateflannel-example-com.id}"
route_table_id = "${aws_route_table.private-us-test-1a-privateflannel-example-com.id}"
}
resource "aws_route_table_association" "utility-us-test-1a-privateflannel-example-com" {
subnet_id = "${aws_subnet.utility-us-test-1a-privateflannel-example-com.id}"
route_table_id = "${aws_route_table.privateflannel-example-com.id}"
}
resource "aws_security_group" "api-elb-privateflannel-example-com" {
name = "api-elb.privateflannel.example.com"
vpc_id = "${aws_vpc.privateflannel-example-com.id}"
description = "Security group for api ELB"
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "api-elb.privateflannel.example.com"
}
}
resource "aws_security_group" "bastion-elb-privateflannel-example-com" {
name = "bastion-elb.privateflannel.example.com"
vpc_id = "${aws_vpc.privateflannel-example-com.id}"
description = "Security group for bastion ELB"
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "bastion-elb.privateflannel.example.com"
}
}
resource "aws_security_group" "bastion-privateflannel-example-com" {
name = "bastion.privateflannel.example.com"
vpc_id = "${aws_vpc.privateflannel-example-com.id}"
description = "Security group for bastion"
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "bastion.privateflannel.example.com"
}
}
resource "aws_security_group" "masters-privateflannel-example-com" {
name = "masters.privateflannel.example.com"
vpc_id = "${aws_vpc.privateflannel-example-com.id}"
description = "Security group for masters"
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "masters.privateflannel.example.com"
}
}
resource "aws_security_group" "nodes-privateflannel-example-com" {
name = "nodes.privateflannel.example.com"
vpc_id = "${aws_vpc.privateflannel-example-com.id}"
description = "Security group for nodes"
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "nodes.privateflannel.example.com"
}
}
resource "aws_security_group_rule" "all-master-to-master" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
}
resource "aws_security_group_rule" "all-master-to-node" {
type = "ingress"
security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
}
resource "aws_security_group_rule" "all-node-to-node" {
type = "ingress"
security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
}
resource "aws_security_group_rule" "api-elb-egress" {
type = "egress"
security_group_id = "${aws_security_group.api-elb-privateflannel-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
resource "aws_security_group_rule" "bastion-egress" {
type = "egress"
security_group_id = "${aws_security_group.bastion-privateflannel-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
# Allow all outbound traffic from the bastion ELB.
resource "aws_security_group_rule" "bastion-elb-egress" {
type = "egress"
security_group_id = "${aws_security_group.bastion-elb-privateflannel-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
# Allow SSH (22/tcp) from the bastion hosts into the masters.
resource "aws_security_group_rule" "bastion-to-master-ssh" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.bastion-privateflannel-example-com.id}"
from_port = 22
to_port = 22
protocol = "tcp"
}
# Allow SSH (22/tcp) from the bastion hosts into the nodes.
resource "aws_security_group_rule" "bastion-to-node-ssh" {
type = "ingress"
security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.bastion-privateflannel-example-com.id}"
from_port = 22
to_port = 22
protocol = "tcp"
}
# Allow HTTPS (443/tcp) to the API ELB from anywhere.
resource "aws_security_group_rule" "https-api-elb-0-0-0-0--0" {
type = "ingress"
security_group_id = "${aws_security_group.api-elb-privateflannel-example-com.id}"
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# Allow HTTPS (443/tcp) from the API ELB to the masters.
resource "aws_security_group_rule" "https-elb-to-master" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.api-elb-privateflannel-example-com.id}"
from_port = 443
to_port = 443
protocol = "tcp"
}
# Allow all outbound traffic from the masters.
resource "aws_security_group_rule" "master-egress" {
type = "egress"
security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
# Allow all outbound traffic from the nodes.
resource "aws_security_group_rule" "node-egress" {
type = "egress"
security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
# Allow 4194/tcp from nodes to masters.
resource "aws_security_group_rule" "node-to-master-tcp-4194" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
from_port = 4194
to_port = 4194
protocol = "tcp"
}
# Allow 443/tcp from nodes to masters (API access from nodes).
resource "aws_security_group_rule" "node-to-master-tcp-443" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
from_port = 443
to_port = 443
protocol = "tcp"
}
# Allow 8285/udp from nodes to masters (flannel overlay traffic;
# 8285/udp matches the "udp" backend in the flannel addon's net-conf).
resource "aws_security_group_rule" "node-to-master-udp-8285" {
type = "ingress"
security_group_id = "${aws_security_group.masters-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.nodes-privateflannel-example-com.id}"
from_port = 8285
to_port = 8285
protocol = "udp"
}
# Allow SSH (22/tcp) from the bastion ELB to the bastion hosts.
resource "aws_security_group_rule" "ssh-elb-to-bastion" {
type = "ingress"
security_group_id = "${aws_security_group.bastion-privateflannel-example-com.id}"
source_security_group_id = "${aws_security_group.bastion-elb-privateflannel-example-com.id}"
from_port = 22
to_port = 22
protocol = "tcp"
}
# Allow SSH (22/tcp) to the bastion ELB from anywhere.
resource "aws_security_group_rule" "ssh-external-to-bastion-elb-0-0-0-0--0" {
type = "ingress"
security_group_id = "${aws_security_group.bastion-elb-privateflannel-example-com.id}"
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# Private subnet for masters/nodes in us-test-1a.
resource "aws_subnet" "us-test-1a-privateflannel-example-com" {
vpc_id = "${aws_vpc.privateflannel-example-com.id}"
cidr_block = "172.20.32.0/19"
availability_zone = "us-test-1a"
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "us-test-1a.privateflannel.example.com"
}
}
# Utility (public) subnet in us-test-1a, used for ELBs and the bastion.
resource "aws_subnet" "utility-us-test-1a-privateflannel-example-com" {
vpc_id = "${aws_vpc.privateflannel-example-com.id}"
cidr_block = "172.20.4.0/22"
availability_zone = "us-test-1a"
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "utility-us-test-1a.privateflannel.example.com"
}
}
# Cluster VPC; both subnets above are carved out of this /16.
resource "aws_vpc" "privateflannel-example-com" {
cidr_block = "172.20.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "privateflannel.example.com"
}
}
# DHCP options so instances resolve via the AWS-provided DNS.
resource "aws_vpc_dhcp_options" "privateflannel-example-com" {
domain_name = "us-test-1.compute.internal"
domain_name_servers = ["AmazonProvidedDNS"]
tags = {
KubernetesCluster = "privateflannel.example.com"
Name = "privateflannel.example.com"
}
}
# Attach the DHCP options to the cluster VPC.
resource "aws_vpc_dhcp_options_association" "privateflannel-example-com" {
vpc_id = "${aws_vpc.privateflannel-example-com.id}"
dhcp_options_id = "${aws_vpc_dhcp_options.privateflannel-example-com.id}"
}

View File

@ -0,0 +1,92 @@
# Flannel CNI networking addon (kops networking.flannel, flannel v0.7.0).
# All objects carry role.kubernetes.io/networking: "1", which is the label
# the addon channel selector matches on.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: flannel
  namespace: kube-system
  labels:
    role.kubernetes.io/networking: "1"
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    k8s-app: flannel
    role.kubernetes.io/networking: "1"
data:
  # CNI network config; the install-cni container copies this to
  # /etc/cni/net.d/10-flannel.conf on each node.
  cni-conf.json: |
    {
      "name": "cbr0",
      "type": "flannel",
      "delegate": {
        "forceAddress": true,
        "isDefaultGateway": true
      }
    }
  # flanneld configuration read from /etc/kube-flannel/.
  net-conf.json: |
    {
      "Network": "100.64.0.0/10",
      "Backend": {
        "Type": "udp"
      }
    }
---
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    k8s-app: flannel
    role.kubernetes.io/networking: "1"
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
        role.kubernetes.io/networking: "1"
    spec:
      # flanneld programs the host network, so it runs on the host net
      # namespace with elevated privileges.
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      serviceAccountName: flannel
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.7.0
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      # Sidecar that installs the CNI config, then sleeps forever so the
      # pod stays Running.
      - name: install-cni
        image: quay.io/coreos/flannel:v0.7.0
        command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
      - name: run
        hostPath:
          path: /run
      - name: cni
        hostPath:
          path: /etc/cni/net.d
      - name: flannel-cfg
        configMap:
          name: kube-flannel-cfg

View File

@ -0,0 +1,3 @@
{
"mode": "0755"
}

View File

@ -190,6 +190,23 @@ func (b *BootstrapChannelBuilder) buildManifest() (*channelsapi.Addons, map[stri
manifests[key] = "addons/" + location
}
// Register the flannel networking addon when the cluster spec selects flannel.
if b.cluster.Spec.Networking.Flannel != nil {
key := "networking.flannel"
version := "0.7.0"
// TODO: Create configuration object for cni providers (maybe create it but orphan it)?
// Manifest path is versioned, e.g. addons/networking.flannel/v0.7.0.yaml.
location := key + "/v" + version + ".yaml"
addons.Spec.Addons = append(addons.Spec.Addons, &channelsapi.AddonSpec{
Name: fi.String(key),
Version: fi.String(version),
// Selector matches the role.kubernetes.io/networking label carried by
// every object in the flannel addon manifest.
Selector: map[string]string{"role.kubernetes.io/networking": "1"},
Manifest: fi.String(location),
})
manifests[key] = "addons/" + location
}
if b.cluster.Spec.Networking.Calico != nil {
key := "networking.projectcalico.org"
version := "2.0"

View File

@ -49,6 +49,11 @@ func usesCNI(c *api.Cluster) bool {
return true
}
if networkConfig.Flannel != nil {
// Flannel uses CNI (comment previously said "Weave" — copy-paste slip)
return true
}
if networkConfig.Calico != nil {
// Calico uses CNI
return true

View File

@ -45,7 +45,7 @@ func buildCloudupTags(cluster *api.Cluster) (sets.String, error) {
} else if networking.External != nil {
// external is based on kubenet
tags.Insert("_networking_kubenet", "_networking_external")
} else if networking.CNI != nil || networking.Weave != nil || networking.Calico != nil || networking.Canal != nil {
} else if networking.CNI != nil || networking.Weave != nil || networking.Flannel != nil || networking.Calico != nil || networking.Canal != nil {
tags.Insert("_networking_cni")
} else if networking.Kopeio != nil {
// TODO combine with the External
@ -108,7 +108,7 @@ func buildNodeupTags(role api.InstanceGroupRole, cluster *api.Cluster, clusterTa
return nil, fmt.Errorf("Networking is not set, and should not be nil here")
}
if networking.CNI != nil || networking.Weave != nil || networking.Calico != nil || networking.Canal != nil {
if networking.CNI != nil || networking.Weave != nil || networking.Flannel != nil || networking.Calico != nil || networking.Canal != nil {
// external is based on cni, weave, flannel, calico, etc
tags.Insert("_networking_cni")
}
@ -139,7 +139,7 @@ func buildNodeupTags(role api.InstanceGroupRole, cluster *api.Cluster, clusterTa
// TODO: Replace with list of CNI plugins ?
if usesCNI(cluster) {
tags.Insert("_cni_bridge", "_cni_host_local", "_cni_loopback", "_cni_ptp")
tags.Insert("_cni_bridge", "_cni_host_local", "_cni_loopback", "_cni_ptp", "_cni_flannel")
//tags.Insert("_cni_tuning")
}

View File

@ -96,6 +96,40 @@ func TestBuildTags_CloudProvider_AWS_Weave(t *testing.T) {
}
}
// TestBuildTags_CloudProvider_AWS_Flannel verifies that selecting flannel
// networking on an AWS cluster yields the CNI cloudup tag (and not the
// kubenet tag), and that the derived nodeup tags include both the cloud
// tag and the CNI tag.
func TestBuildTags_CloudProvider_AWS_Flannel(t *testing.T) {
	c := buildCluster(nil)
	networking := &api.NetworkingSpec{Flannel: &api.FlannelNetworkingSpec{}}
	c.Spec.Networking = networking

	tags, err := buildCloudupTags(c)
	if err != nil {
		t.Fatalf("buildCloudupTags error: %v", err)
	}
	if !tags.Has("_aws") {
		t.Fatal("tag _aws not found")
	}
	if !tags.Has("_networking_cni") {
		t.Fatal("tag _networking_cni not found")
	}
	if tags.Has("_networking_kubenet") {
		t.Fatal("tag _networking_kubenet found")
	}

	nodeUpTags, err := buildNodeupTags(api.InstanceGroupRoleNode, c, tags)
	if err != nil {
		t.Fatalf("buildNodeupTags error: %v", err)
	}
	if !nodeUpTags.Has("_aws") {
		t.Fatal("nodeUpTag _aws not found")
	}
	// Flannel is a CNI provider, so nodeup must also receive the CNI tag;
	// the original test omitted this key assertion.
	if !nodeUpTags.Has("_networking_cni") {
		t.Fatal("nodeUpTag _networking_cni not found")
	}
}
func TestBuildTags_CloudProvider_AWS_Calico(t *testing.T) {
c := buildCluster(nil)