mirror of https://github.com/kubernetes/kops.git
Use NodeupConfig for kube-proxy config
parent 751c71b662
commit b5eef1c129
@@ -43,20 +43,11 @@ var _ fi.NodeupModelBuilder = &KubeProxyBuilder{}
 // Build is responsible for building the kube-proxy manifest
 // @TODO we should probably change this to a daemonset in the future and follow the kubeadm path
 func (b *KubeProxyBuilder) Build(c *fi.NodeupModelBuilderContext) error {
-	if b.Cluster.Spec.KubeProxy.Enabled != nil && !*b.Cluster.Spec.KubeProxy.Enabled {
+	if b.NodeupConfig.KubeProxy == nil {
 		klog.V(2).Infof("Kube-proxy is disabled, will not create configuration for it.")
 		return nil
 	}
 
-	if b.IsMaster {
-		// If this is a master that is not isolated, run it as a normal node also (start kube-proxy etc)
-		// This lets e.g. daemonset pods communicate with other pods in the system
-		if fi.ValueOf(b.Cluster.Spec.Networking.IsolateControlPlane) {
-			klog.V(2).Infof("Running on Master with IsolateMaster=true; skipping kube-proxy installation")
-			return nil
-		}
-	}
-
 	{
 		pod, err := b.buildPod()
 		if err != nil {
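Note: below is a minimal, self-contained sketch of the new gating behaviour, using stand-in types rather than the real kops/nodeup API. The builder no longer consults the cluster spec's Enabled flag or the IsMaster/IsolateControlPlane combination; a nil KubeProxy in the per-node config is the single signal to skip installation.

package main

import "fmt"

// KubeProxyConfig and NodeupConfig are illustrative stand-ins, not the real
// kops.KubeProxyConfig / nodeup.Config types.
type KubeProxyConfig struct{ ProxyMode string }

type NodeupConfig struct{ KubeProxy *KubeProxyConfig }

// build mirrors the shape of KubeProxyBuilder.Build after this change: the
// only check is whether the per-node config carries a kube-proxy section.
func build(cfg *NodeupConfig) string {
	if cfg.KubeProxy == nil {
		// Disabled cluster-wide or isolated control plane: nothing to install.
		return ""
	}
	return fmt.Sprintf("kube-proxy manifest (mode=%q)", cfg.KubeProxy.ProxyMode)
}

func main() {
	fmt.Println(build(&NodeupConfig{KubeProxy: &KubeProxyConfig{ProxyMode: "iptables"}}))
	fmt.Println(build(&NodeupConfig{}) == "") // true: builder is a no-op
}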
@@ -114,7 +105,7 @@ func (b *KubeProxyBuilder) Build(c *fi.NodeupModelBuilderContext) error {
 
 // buildPod is responsible constructing the pod spec
 func (b *KubeProxyBuilder) buildPod() (*v1.Pod, error) {
-	c := b.Cluster.Spec.KubeProxy
+	c := b.NodeupConfig.KubeProxy
 	if c == nil {
 		return nil, fmt.Errorf("KubeProxy not configured")
 	}
@@ -45,6 +45,10 @@ func TestKubeProxyBuilder_buildPod(t *testing.T) {
 	cluster.Spec.KubeProxy.CPURequest = resource.NewScaledQuantity(20, resource.Milli)
 	cluster.Spec.KubeProxy.CPULimit = resource.NewScaledQuantity(30, resource.Milli)
 
+	nodeupConfig := &nodeup.Config{
+		KubeProxy: cluster.Spec.KubeProxy,
+	}
+
 	flags, _ := flagbuilder.BuildFlagsList(cluster.Spec.KubeProxy)
 
 	flags = append(flags, []string{
@@ -67,7 +71,7 @@ func TestKubeProxyBuilder_buildPod(t *testing.T) {
 			fields{
 				&NodeupModelContext{
 					Cluster: cluster,
-					NodeupConfig: &nodeup.Config{},
+					NodeupConfig: nodeupConfig,
 					kubernetesVersion: semver.Version{Major: 1, Minor: 20},
 				},
 			},
@@ -135,8 +135,8 @@ func (b *SysctlBuilder) Build(c *fi.NodeupModelBuilderContext) error {
 	}
 
 	// Running Flannel on Amazon Linux 2 needs custom settings
-	if b.Cluster.Spec.Networking.Flannel != nil && b.Distribution == distributions.DistributionAmazonLinux2 {
-		proxyMode := b.Cluster.Spec.KubeProxy.ProxyMode
+	if b.Cluster.Spec.Networking.Flannel != nil && b.Distribution == distributions.DistributionAmazonLinux2 && b.NodeupConfig.KubeProxy != nil {
+		proxyMode := b.NodeupConfig.KubeProxy.ProxyMode
 		if proxyMode == "" || proxyMode == "iptables" {
 			sysctls = append(sysctls,
 				"# Flannel settings on Amazon Linux 2",
@@ -57,6 +57,8 @@ type Config struct {
 	StaticManifests []*StaticManifest `json:"staticManifests,omitempty"`
 	// KubeletConfig defines the kubelet configuration.
 	KubeletConfig kops.KubeletConfigSpec
+	// KubeProxy defines the kube-proxy configuration.
+	KubeProxy *kops.KubeProxyConfig
 	// SysctlParameters will configure kernel parameters using sysctl(8). When
 	// specified, each parameter must follow the form variable=value, the way
 	// it would appear in sysctl.conf.
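A small sketch of why the new field is a pointer; the struct here is a hypothetical miniature, not the real nodeup.Config. With a plain pointer and no omitempty tag, the field is always present in the marshalled config, and a nil value encodes as null, which nodeup reads as "kube-proxy is not configured for this node".

package main

import (
	"encoding/json"
	"fmt"
)

// Miniature stand-ins mirroring the "pointer field, no json tag" shape shown
// in the diff; the field set is illustrative only.
type KubeProxyConfig struct {
	ProxyMode string `json:"proxyMode,omitempty"`
}

type Config struct {
	KubeProxy *KubeProxyConfig
}

func main() {
	withProxy, _ := json.Marshal(Config{KubeProxy: &KubeProxyConfig{ProxyMode: "iptables"}})
	withoutProxy, _ := json.Marshal(Config{}) // nil pointer: not configured for this node
	fmt.Println(string(withProxy))    // {"KubeProxy":{"proxyMode":"iptables"}}
	fmt.Println(string(withoutProxy)) // {"KubeProxy":null}
}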
@@ -191,6 +193,8 @@ func NewConfig(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) (*Confi
 		config.NvidiaGPU = buildNvidiaConfig(cluster, instanceGroup)
 	}
 
+	config.KubeProxy = buildKubeProxy(cluster, instanceGroup)
+
 	if cluster.Spec.CloudProvider.AWS != nil {
 		aws := cluster.Spec.CloudProvider.AWS
 		warmPool := aws.WarmPool.ResolveDefaults(instanceGroup)
@@ -268,6 +272,22 @@ func buildNvidiaConfig(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup)
 	return config
 }
 
+// buildkubeProxy builds the kube-proxy configuration for an instance group.
+func buildKubeProxy(cluster *kops.Cluster, instanceGroup *kops.InstanceGroup) *kops.KubeProxyConfig {
+	config := &kops.KubeProxyConfig{}
+	if cluster.Spec.KubeProxy != nil {
+		config = cluster.Spec.KubeProxy
+	}
+	if config.Enabled != nil && !*config.Enabled {
+		return nil
+	}
+	if instanceGroup.IsControlPlane() && cluster.Spec.Networking.IsolateControlPlane != nil && *cluster.Spec.Networking.IsolateControlPlane {
+		return nil
+	}
+
+	return config
+}
+
 func UsesInstanceIDForNodeName(cluster *kops.Cluster) bool {
 	return cluster.Spec.ExternalCloudControllerManager != nil && cluster.Spec.GetCloudProvider() == kops.CloudProviderAWS
 }
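For reference, a standalone sketch of the decision table the new buildKubeProxy helper implements on the cloudup side, written against simplified stand-in types: the cluster-level kube-proxy spec is passed through unless it is explicitly disabled, or the instance group is a control-plane group in a cluster with IsolateControlPlane set, in which case the per-node config carries nil and nodeup skips kube-proxy entirely.

package main

import "fmt"

// Simplified stand-ins for the kops API types used above.
type KubeProxyConfig struct{ Enabled *bool }

type Cluster struct {
	KubeProxy           *KubeProxyConfig
	IsolateControlPlane *bool
}

type InstanceGroup struct{ ControlPlane bool }

// buildKubeProxy reproduces the branch structure of the added helper:
// nil means "do not install kube-proxy on nodes of this instance group".
func buildKubeProxy(cluster *Cluster, ig *InstanceGroup) *KubeProxyConfig {
	config := &KubeProxyConfig{}
	if cluster.KubeProxy != nil {
		config = cluster.KubeProxy
	}
	if config.Enabled != nil && !*config.Enabled {
		return nil
	}
	if ig.ControlPlane && cluster.IsolateControlPlane != nil && *cluster.IsolateControlPlane {
		return nil
	}
	return config
}

func main() {
	off, isolated := false, true
	fmt.Println(buildKubeProxy(&Cluster{}, &InstanceGroup{}) != nil)                                                 // true: defaults
	fmt.Println(buildKubeProxy(&Cluster{KubeProxy: &KubeProxyConfig{Enabled: &off}}, &InstanceGroup{}) != nil)       // false: explicitly disabled
	fmt.Println(buildKubeProxy(&Cluster{IsolateControlPlane: &isolated}, &InstanceGroup{ControlPlane: true}) != nil) // false: isolated control plane
}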
@@ -378,7 +378,6 @@ func (b *BootstrapScript) Run(c *fi.CloudupContext) error {
 
 		spec := make(map[string]interface{})
 		spec["cloudConfig"] = cs.CloudConfig
-		spec["kubeProxy"] = cs.KubeProxy
 		spec["kubelet"] = cs.Kubelet
 
 		if cs.KubeAPIServer != nil && cs.KubeAPIServer.EnableBootstrapAuthToken != nil {
@@ -183,7 +183,7 @@ func (c *NodeUpCommand) Run(out io.Writer) error {
 			return fmt.Errorf("nodeup config hash mismatch (was %q, expected %q)", got, want)
 		}
 
-		err = evaluateSpec(c, &nodeupConfig, bootConfig.CloudProvider)
+		err = evaluateSpec(&nodeupConfig, bootConfig.CloudProvider)
 		if err != nil {
 			return err
 		}
@@ -448,7 +448,7 @@ func completeWarmingLifecycleAction(cloud awsup.AWSCloud, modelContext *model.No
 	return nil
 }
 
-func evaluateSpec(c *NodeUpCommand, nodeupConfig *nodeup.Config, cloudProvider api.CloudProviderID) error {
+func evaluateSpec(nodeupConfig *nodeup.Config, cloudProvider api.CloudProviderID) error {
 	hostnameOverride, err := evaluateHostnameOverride(cloudProvider, nodeupConfig.UseInstanceIDForNodeName)
 	if err != nil {
 		return err
@@ -456,14 +456,12 @@ func evaluateSpec(c *NodeUpCommand, nodeupConfig *nodeup.Config, cloudProvider a
 
 	nodeupConfig.KubeletConfig.HostnameOverride = hostnameOverride
 
-	if c.cluster.Spec.KubeProxy == nil {
-		c.cluster.Spec.KubeProxy = &api.KubeProxyConfig{}
-	}
-
-	c.cluster.Spec.KubeProxy.HostnameOverride = hostnameOverride
-	c.cluster.Spec.KubeProxy.BindAddress, err = evaluateBindAddress(c.cluster.Spec.KubeProxy.BindAddress)
-	if err != nil {
-		return err
+	if nodeupConfig.KubeProxy != nil {
+		nodeupConfig.KubeProxy.HostnameOverride = hostnameOverride
+		nodeupConfig.KubeProxy.BindAddress, err = evaluateBindAddress(nodeupConfig.KubeProxy.BindAddress)
+		if err != nil {
+			return err
+		}
 	}
 
 	if nodeupConfig.ContainerRuntime == "docker" {
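Finally, a sketch of the evaluateSpec change with stand-in types (evaluateBindAddress here is a hypothetical placeholder, not the real helper): hostname and bind-address overrides are now applied to the per-node kube-proxy config only when it is present, instead of mutating the cluster spec unconditionally.

package main

import (
	"fmt"
	"net"
)

// Stand-ins for the nodeup config types; not the real kops structs.
type KubeProxyConfig struct {
	HostnameOverride string
	BindAddress      string
}

type Config struct {
	KubeletHostnameOverride string
	KubeProxy               *KubeProxyConfig
}

// evaluateBindAddress is a placeholder: accept empty values and IP literals,
// reject anything else.
func evaluateBindAddress(addr string) (string, error) {
	if addr == "" || net.ParseIP(addr) != nil {
		return addr, nil
	}
	return "", fmt.Errorf("unsupported bind address %q", addr)
}

// evaluateSpec mirrors the new shape: kubelet settings are always filled in,
// kube-proxy settings only when the per-node config actually carries them.
func evaluateSpec(cfg *Config, hostnameOverride string) error {
	cfg.KubeletHostnameOverride = hostnameOverride
	if cfg.KubeProxy != nil {
		cfg.KubeProxy.HostnameOverride = hostnameOverride
		addr, err := evaluateBindAddress(cfg.KubeProxy.BindAddress)
		if err != nil {
			return err
		}
		cfg.KubeProxy.BindAddress = addr
	}
	return nil
}

func main() {
	cfg := &Config{KubeProxy: &KubeProxyConfig{BindAddress: "0.0.0.0"}}
	if err := evaluateSpec(cfg, "ip-10-0-0-1.internal"); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(cfg.KubeProxy.HostnameOverride, cfg.KubeProxy.BindAddress)

	// With kube-proxy absent from the node config, only the kubelet override is set.
	bare := &Config{}
	_ = evaluateSpec(bare, "ip-10-0-0-2.internal")
	fmt.Println(bare.KubeletHostnameOverride, bare.KubeProxy == nil)
}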