# cluster-api-provider-rke2/examples/templates/docker/cluster-template-disable-co...
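#
# Example CAPD (Docker) cluster template for the RKE2 provider. Its
# distinguishing feature is spec.serverConfig.disableComponents on the
# RKE2ControlPlane, which keeps RKE2 from deploying the bundled
# rke2-ingress-nginx plugin component.
#
# A sketch of how a template like this is typically rendered (example values,
# and a placeholder file name; note the template appends +rke2r1 to
# KUBERNETES_VERSION itself):
#
#   export NAMESPACE=example-rke2 CLUSTER_NAME=example-rke2
#   export KUBERNETES_VERSION=v1.28.3
#   export CONTROL_PLANE_MACHINE_COUNT=3 WORKER_MACHINE_COUNT=2
#   clusterctl generate cluster ${CLUSTER_NAME} --from ./<this-template>.yaml \
#     | kubectl apply -f -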

apiVersion: v1
kind: Namespace
metadata:
  name: ${NAMESPACE}
---
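# The CAPI Cluster object ties everything together: pod/service CIDRs for the
# cluster network, the RKE2ControlPlane below as controlPlaneRef, and the CAPD
# DockerCluster as infrastructureRef.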
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  namespace: ${NAMESPACE}
  name: ${CLUSTER_NAME}
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
        - 10.45.0.0/16
    services:
      cidrBlocks:
        - 10.46.0.0/16
    serviceDomain: cluster.local
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
    kind: RKE2ControlPlane
    name: ${CLUSTER_NAME}-control-plane
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
    kind: DockerCluster
    name: ${CLUSTER_NAME}
---
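# DockerCluster with a custom HAProxy template: customHAProxyConfigTemplateRef
# points at the ${CLUSTER_NAME}-lb-config ConfigMap at the bottom of this file,
# which extends kind's stock load-balancer config with an RKE2 join frontend.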
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerCluster
metadata:
  name: ${CLUSTER_NAME}
  namespace: ${NAMESPACE}
spec:
  loadBalancer:
    customHAProxyConfigTemplateRef:
      name: ${CLUSTER_NAME}-lb-config
---
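# The RKE2 control plane. disableComponents.pluginComponents is what gives this
# template its name: RKE2 is told not to deploy its packaged rke2-ingress-nginx
# chart. --anonymous-auth=true is passed to kube-apiserver, presumably so the
# load balancer's unauthenticated "GET /healthz" checks (see the ConfigMap
# below) succeed.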
apiVersion: controlplane.cluster.x-k8s.io/v1beta1
kind: RKE2ControlPlane
metadata:
  name: ${CLUSTER_NAME}-control-plane
  namespace: ${NAMESPACE}
spec:
  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
  agentConfig:
    version: ${KUBERNETES_VERSION}+rke2r1
  gzipUserData: false
  serverConfig:
    kubeAPIServer:
      extraArgs:
        - --anonymous-auth=true
    disableComponents:
      pluginComponents:
        - "rke2-ingress-nginx"
  machineTemplate:
    infrastructureRef:
      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
      kind: DockerMachineTemplate
      name: controlplane
    nodeDrainTimeout: 2m
    nodeDeletionTimeout: 30s
    nodeVolumeDetachTimeout: 5m
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
metadata:
  name: controlplane
  namespace: ${NAMESPACE}
spec:
  template:
    spec: {}
---
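# Worker pool: stamps out ${WORKER_MACHINE_COUNT} Docker machines, each
# bootstrapped as an RKE2 agent via the RKE2ConfigTemplate referenced below.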
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: worker-md-0
  namespace: ${NAMESPACE}
spec:
  clusterName: ${CLUSTER_NAME}
  replicas: ${WORKER_MACHINE_COUNT}
  selector:
    matchLabels:
      cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME}
  template:
    spec:
      version: ${KUBERNETES_VERSION}+rke2r1
      clusterName: ${CLUSTER_NAME}
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
          kind: RKE2ConfigTemplate
          name: ${CLUSTER_NAME}-agent
          namespace: ${NAMESPACE}
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
        kind: DockerMachineTemplate
        name: worker
        namespace: ${NAMESPACE}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
kind: DockerMachineTemplate
metadata:
  name: worker
  namespace: ${NAMESPACE}
spec:
  template:
    spec: {}
---
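# Minimal agent (worker) bootstrap configuration; the empty agentConfig keeps
# RKE2's defaults.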
apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
kind: RKE2ConfigTemplate
metadata:
  namespace: ${NAMESPACE}
  name: ${CLUSTER_NAME}-agent
spec:
  template:
    spec:
      agentConfig: {}
      gzipUserData: false
---
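# HAProxy configuration consumed by the CAPD load balancer. It is kind's stock
# template plus an extra frontend/backend pair on port 9345, RKE2's supervisor
# port, so joining nodes can register through the load balancer. The health
# check expects HTTP 403, which an RKE2 server returns for unauthenticated
# requests to /v1-rke2/readyz once it is serving.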
apiVersion: v1
kind: ConfigMap
metadata:
  name: ${CLUSTER_NAME}-lb-config
  namespace: ${NAMESPACE}
data:
  value: |-
    # generated by kind
    global
      log /dev/log local0
      log /dev/log local1 notice
      daemon
      # limit memory usage to approximately 18 MB
      # (see https://github.com/kubernetes-sigs/kind/pull/3115)
      maxconn 100000

    resolvers docker
      nameserver dns 127.0.0.11:53

    defaults
      log global
      mode tcp
      option dontlognull
      # TODO: tune these
      timeout connect 5000
      timeout client 50000
      timeout server 50000
      # allow to boot despite dns don't resolve backends
      default-server init-addr none

    frontend stats
      mode http
      bind *:8404
      stats enable
      stats uri /stats
      stats refresh 1s
      stats admin if TRUE

    frontend control-plane
      bind *:{{ .FrontendControlPlanePort }}
      {{ if .IPv6 -}}
      bind :::{{ .FrontendControlPlanePort }};
      {{- end }}
      default_backend kube-apiservers

    backend kube-apiservers
      option httpchk GET /healthz
      {{range $server, $backend := .BackendServers}}
      server {{ $server }} {{ JoinHostPort $backend.Address $.BackendControlPlanePort }} check check-ssl verify none resolvers docker resolve-prefer {{ if $.IPv6 -}} ipv6 {{- else -}} ipv4 {{- end }}
      {{- end}}

    frontend rke2-join
      bind *:9345
      {{ if .IPv6 -}}
      bind :::9345;
      {{- end }}
      default_backend rke2-servers

    backend rke2-servers
      option httpchk GET /v1-rke2/readyz
      http-check expect status 403
      {{range $server, $backend := .BackendServers}}
      server {{ $server }} {{ $backend.Address }}:9345 check check-ssl verify none
      {{- end}}