sync en zh yaml in content/zh/example/admin directory (#19766)

This commit is contained in:
chentanjun 2020-03-22 20:04:44 +08:00 committed by GitHub
parent 3dca72800c
commit d1233d3531
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 187 additions and 173 deletions

View File

@@ -1,69 +1,69 @@
# This is an example of how to setup cloud-controller-manger as a Daemonset in your cluster. # This is an example of how to setup cloud-controller-manger as a Daemonset in your cluster.
# It assumes that your masters can run pods and has the role node-role.kubernetes.io/master # It assumes that your masters can run pods and has the role node-role.kubernetes.io/master
# Note that this Daemonset will not work straight out of the box for your cloud, this is # Note that this Daemonset will not work straight out of the box for your cloud, this is
# meant to be a guideline. # meant to be a guideline.
--- ---
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: cloud-controller-manager name: cloud-controller-manager
namespace: kube-system namespace: kube-system
--- ---
kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding
metadata: metadata:
name: system:cloud-controller-manager name: system:cloud-controller-manager
roleRef: roleRef:
apiGroup: rbac.authorization.k8s.io apiGroup: rbac.authorization.k8s.io
kind: ClusterRole kind: ClusterRole
name: cluster-admin name: cluster-admin
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: cloud-controller-manager name: cloud-controller-manager
namespace: kube-system namespace: kube-system
--- ---
apiVersion: apps/v1 apiVersion: apps/v1
kind: DaemonSet kind: DaemonSet
metadata: metadata:
labels: labels:
k8s-app: cloud-controller-manager k8s-app: cloud-controller-manager
name: cloud-controller-manager name: cloud-controller-manager
namespace: kube-system namespace: kube-system
spec: spec:
selector: selector:
matchLabels: matchLabels:
k8s-app: cloud-controller-manager k8s-app: cloud-controller-manager
template: template:
metadata: metadata:
labels: labels:
k8s-app: cloud-controller-manager k8s-app: cloud-controller-manager
spec: spec:
serviceAccountName: cloud-controller-manager serviceAccountName: cloud-controller-manager
containers: containers:
- name: cloud-controller-manager - name: cloud-controller-manager
# for in-tree providers we use k8s.gcr.io/cloud-controller-manager # for in-tree providers we use k8s.gcr.io/cloud-controller-manager
# this can be replaced with any other image for out-of-tree providers # this can be replaced with any other image for out-of-tree providers
image: k8s.gcr.io/cloud-controller-manager:v1.8.0 image: k8s.gcr.io/cloud-controller-manager:v1.8.0
command: command:
- /usr/local/bin/cloud-controller-manager - /usr/local/bin/cloud-controller-manager
- --cloud-provider=<YOUR_CLOUD_PROVIDER> # Add your own cloud provider here! - --cloud-provider=[YOUR_CLOUD_PROVIDER] # Add your own cloud provider here!
- --leader-elect=true - --leader-elect=true
- --use-service-account-credentials - --use-service-account-credentials
# these flags will vary for every cloud provider # these flags will vary for every cloud provider
- --allocate-node-cidrs=true - --allocate-node-cidrs=true
- --configure-cloud-routes=true - --configure-cloud-routes=true
- --cluster-cidr=172.17.0.0/16 - --cluster-cidr=172.17.0.0/16
tolerations: tolerations:
# this is required so CCM can bootstrap itself # this is required so CCM can bootstrap itself
- key: node.cloudprovider.kubernetes.io/uninitialized - key: node.cloudprovider.kubernetes.io/uninitialized
value: "true" value: "true"
effect: NoSchedule effect: NoSchedule
# this is to have the daemonset runnable on master nodes # this is to have the daemonset runnable on master nodes
# the taint may vary depending on your cluster setup # the taint may vary depending on your cluster setup
- key: node-role.kubernetes.io/master - key: node-role.kubernetes.io/master
effect: NoSchedule effect: NoSchedule
# this is to restrict CCM to only run on master nodes # this is to restrict CCM to only run on master nodes
# the node selector may vary depending on your cluster setup # the node selector may vary depending on your cluster setup
nodeSelector: nodeSelector:
node-role.kubernetes.io/master: "" node-role.kubernetes.io/master: ""

View File

@@ -1,33 +1,33 @@
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
name: dns-autoscaler name: dns-autoscaler
namespace: kube-system namespace: kube-system
labels: labels:
k8s-app: dns-autoscaler k8s-app: dns-autoscaler
spec: spec:
selector: selector:
matchLabels: matchLabels:
k8s-app: dns-autoscaler k8s-app: dns-autoscaler
template: template:
metadata: metadata:
labels: labels:
k8s-app: dns-autoscaler k8s-app: dns-autoscaler
spec: spec:
containers: containers:
- name: autoscaler - name: autoscaler
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.1 image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.6.0
resources: resources:
requests: requests:
cpu: "20m" cpu: 20m
memory: "10Mi" memory: 10Mi
command: command:
- /cluster-proportional-autoscaler - /cluster-proportional-autoscaler
- --namespace=kube-system - --namespace=kube-system
- --configmap=dns-autoscaler - --configmap=dns-autoscaler
- --target=<SCALE_TARGET> - --target=<SCALE_TARGET>
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate. # If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":1}} - --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"min":1}}
- --logtostderr=true - --logtostderr=true
- --v=2 - --v=2

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Pod
metadata:
name: dnsutils
namespace: default
spec:
containers:
- name: dnsutils
image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
restartPolicy: Always

View File

@@ -1,10 +1,10 @@
apiVersion: v1 apiVersion: v1
kind: Pod kind: Pod
metadata: metadata:
name: constraints-cpu-demo-4 name: constraints-cpu-demo-3
spec: spec:
containers: containers:
- name: constraints-cpu-demo-4-ctr - name: constraints-cpu-demo-3-ctr
image: nginx image: nginx
resources: resources:
limits: limits:

View File

@@ -1,5 +1,5 @@
kind: PersistentVolumeClaim
apiVersion: v1 apiVersion: v1
kind: PersistentVolumeClaim
metadata: metadata:
name: pvc-quota-demo-2 name: pvc-quota-demo-2
spec: spec:

View File

@@ -1,5 +1,5 @@
kind: PersistentVolumeClaim
apiVersion: v1 apiVersion: v1
kind: PersistentVolumeClaim
metadata: metadata:
name: pvc-quota-demo name: pvc-quota-demo
spec: spec:

View File

@@ -1,67 +1,67 @@
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: my-scheduler name: my-scheduler
namespace: kube-system namespace: kube-system
--- ---
kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1
apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding
metadata: metadata:
name: my-scheduler-as-kube-scheduler name: my-scheduler-as-kube-scheduler
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: my-scheduler name: my-scheduler
namespace: kube-system namespace: kube-system
roleRef: roleRef:
kind: ClusterRole kind: ClusterRole
name: kube-scheduler name: system:kube-scheduler
apiGroup: rbac.authorization.k8s.io apiGroup: rbac.authorization.k8s.io
--- ---
apiVersion: apps/v1 apiVersion: apps/v1
kind: Deployment kind: Deployment
metadata: metadata:
labels: labels:
component: scheduler component: scheduler
tier: control-plane tier: control-plane
name: my-scheduler name: my-scheduler
namespace: kube-system namespace: kube-system
spec: spec:
selector: selector:
matchLabels: matchLabels:
component: scheduler component: scheduler
tier: control-plane tier: control-plane
replicas: 1 replicas: 1
template: template:
metadata: metadata:
labels: labels:
component: scheduler component: scheduler
tier: control-plane tier: control-plane
version: second version: second
spec: spec:
serviceAccountName: my-scheduler serviceAccountName: my-scheduler
containers: containers:
- command: - command:
- /usr/local/bin/kube-scheduler - /usr/local/bin/kube-scheduler
- --address=0.0.0.0 - --address=0.0.0.0
- --leader-elect=false - --leader-elect=false
- --scheduler-name=my-scheduler - --scheduler-name=my-scheduler
image: gcr.io/my-gcp-project/my-kube-scheduler:1.0 image: gcr.io/my-gcp-project/my-kube-scheduler:1.0
livenessProbe: livenessProbe:
httpGet: httpGet:
path: /healthz path: /healthz
port: 10251 port: 10251
initialDelaySeconds: 15 initialDelaySeconds: 15
name: kube-second-scheduler name: kube-second-scheduler
readinessProbe: readinessProbe:
httpGet: httpGet:
path: /healthz path: /healthz
port: 10251 port: 10251
resources: resources:
requests: requests:
cpu: '0.1' cpu: '0.1'
securityContext: securityContext:
privileged: false privileged: false
volumeMounts: [] volumeMounts: []
hostNetwork: false hostNetwork: false
hostPID: false hostPID: false
volumes: [] volumes: []