Merge pull request #4137 from thockin-tmp/gcr-vanity

Convert registry to k8s.gcr.io
This commit is contained in:
k8s-ci-robot 2018-02-20 08:54:39 -08:00 committed by GitHub
commit 4b8db1eee0
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
50 changed files with 96 additions and 96 deletions

View File

@ -55,7 +55,7 @@ container_pull(
name = "debian_hyperkube_base_amd64",
# 'tag' is also supported, but digest is encouraged for reproducibility.
digest = "sha256:fc1b461367730660ac5a40c1eb2d1b23221829acf8a892981c12361383b3742b",
registry = "gcr.io",
repository = "google_containers/debian-hyperkube-base-amd64",
registry = "k8s.gcr.io",
repository = "debian-hyperkube-base-amd64",
tag = "0.8",
)

View File

@ -6,7 +6,7 @@ Note that you likely want to change `AWS_REGION` and `GROUP_NAME`, and probably
```bash
CLOUD_PROVIDER=aws
IMAGE=gcr.io/google_containers/cluster-autoscaler:v1.1.0
IMAGE=k8s.gcr.io/cluster-autoscaler:v1.1.0
MIN_NODES=1
MAX_NODES=5
AWS_REGION=us-east-1

View File

@ -3,7 +3,7 @@
## Creating a simple ingress
```
kubectl run echoheaders --image=gcr.io/google_containers/echoserver:1.4 --replicas=1 --port=8080
kubectl run echoheaders --image=k8s.gcr.io/echoserver:1.4 --replicas=1 --port=8080
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-x
kubectl expose deployment echoheaders --port=80 --target-port=8080 --name=echoheaders-y

View File

@ -30,7 +30,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
image: gcr.io/google_containers/defaultbackend:1.0
image: k8s.gcr.io/defaultbackend:1.0
livenessProbe:
httpGet:
path: /healthz
@ -101,7 +101,7 @@ spec:
spec:
terminationGracePeriodSeconds: 60
containers:
- image: gcr.io/google_containers/nginx-ingress-controller:0.8.3
- image: k8s.gcr.io/nginx-ingress-controller:0.8.3
name: ingress-nginx
imagePullPolicy: Always
ports:

View File

@ -194,7 +194,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
image: gcr.io/google_containers/defaultbackend:1.3
image: k8s.gcr.io/defaultbackend:1.3
livenessProbe:
httpGet:
path: /healthz
@ -272,7 +272,7 @@ spec:
terminationGracePeriodSeconds: 60
serviceAccountName: nginx-ingress-controller
containers:
- image: gcr.io/google_containers/nginx-ingress-controller:0.9.0-beta.10
- image: k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.10
name: nginx-ingress-controller
imagePullPolicy: Always
ports:

View File

@ -194,7 +194,7 @@ spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
image: gcr.io/google_containers/defaultbackend:1.3
image: k8s.gcr.io/defaultbackend:1.3
livenessProbe:
httpGet:
path: /healthz
@ -274,7 +274,7 @@ spec:
terminationGracePeriodSeconds: 60
serviceAccountName: nginx-ingress-controller
containers:
- image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0
- image: k8s.gcr.io/nginx-ingress-controller:0.9.0
name: nginx-ingress-controller
imagePullPolicy: Always
ports:

View File

@ -76,7 +76,7 @@ spec:
memory: 200Mi
cpu: 200m
- name: addon-resizer
image: gcr.io/google_containers/addon-resizer:1.0
image: k8s.gcr.io/addon-resizer:1.0
resources:
limits:
cpu: 100m

View File

@ -76,7 +76,7 @@ spec:
memory: 200Mi
cpu: 200m
- name: addon-resizer
image: gcr.io/google_containers/addon-resizer:1.0
image: k8s.gcr.io/addon-resizer:1.0
resources:
limits:
cpu: 100m

View File

@ -76,7 +76,7 @@ spec:
memory: 500Mi
cpu: 300m
- name: addon-resizer
image: gcr.io/google_containers/addon-resizer:1.8.1
image: k8s.gcr.io/addon-resizer:1.8.1
resources:
limits:
cpu: 100m

View File

@ -37,7 +37,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.1.0
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.1.0
imagePullPolicy: Always
ports:
- name: http

View File

@ -26,7 +26,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.4.0
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.4.0
resources:
# keep request = limit to keep this container in guaranteed class
limits:

View File

@ -26,7 +26,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.0
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.5.0
resources:
# keep request = limit to keep this container in guaranteed class
limits:

View File

@ -56,7 +56,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.0
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.6.0
ports:
- containerPort: 9090
protocol: TCP

View File

@ -56,7 +56,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.1
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.6.1
ports:
- containerPort: 9090
protocol: TCP

View File

@ -57,7 +57,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.6.3
ports:
- containerPort: 9090
protocol: TCP

View File

@ -90,7 +90,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.7.0
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.7.0
ports:
- containerPort: 9090
protocol: TCP

View File

@ -90,7 +90,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.7.1
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.7.1
ports:
- containerPort: 9090
protocol: TCP

View File

@ -110,7 +110,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.0
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.0
ports:
- containerPort: 8443
protocol: TCP

View File

@ -114,7 +114,7 @@ spec:
spec:
containers:
- name: kubernetes-dashboard
image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.1
image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.1
ports:
- containerPort: 8443
protocol: TCP

View File

@ -18,7 +18,7 @@ spec:
spec:
containers:
- name: fluentd-es
image: gcr.io/google_containers/fluentd-elasticsearch:1.22
image: k8s.gcr.io/fluentd-elasticsearch:1.22
command:
- '/bin/sh'
- '-c'
@ -89,7 +89,7 @@ spec:
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/elasticsearch:v2.4.1-2
- image: k8s.gcr.io/elasticsearch:v2.4.1-2
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class
@ -146,7 +146,7 @@ spec:
spec:
containers:
- name: kibana-logging
image: gcr.io/google_containers/kibana:v4.6.1-1
image: k8s.gcr.io/kibana:v4.6.1-1
resources:
# keep request = limit to keep this container in guaranteed class
limits:

View File

@ -113,7 +113,7 @@ spec:
serviceAccountName: fluentd-es
containers:
- name: fluentd-es
image: gcr.io/google_containers/fluentd-elasticsearch:1.22
image: k8s.gcr.io/fluentd-elasticsearch:1.22
command:
- '/bin/sh'
- '-c'
@ -185,7 +185,7 @@ spec:
spec:
serviceAccountName: elasticsearch-logging
containers:
- image: gcr.io/google_containers/elasticsearch:v2.4.1-2
- image: k8s.gcr.io/elasticsearch:v2.4.1-2
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class
@ -242,7 +242,7 @@ spec:
spec:
containers:
- name: kibana-logging
image: gcr.io/google_containers/kibana:v4.6.1-1
image: k8s.gcr.io/kibana:v4.6.1-1
resources:
# keep request = limit to keep this container in guaranteed class
limits:

View File

@ -20,7 +20,7 @@ spec:
version: v1.1.0
spec:
containers:
- image: gcr.io/google_containers/heapster:v1.1.0
- image: k8s.gcr.io/heapster:v1.1.0
name: heapster
resources:
# keep request = limit to keep this container in guaranteed class
@ -33,7 +33,7 @@ spec:
command:
- /heapster
- --source=kubernetes.summary_api:''
- image: gcr.io/google_containers/addon-resizer:1.3
- image: k8s.gcr.io/addon-resizer:1.3
name: heapster-nanny
resources:
limits:

View File

@ -24,7 +24,7 @@ spec:
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- image: gcr.io/google_containers/heapster:v1.2.0
- image: k8s.gcr.io/heapster:v1.2.0
name: heapster
livenessProbe:
httpGet:
@ -44,7 +44,7 @@ spec:
command:
- /heapster
- --source=kubernetes.summary_api:''
- image: gcr.io/google_containers/addon-resizer:1.6
- image: k8s.gcr.io/addon-resizer:1.6
name: heapster-nanny
resources:
limits:

View File

@ -24,7 +24,7 @@ spec:
scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
spec:
containers:
- image: gcr.io/google_containers/heapster:v1.3.0
- image: k8s.gcr.io/heapster:v1.3.0
name: heapster
livenessProbe:
httpGet:
@ -44,7 +44,7 @@ spec:
command:
- /heapster
- --source=kubernetes.summary_api:''
- image: gcr.io/google_containers/addon-resizer:1.7
- image: k8s.gcr.io/addon-resizer:1.7
name: heapster-nanny
resources:
limits:

View File

@ -25,7 +25,7 @@ spec:
spec:
serviceAccountName: heapster
containers:
- image: gcr.io/google_containers/heapster:v1.3.0
- image: k8s.gcr.io/heapster:v1.3.0
name: heapster
livenessProbe:
httpGet:
@ -45,7 +45,7 @@ spec:
command:
- /heapster
- --source=kubernetes.summary_api:''
- image: gcr.io/google_containers/addon-resizer:1.7
- image: k8s.gcr.io/addon-resizer:1.7
name: heapster-nanny
resources:
limits:

View File

@ -25,7 +25,7 @@ spec:
spec:
serviceAccountName: heapster
containers:
- image: gcr.io/google_containers/heapster:v1.4.0
- image: k8s.gcr.io/heapster:v1.4.0
name: heapster
livenessProbe:
httpGet:
@ -45,7 +45,7 @@ spec:
command:
- /heapster
- --source=kubernetes.summary_api:''
- image: gcr.io/google_containers/addon-resizer:2.0
- image: k8s.gcr.io/addon-resizer:2.0
name: heapster-nanny
resources:
limits:

View File

@ -83,7 +83,7 @@ Apply the update to the container:
```bash
kubectl set image deployment/kube-dns -n kube-system \
dnsmasq=gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
dnsmasq=k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.5
```
Validate the change was applied to the deployment:
@ -108,8 +108,8 @@ You should see version 1.14.5 for the k8s-dns-dnsmasq-nanny-amd64 container:
```console
NAME IMAGE
kube-dns-1100866048-3lqm0 gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5,gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5,gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
kube-dns-1100866048-tjlv2 gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5,gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5,gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
kube-dns-1100866048-3lqm0 k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.5,k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.5,k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.5
kube-dns-1100866048-tjlv2 k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64:1.14.5,k8s.gcr.io/k8s-dns-kube-dns-amd64:1.14.5,k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.5
```
### Kubernetes Versions 1.4.x - 1.5.x
@ -136,7 +136,7 @@ Upgrade the kube-dns container to the new version.
```bash
kubectl set image deployment/kube-dns -n kube-system \
dnsmasq=gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5
dnsmasq=k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5
```
Validate the change was applied to the deployment:
@ -157,8 +157,8 @@ You should see version 1.14.5 for the dnsmasq pod
```console
NAME IMAGE
kube-dns-4146767324-djthf gcr.io/google_containers/kubedns-amd64:1.9,gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5,gcr.io/google_containers/dnsmasq-metrics-amd64:1.0,gcr.io/google_containers/exechealthz-amd64:1.2
kube-dns-4146767324-kloxi gcr.io/google_containers/kubedns-amd64:1.9,gcr.io/google_containers/k8s-dns-dnsmasq-amd64:1.14.5,gcr.io/google_containers/dnsmasq-metrics-amd64:1.0,gcr.io/google_containers/exechealthz-amd64:1.2
kube-dns-4146767324-djthf k8s.gcr.io/kubedns-amd64:1.9,k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5,k8s.gcr.io/dnsmasq-metrics-amd64:1.0,k8s.gcr.io/exechealthz-amd64:1.2
kube-dns-4146767324-kloxi k8s.gcr.io/kubedns-amd64:1.9,k8s.gcr.io/k8s-dns-dnsmasq-amd64:1.14.5,k8s.gcr.io/dnsmasq-metrics-amd64:1.0,k8s.gcr.io/exechealthz-amd64:1.2
```
## More Information

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google_containers/debian-base-amd64:0.3
FROM k8s.gcr.io/debian-base-amd64:0.3
# Install packages:
# curl (to download golang)

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google_containers/debian-base-amd64:0.3
FROM k8s.gcr.io/debian-base-amd64:0.3
# ca-certificates: Needed to talk to EC2 API
RUN apt-get update && apt-get install --yes ca-certificates \

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google_containers/debian-base-amd64:0.3
FROM k8s.gcr.io/debian-base-amd64:0.3
# TODO
# RUN apt-get update && apt-get install --yes --reinstall lsb-base
COPY /.build/dist/linux/amd64/kops-server /kops-server

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google_containers/debian-base-amd64:0.3
FROM k8s.gcr.io/debian-base-amd64:0.3
# Install packages:
# curl (to download golang)

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google_containers/debian-base-amd64:0.3
FROM k8s.gcr.io/debian-base-amd64:0.3
# ca-certificates: Needed to talk to EC2 API
# e2fsprogs: Needed to mount / format ext4 filesystems

View File

@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
FROM gcr.io/google_containers/debian-base-amd64:0.2
FROM k8s.gcr.io/debian-base-amd64:0.2
RUN echo "deb-src http://security.debian.org/ jessie/updates main" >> /etc/apt/sources.list
RUN echo "deb-src http://ftp.us.debian.org/debian/ jessie main" >> /etc/apt/sources.list

View File

@ -243,7 +243,7 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) (*Protokube
}
// TODO this is duplicate code with etcd model
image := fmt.Sprintf("gcr.io/google_containers/etcd:%s", imageVersion)
image := fmt.Sprintf("k8s.gcr.io/etcd:%s", imageVersion)
// override image if set as API value
if etcdContainerImage != "" {
image = etcdContainerImage

View File

@ -128,7 +128,7 @@ func (a *AssetBuilder) RemapImage(image string) (string, error) {
normalized := image
// Remove the 'standard' kubernetes image prefix, just for sanity
normalized = strings.TrimPrefix(normalized, "gcr.io/google_containers/")
normalized = strings.TrimPrefix(normalized, "k8s.gcr.io/")
// We can't nest arbitrarily
// Some risk of collisions, but also -- and __ in the names appear to be blocked by docker hub

View File

@ -131,11 +131,11 @@ func Image(component string, clusterSpec *kops.ClusterSpec, assetsBuilder *asset
// TODO remove this, as it is an addon now
if component == "kube-dns" {
// TODO: Once we are shipping different versions, start to use them
return "gcr.io/google_containers/kubedns-amd64:1.3", nil
return "k8s.gcr.io/kubedns-amd64:1.3", nil
}
if !IsBaseURL(clusterSpec.KubernetesVersion) {
image := "gcr.io/google_containers/" + component + ":" + "v" + clusterSpec.KubernetesVersion
image := "k8s.gcr.io/" + component + ":" + "v" + clusterSpec.KubernetesVersion
image, err := assetsBuilder.RemapImage(image)
if err != nil {
@ -158,7 +158,7 @@ func Image(component string, clusterSpec *kops.ClusterSpec, assetsBuilder *asset
tag := strings.TrimSpace(string(b))
glog.V(2).Infof("Found tag %q for %q", tag, component)
return "gcr.io/google_containers/" + component + ":" + tag, nil
return "k8s.gcr.io/" + component + ":" + tag, nil
}
func GCETagForRole(clusterName string, role kops.InstanceGroupRole) string {

View File

@ -44,7 +44,7 @@ func (b *EtcdOptionsBuilder) BuildOptions(o interface{}) error {
}
// default to gcr.io
image := fmt.Sprintf("gcr.io/google_containers/etcd:%s", spec.EtcdClusters[0].Version)
image := fmt.Sprintf("k8s.gcr.io/etcd:%s", spec.EtcdClusters[0].Version)
// override image if set as API value
if spec.EtcdClusters[0].Image != "" {

View File

@ -202,7 +202,7 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error {
}
// Specify our pause image
image := "gcr.io/google_containers/pause-amd64:3.0"
image := "k8s.gcr.io/pause-amd64:3.0"
if image, err = b.Context.AssetBuilder.RemapImage(image); err != nil {
return err
}

View File

@ -8880,7 +8880,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
Properties: map[string]spec.Schema{
"names": {
SchemaProps: spec.SchemaProps{
Description: "Names by which this image is known. e.g. [\"gcr.io/google_containers/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]",
Description: "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{

View File

@ -325,11 +325,11 @@ func dummyNode(nodeMap map[string]string) v1.Node {
// images will be sorted from max to min in node status.
Images: []v1.ContainerImage{
{
Names: []string{"gcr.io/google_containers:v3", "gcr.io/google_containers:v4"},
Names: []string{"k8s.gcr.io:v3", "k8s.gcr.io:v4"},
SizeBytes: 456,
},
{
Names: []string{"gcr.io/google_containers:v1", "gcr.io/google_containers:v2"},
Names: []string{"k8s.gcr.io:v1", "k8s.gcr.io:v2"},
SizeBytes: 123,
},
},

View File

@ -82,7 +82,7 @@ func run() error {
flag.StringVar(&tlsKey, "tls-key", tlsKey, "Path to a file containing the private key for etcd server")
flags.StringSliceVarP(&zones, "zone", "z", []string{}, "Configure permitted zones and their mappings")
flags.StringVar(&dnsProviderID, "dns", "aws-route53", "DNS provider we should use (aws-route53, google-clouddns, coredns)")
flags.StringVar(&etcdImageSource, "etcd-image", "gcr.io/google_containers/etcd:2.2.1", "Etcd Source Container Registry")
flags.StringVar(&etcdImageSource, "etcd-image", "k8s.gcr.io/etcd:2.2.1", "Etcd Source Container Registry")
flags.StringVar(&etcdElectionTimeout, "etcd-election-timeout", etcdElectionTimeout, "time in ms for an election to timeout")
flags.StringVar(&etcdHeartbeatInterval, "etcd-heartbeat-interval", etcdHeartbeatInterval, "time in ms of a heartbeat interval")
flags.StringVar(&gossipSecret, "gossip-secret", gossipSecret, "Secret to use to secure gossip")

View File

@ -3,7 +3,7 @@ clusterName: etcd-main
clusterToken: token-main
cpuRequest: "200m"
dataDirName: data-main
imageSource: gcr.io/google_containers/etcd:2.2.1
imageSource: k8s.gcr.io/etcd:2.2.1
logFile: /var/log/etcd.log
peerPort: 2380
podName: etcd-server-main
@ -62,7 +62,7 @@ spec:
value: "100"
- name: ETCD_INITIAL_CLUSTER
value: node0=http://node0.internal:2380,node1=http://node1.internal:2380,node2=http://node2.internal:2380
image: gcr.io/google_containers/etcd:2.2.1
image: k8s.gcr.io/etcd:2.2.1
livenessProbe:
httpGet:
host: 127.0.0.1

View File

@ -3,7 +3,7 @@ clusterName: etcd-main
clusterToken: token-main
cpuRequest: "200m"
dataDirName: data-main
imageSource: gcr.io/google_containers/etcd:2.2.1
imageSource: k8s.gcr.io/etcd:2.2.1
logFile: /var/log/etcd.log
peerPort: 2380
podName: etcd-server-main
@ -56,7 +56,7 @@ spec:
value: token-main
- name: ETCD_INITIAL_CLUSTER
value: node0=http://node0.internal:2380,node1=http://node1.internal:2380,node2=http://node2.internal:2380
image: gcr.io/google_containers/etcd:2.2.1
image: k8s.gcr.io/etcd:2.2.1
livenessProbe:
httpGet:
host: 127.0.0.1

View File

@ -6,7 +6,7 @@ clusterName: etcd-main
clusterToken: token-main
cpuRequest: "200m"
dataDirName: data-main
imageSource: gcr.io/google_containers/etcd:2.2.1
imageSource: k8s.gcr.io/etcd:2.2.1
logFile: /var/log/etcd.log
peerCA: /srv/kubernetes/ca.crt
peerCert: /srv/kubernetes/etcd.pem
@ -74,7 +74,7 @@ spec:
value: /srv/kubernetes/etcd-key.pem
- name: ETCD_INITIAL_CLUSTER
value: node0=https://node0.internal:2380,node1=https://node1.internal:2380,node2=https://node2.internal:2380
image: gcr.io/google_containers/etcd:2.2.1
image: k8s.gcr.io/etcd:2.2.1
livenessProbe:
initialDelaySeconds: 15
tcpSocket:

View File

@ -179,7 +179,7 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersadditionaluserda
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: gcr.io/google_containers/kube-apiserver:v1.4.12
image: k8s.gcr.io/kube-apiserver:v1.4.12
insecurePort: 8080
logLevel: 2
securePort: 443
@ -192,7 +192,7 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersadditionaluserda
clusterCIDR: 100.96.0.0/11
clusterName: additionaluserdata.example.com
configureCloudRoutes: true
image: gcr.io/google_containers/kube-controller-manager:v1.4.12
image: k8s.gcr.io/kube-controller-manager:v1.4.12
leaderElection:
leaderElect: true
logLevel: 2
@ -202,10 +202,10 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersadditionaluserda
cpuRequest: 100m
featureGates: null
hostnameOverride: '@aws'
image: gcr.io/google_containers/kube-proxy:v1.4.12
image: k8s.gcr.io/kube-proxy:v1.4.12
logLevel: 2
kubeScheduler:
image: gcr.io/google_containers/kube-scheduler:v1.4.12
image: k8s.gcr.io/kube-scheduler:v1.4.12
leaderElection:
leaderElect: true
logLevel: 2
@ -225,7 +225,7 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersadditionaluserda
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
reconcileCIDR: true
masterKubelet:
@ -244,7 +244,7 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersadditionaluserda
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 10.123.45.0/28
podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
reconcileCIDR: true
registerSchedulable: false
@ -457,7 +457,7 @@ Resources.AWSAutoScalingLaunchConfigurationnodesadditionaluserdataexamplecom.Pro
cpuRequest: 100m
featureGates: null
hostnameOverride: '@aws'
image: gcr.io/google_containers/kube-proxy:v1.4.12
image: k8s.gcr.io/kube-proxy:v1.4.12
logLevel: 2
kubelet:
allowPrivileged: true
@ -474,7 +474,7 @@ Resources.AWSAutoScalingLaunchConfigurationnodesadditionaluserdataexamplecom.Pro
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
reconcileCIDR: true

View File

@ -170,7 +170,7 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersminimalexampleco
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: gcr.io/google_containers/kube-apiserver:v1.4.12
image: k8s.gcr.io/kube-apiserver:v1.4.12
insecurePort: 8080
logLevel: 2
securePort: 443
@ -183,7 +183,7 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersminimalexampleco
clusterCIDR: 100.96.0.0/11
clusterName: minimal.example.com
configureCloudRoutes: true
image: gcr.io/google_containers/kube-controller-manager:v1.4.12
image: k8s.gcr.io/kube-controller-manager:v1.4.12
leaderElection:
leaderElect: true
logLevel: 2
@ -193,10 +193,10 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersminimalexampleco
cpuRequest: 100m
featureGates: null
hostnameOverride: '@aws'
image: gcr.io/google_containers/kube-proxy:v1.4.12
image: k8s.gcr.io/kube-proxy:v1.4.12
logLevel: 2
kubeScheduler:
image: gcr.io/google_containers/kube-scheduler:v1.4.12
image: k8s.gcr.io/kube-scheduler:v1.4.12
leaderElection:
leaderElect: true
logLevel: 2
@ -216,7 +216,7 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersminimalexampleco
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
reconcileCIDR: true
masterKubelet:
@ -235,7 +235,7 @@ Resources.AWSAutoScalingLaunchConfigurationmasterustest1amastersminimalexampleco
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podCIDR: 10.123.45.0/28
podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
reconcileCIDR: true
registerSchedulable: false
@ -427,7 +427,7 @@ Resources.AWSAutoScalingLaunchConfigurationnodesminimalexamplecom.Properties.Use
cpuRequest: 100m
featureGates: null
hostnameOverride: '@aws'
image: gcr.io/google_containers/kube-proxy:v1.4.12
image: k8s.gcr.io/kube-proxy:v1.4.12
logLevel: 2
kubelet:
allowPrivileged: true
@ -444,7 +444,7 @@ Resources.AWSAutoScalingLaunchConfigurationnodesminimalexamplecom.Properties.Use
networkPluginMTU: 9001
networkPluginName: kubenet
nonMasqueradeCIDR: 100.64.0.0/10
podInfraContainerImage: gcr.io/google_containers/pause-amd64:3.0
podInfraContainerImage: k8s.gcr.io/pause-amd64:3.0
podManifestPath: /etc/kubernetes/manifests
reconcileCIDR: true

View File

@ -116,9 +116,9 @@ spec:
serviceAccountName: cloud-controller-manager
containers:
- name: cloud-controller-manager
# for in-tree providers we use gcr.io/google_containers/cloud-controller-manager
# for in-tree providers we use k8s.gcr.io/cloud-controller-manager
# this can be replaced with any other image for out-of-tree providers
image: gcr.io/google_containers/cloud-controller-manager:v{{ .KubernetesVersion }} # Reviewers: Will this work?
image: k8s.gcr.io/cloud-controller-manager:v{{ .KubernetesVersion }} # Reviewers: Will this work?
command:
- /usr/local/bin/cloud-controller-manager
- --cloud-provider={{ .CloudProvider }}

View File

@ -33,7 +33,7 @@ spec:
spec:
containers:
- name: autoscaler
image: gcr.io/google_containers/cluster-proportional-autoscaler-{{Arch}}:1.1.2-r2
image: k8s.gcr.io/cluster-proportional-autoscaler-{{Arch}}:1.1.2-r2
resources:
requests:
cpu: "20m"
@ -96,7 +96,7 @@ spec:
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-{{Arch}}:1.14.5
image: k8s.gcr.io/k8s-dns-kube-dns-{{Arch}}:1.14.5
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -148,7 +148,7 @@ spec:
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-{{Arch}}:1.14.5
image: k8s.gcr.io/k8s-dns-dnsmasq-nanny-{{Arch}}:1.14.5
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@ -187,7 +187,7 @@ spec:
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
image: k8s.gcr.io/k8s-dns-sidecar-amd64:1.14.5
livenessProbe:
httpGet:
path: /metrics

View File

@ -32,7 +32,7 @@ spec:
spec:
containers:
- name: autoscaler
image: gcr.io/google_containers/cluster-proportional-autoscaler-{{Arch}}:1.0.0
image: k8s.gcr.io/cluster-proportional-autoscaler-{{Arch}}:1.0.0
resources:
requests:
cpu: "20m"
@ -83,7 +83,7 @@ spec:
spec:
containers:
- name: kubedns
image: gcr.io/google_containers/kubedns-{{Arch}}:1.9
image: k8s.gcr.io/kubedns-{{Arch}}:1.9
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@ -131,7 +131,7 @@ spec:
name: metrics
protocol: TCP
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-{{Arch}}:1.14.5
image: k8s.gcr.io/k8s-dns-dnsmasq-{{Arch}}:1.14.5
livenessProbe:
httpGet:
path: /healthz-dnsmasq
@ -159,7 +159,7 @@ spec:
cpu: 150m
memory: 10Mi
- name: dnsmasq-metrics
image: gcr.io/google_containers/dnsmasq-metrics-{{Arch}}:1.0
image: k8s.gcr.io/dnsmasq-metrics-{{Arch}}:1.0
livenessProbe:
httpGet:
path: /metrics
@ -180,7 +180,7 @@ spec:
requests:
memory: 10Mi
- name: healthz
image: gcr.io/google_containers/exechealthz-{{Arch}}:1.2
image: k8s.gcr.io/exechealthz-{{Arch}}:1.2
resources:
limits:
memory: 50Mi

View File

@ -97,7 +97,7 @@ func (d *dockerAPI) pullImage(name string) error {
for scanner.Scan() {
// {"status":"Already exists","progressDetail":{},"id":"a3ed95caeb02"}
// {"status":"Status: Image is up to date for gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0"}
// {"status":"Status: Image is up to date for k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.0.0"}
glog.Infof("docker pull %s", scanner.Text())
}