Merge pull request #11184 from cloudnatix/kenji/gcp

Add GCE Router task

Add a gcetasks.Router task so that kops provisions a Cloud Router with a Cloud NAT configuration when a GCE cluster uses private topology, giving instances without external IPs outbound connectivity. Instance templates now attach an external IP only for public topology, routers are listed and deleted as part of cluster resource cleanup, and a minimal-gce-private integration test fixture is added.
This commit is contained in:
Kubernetes Prow Robot 2021-04-24 00:37:15 -07:00 committed by GitHub
commit 0d9e2e7bb4
15 changed files with 1398 additions and 21 deletions
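As a quick, informal way to check what the new task creates against a live project, the Cloud Router and its NAT configuration can be inspected with gcloud. This is only a sketch; the project, region, and router name below are taken from the test fixtures in this commit and will differ for a real cluster:

gcloud compute routers list --project=testproject --regions=us-test1
gcloud compute routers nats list --router=nat-minimal-gce-private-example-com --region=us-test1 --project=testproject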

View File

@ -152,6 +152,11 @@ func TestMinimalGCE(t *testing.T) {
newIntegrationTest("minimal-gce.example.com", "minimal_gce").runTestTerraformGCE(t)
}
// TestMinimalGCEPrivate runs tests on a minimal GCE configuration with private topology.
func TestMinimalGCEPrivate(t *testing.T) {
newIntegrationTest("minimal-gce-private.example.com", "minimal_gce_private").runTestTerraformGCE(t)
}
// TestHA runs the test on a simple HA configuration, similar to kops create cluster minimal.example.com --zones us-west-1a,us-west-1b,us-west-1c --master-count=3
func TestHA(t *testing.T) {
newIntegrationTest("ha.example.com", "ha").withZones(3).runTestTerraformAWS(t)

View File

@ -87,6 +87,8 @@ func (b *AutoscalingGroupModelBuilder) buildInstanceTemplate(c *fi.ModelBuilderC
// TODO: Support preemptible nodes?
Preemptible: fi.Bool(false),
HasExternalIP: fi.Bool(b.Cluster.Spec.Topology.Masters == kops.TopologyPublic),
Scopes: []string{
"compute-rw",
"monitoring",

View File

@ -19,6 +19,7 @@ package gcemodel
import (
"fmt"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
@ -64,7 +65,30 @@ func (b *NetworkModelBuilder) Build(c *fi.ModelBuilderContext) error {
t.GCEName = t.Name
c.AddTask(t)
}
// Create a Cloud NAT for private topology.
if b.Cluster.Spec.Topology.Masters == kops.TopologyPrivate {
var hasPrivateSubnet bool
for _, subnet := range b.Cluster.Spec.Subnets {
if subnet.Type == kops.SubnetTypePrivate {
hasPrivateSubnet = true
break
}
}
if hasPrivateSubnet {
r := &gcetasks.Router{
Name: s(b.SafeObjectName("nat")),
Lifecycle: b.Lifecycle,
Network: s(b.LinkToNetwork().URL(b.Cluster.Spec.Project)),
Region: s(b.Region),
NATIPAllocationOption: s(gcetasks.NATIPAllocationOptionAutoOnly),
SourceSubnetworkIPRangesToNAT: s(gcetasks.SourceSubnetworkIPRangesAll),
}
c.AddTask(r)
}
}
return nil
}

View File

@ -44,6 +44,7 @@ const (
typeAddress = "Address"
typeRoute = "Route"
typeSubnet = "Subnet"
typeRouter = "Router"
typeDNSRecord = "DNSRecord"
)
@ -96,6 +97,7 @@ func ListResourcesGCE(gceCloud gce.GCECloud, clusterName string, region string)
// TODO: Find routes via instances (via instance groups)
d.listAddresses,
d.listSubnets,
d.listRouters,
}
for _, fn := range listFunctions {
resourceTrackers, err := fn()
@ -774,6 +776,61 @@ func deleteSubnet(cloud fi.Cloud, r *resources.Resource) error {
return c.WaitForOp(op)
}
func (d *clusterDiscoveryGCE) listRouters() ([]*resources.Resource, error) {
c := d.gceCloud
var resourceTrackers []*resources.Resource
ctx := context.Background()
err := c.Compute().Routers.List(c.Project(), c.Region()).Pages(ctx, func(page *compute.RouterList) error {
for _, o := range page.Items {
if !d.matchesClusterName(o.Name) {
klog.V(8).Infof("skipping Router with name %q", o.Name)
continue
}
resourceTracker := &resources.Resource{
Name: o.Name,
ID: o.Name,
Type: typeRouter,
Deleter: deleteRouter,
Obj: o,
}
klog.V(4).Infof("found resource: %s", o.SelfLink)
resourceTrackers = append(resourceTrackers, resourceTracker)
}
return nil
})
if err != nil {
return nil, fmt.Errorf("error listing routers: %v", err)
}
return resourceTrackers, nil
}
func deleteRouter(cloud fi.Cloud, r *resources.Resource) error {
c := cloud.(gce.GCECloud)
o := r.Obj.(*compute.Router)
klog.V(2).Infof("deleting GCE router %s", o.SelfLink)
u, err := gce.ParseGoogleCloudURL(o.SelfLink)
if err != nil {
return err
}
op, err := c.Compute().Routers.Delete(u.Project, u.Region, u.Name).Do()
if err != nil {
if gce.IsNotFound(err) {
klog.Infof("router not found, assuming deleted: %q", o.SelfLink)
return nil
}
return fmt.Errorf("error deleting router %s: %v", o.SelfLink, err)
}
return c.WaitForOp(op)
}
func (d *clusterDiscoveryGCE) matchesClusterName(name string) bool {
return d.matchesClusterNameMultipart(name, 1)
}

View File

@ -0,0 +1 @@
admin: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -0,0 +1,334 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/nodeup
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/nodeup
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
sysctl -w net.ipv4.tcp_rmem='4096 12582912 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
gceServiceAccount: default
manageStorageClasses: true
multizone: true
nodeTags: minimal-gce-private-example-com-k8s-io-role-node
containerRuntime: containerd
containerd:
configOverride: |
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
logLevel: info
version: 1.4.4
docker:
skipInstall: true
encryptionConfig: null
etcdClusters:
events:
version: 3.4.13
main:
version: 3.4.13
kubeAPIServer:
allowPrivileged: true
anonymousAuth: false
apiAudiences:
- kubernetes.svc.default
apiServerCount: 1
authorizationMode: AlwaysAllow
bindAddress: 0.0.0.0
cloudProvider: gce
enableAdmissionPlugins:
- NamespaceLifecycle
- LimitRanger
- ServiceAccount
- PersistentVolumeLabel
- DefaultStorageClass
- DefaultTolerationSeconds
- MutatingAdmissionWebhook
- ValidatingAdmissionWebhook
- NodeRestriction
- ResourceQuota
etcdServers:
- http://127.0.0.1:4001
etcdServersOverrides:
- /events#http://127.0.0.1:4002
image: k8s.gcr.io/kube-apiserver:v1.21.0
kubeletPreferredAddressTypes:
- InternalIP
- Hostname
- ExternalIP
logLevel: 2
requestheaderAllowedNames:
- aggregator
requestheaderExtraHeaderPrefixes:
- X-Remote-Extra-
requestheaderGroupHeaders:
- X-Remote-Group
requestheaderUsernameHeaders:
- X-Remote-User
securePort: 443
serviceAccountIssuer: https://api.internal.minimal-gce-private.example.com
serviceAccountJWKSURI: https://api.internal.minimal-gce-private.example.com/openid/v1/jwks
serviceClusterIPRange: 100.64.0.0/13
storageBackend: etcd3
kubeControllerManager:
allocateNodeCIDRs: true
attachDetachReconcileSyncPeriod: 1m0s
cloudProvider: gce
clusterCIDR: 100.96.0.0/11
clusterName: minimal-gce-private-example-com
configureCloudRoutes: false
image: k8s.gcr.io/kube-controller-manager:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
useServiceAccountCredentials: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubeScheduler:
image: k8s.gcr.io/kube-scheduler:v1.21.0
leaderElection:
leaderElect: true
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: gce
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
hostnameOverride: '@gce'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
masterKubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: gce
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
hostnameOverride: '@gce'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
{}
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- e4efdc6e7648078fbc35cb0e8855b57fa194087fe191338f820cfeda7f471f6a@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/mounter
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 96641849cb78a0a119223a427dfdc1ade88412ef791a14193212c8c8e29d447b@https://github.com/containerd/containerd/releases/download/v1.4.4/cri-containerd-cni-1.4.4-linux-amd64.tar.gz
- f90ed6dcef534e6d1ae17907dc7eb40614b8945ad4af7f0e98d2be7cde8165c6@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/protokube
- 9992e7eb2a2e93f799e5a9e98eb718637433524bc65f630357201a79f49b13d0@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/channels
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- 50c7e22cfbc3dbb4dde80840645c1482259ab25a13cfe821c7380446e6997e54@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/mounter
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 998b3b6669335f1a1d8c475fb7c211ed1e41c2ff37275939e2523666ccb7d910@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.6.tgz
- 2f599c3d54f4c4bdbcc95aaf0c7b513a845d8f9503ec5b34c9f86aa1bc34fc0c@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/protokube,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/protokube-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/protokube
- 9d842e3636a95de2315cdea2be7a282355aac0658ef0b86d5dc2449066538f13@https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/channels,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/channels-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/channels
ClusterName: minimal-gce-private.example.com
ConfigBase: memfs://tests/minimal-gce-private.example.com
InstanceGroupName: master-us-test1-a
InstanceGroupRole: Master
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: gce
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
hostnameOverride: '@gce'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kops.k8s.io/kops-controller-pki: ""
kubernetes.io/role: master
node-role.kubernetes.io/control-plane: ""
node-role.kubernetes.io/master: ""
node.kubernetes.io/exclude-from-external-load-balancers: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
registerSchedulable: false
channels:
- memfs://tests/minimal-gce-private.example.com/addons/bootstrap-channel.yaml
etcdManifests:
- memfs://tests/minimal-gce-private.example.com/manifests/etcd/main.yaml
- memfs://tests/minimal-gce-private.example.com/manifests/etcd/events.yaml
staticManifests:
- key: kube-apiserver-healthcheck
path: manifests/static/kube-apiserver-healthcheck.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1 @@
admin: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -0,0 +1,237 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
NODEUP_URL_AMD64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/amd64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-amd64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/amd64/nodeup
NODEUP_HASH_AMD64=585fbda0f0a43184656b4bfc0cc5f0c0b85612faf43b8816acca1f99d422c924
NODEUP_URL_ARM64=https://artifacts.k8s.io/binaries/kops/1.21.0-alpha.1/linux/arm64/nodeup,https://github.com/kubernetes/kops/releases/download/v1.21.0-alpha.1/nodeup-linux-arm64,https://kubeupv2.s3.amazonaws.com/kops/1.21.0-alpha.1/linux/arm64/nodeup
NODEUP_HASH_ARM64=7603675379699105a9b9915ff97718ea99b1bbb01a4c184e2f827c8a96e8e865
sysctl -w net.ipv4.tcp_rmem='4096 12582912 16777216' || true
function ensure-install-dir() {
INSTALL_DIR="/opt/kops"
# On ContainerOS, we install under /var/lib/toolbox; /opt is ro and noexec
if [[ -d /var/lib/toolbox ]]; then
INSTALL_DIR="/var/lib/toolbox/kops"
fi
mkdir -p ${INSTALL_DIR}/bin
mkdir -p ${INSTALL_DIR}/conf
cd ${INSTALL_DIR}
}
# Retry a download until we get it. args: name, sha, url1, url2...
download-or-bust() {
local -r file="$1"
local -r hash="$2"
shift 2
urls=( $* )
while true; do
for url in "${urls[@]}"; do
commands=(
"curl -f --ipv4 --compressed -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only --compression=auto -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
"curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10"
"wget --inet4-only -O "${file}" --connect-timeout=20 --tries=6 --wait=10"
)
for cmd in "${commands[@]}"; do
echo "Attempting download with: ${cmd} {url}"
if ! (${cmd} "${url}"); then
echo "== Download failed with ${cmd} =="
continue
fi
if [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
echo "== Hash validation of ${url} failed. Retrying. =="
rm -f "${file}"
else
if [[ -n "${hash}" ]]; then
echo "== Downloaded ${url} (SHA1 = ${hash}) =="
else
echo "== Downloaded ${url} =="
fi
return
fi
done
done
echo "All downloads failed; sleeping before retrying"
sleep 60
done
}
validate-hash() {
local -r file="$1"
local -r expected="$2"
local actual
actual=$(sha256sum ${file} | awk '{ print $1 }') || true
if [[ "${actual}" != "${expected}" ]]; then
echo "== ${file} corrupted, hash ${actual} doesn't match expected ${expected} =="
return 1
fi
}
function split-commas() {
echo $1 | tr "," "\n"
}
function try-download-release() {
local -r nodeup_urls=( $(split-commas "${NODEUP_URL}") )
if [[ -n "${NODEUP_HASH:-}" ]]; then
local -r nodeup_hash="${NODEUP_HASH}"
else
# TODO: Remove?
echo "Downloading sha256 (not found in env)"
download-or-bust nodeup.sha256 "" "${nodeup_urls[@]/%/.sha256}"
local -r nodeup_hash=$(cat nodeup.sha256)
fi
echo "Downloading nodeup (${nodeup_urls[@]})"
download-or-bust nodeup "${nodeup_hash}" "${nodeup_urls[@]}"
chmod +x nodeup
}
function download-release() {
case "$(uname -m)" in
x86_64*|i?86_64*|amd64*)
NODEUP_URL="${NODEUP_URL_AMD64}"
NODEUP_HASH="${NODEUP_HASH_AMD64}"
;;
aarch64*|arm64*)
NODEUP_URL="${NODEUP_URL_ARM64}"
NODEUP_HASH="${NODEUP_HASH_ARM64}"
;;
*)
echo "Unsupported host arch: $(uname -m)" >&2
exit 1
;;
esac
# In case of failure checking integrity of release, retry.
cd ${INSTALL_DIR}/bin
until try-download-release; do
sleep 15
echo "Couldn't download release. Retrying..."
done
echo "Running nodeup"
# We can't run in the foreground because of https://github.com/docker/docker/issues/23793
( cd ${INSTALL_DIR}/bin; ./nodeup --install-systemd-unit --conf=${INSTALL_DIR}/conf/kube_env.yaml --v=8 )
}
####################################################################################
/bin/systemd-machine-id-setup || echo "failed to ensure machine-id is configured"
echo "== nodeup node config starting =="
ensure-install-dir
cat > conf/cluster_spec.yaml << '__EOF_CLUSTER_SPEC'
cloudConfig:
gceServiceAccount: default
manageStorageClasses: true
multizone: true
nodeTags: minimal-gce-private-example-com-k8s-io-role-node
containerRuntime: containerd
containerd:
configOverride: |
version = 2
[plugins]
[plugins."io.containerd.grpc.v1.cri"]
[plugins."io.containerd.grpc.v1.cri".containerd]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
runtime_type = "io.containerd.runc.v2"
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
SystemdCgroup = true
logLevel: info
version: 1.4.4
docker:
skipInstall: true
kubeProxy:
clusterCIDR: 100.96.0.0/11
cpuRequest: 100m
image: k8s.gcr.io/kube-proxy:v1.21.0
logLevel: 2
kubelet:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: gce
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
hostnameOverride: '@gce'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
__EOF_CLUSTER_SPEC
cat > conf/ig_spec.yaml << '__EOF_IG_SPEC'
{}
__EOF_IG_SPEC
cat > conf/kube_env.yaml << '__EOF_KUBE_ENV'
Assets:
amd64:
- 681c81b7934ae2bf38b9f12d891683972d1fbbf6d7d97e50940a47b139d41b35@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubelet
- 9f74f2fa7ee32ad07e17211725992248470310ca1988214518806b39b1dad9f0@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
- e4efdc6e7648078fbc35cb0e8855b57fa194087fe191338f820cfeda7f471f6a@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/mounter
- 977824932d5667c7a37aa6a3cbba40100a6873e7bd97e83e8be837e3e7afd0a8@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz
- 96641849cb78a0a119223a427dfdc1ade88412ef791a14193212c8c8e29d447b@https://github.com/containerd/containerd/releases/download/v1.4.4/cri-containerd-cni-1.4.4-linux-amd64.tar.gz
arm64:
- 17832b192be5ea314714f7e16efd5e5f65347974bbbf41def6b02f68931380c4@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubelet
- a4dd7100f547a40d3e2f83850d0bab75c6ea5eb553f0a80adcf73155bef1fd0d@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/kubectl
- 50c7e22cfbc3dbb4dde80840645c1482259ab25a13cfe821c7380446e6997e54@https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/arm64/mounter
- ae13d7b5c05bd180ea9b5b68f44bdaa7bfb41034a2ef1d68fd8e1259797d642f@https://storage.googleapis.com/k8s-artifacts-cni/release/v0.8.7/cni-plugins-linux-arm64-v0.8.7.tgz
- 998b3b6669335f1a1d8c475fb7c211ed1e41c2ff37275939e2523666ccb7d910@https://download.docker.com/linux/static/stable/aarch64/docker-20.10.6.tgz
ClusterName: minimal-gce-private.example.com
ConfigBase: memfs://tests/minimal-gce-private.example.com
InstanceGroupName: nodes
InstanceGroupRole: Node
KubeletConfig:
anonymousAuth: false
cgroupDriver: systemd
cgroupRoot: /
cloudProvider: gce
clusterDNS: 100.64.0.10
clusterDomain: cluster.local
enableDebuggingHandlers: true
evictionHard: memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%,imagefs.available<10%,imagefs.inodesFree<5%
hairpinMode: promiscuous-bridge
hostnameOverride: '@gce'
kubeconfigPath: /var/lib/kubelet/kubeconfig
logLevel: 2
networkPluginName: cni
nodeLabels:
kubernetes.io/role: node
node-role.kubernetes.io/node: ""
nonMasqueradeCIDR: 100.64.0.0/10
podManifestPath: /etc/kubernetes/manifests
channels:
- memfs://tests/minimal-gce-private.example.com/addons/bootstrap-channel.yaml
__EOF_KUBE_ENV
download-release
echo "== nodeup node config done =="

View File

@ -0,0 +1 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCtWu40XQo8dczLsCq0OWV+hxm9uV3WxeH9Kgh4sMzQxNtoU1pvW0XdjpkBesRKGoolfWeCLXWxpyQb1IaiMkKoz7MdhQ/6UKjMjP66aFWWp3pwD0uj0HuJ7tq4gKHKRYGTaZIRWpzUiANBrjugVgA+Sd7E/mYwc/DMXkIyRZbvhQ==

View File

@ -0,0 +1,86 @@
apiVersion: kops.k8s.io/v1alpha2
kind: Cluster
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
name: minimal-gce-private.example.com
spec:
api:
dns: {}
authorization:
alwaysAllow: {}
channel: stable
cloudProvider: gce
configBase: memfs://tests/minimal-gce-private.example.com
etcdClusters:
- etcdMembers:
- instanceGroup: master-us-test1-a
name: "1"
name: main
- etcdMembers:
- instanceGroup: master-us-test1-a
name: "1"
name: events
gceServiceAccount: default
iam:
legacy: false
kubelet:
anonymousAuth: false
kubernetesApiAccess:
- 0.0.0.0/0
kubernetesVersion: v1.21.0
masterPublicName: api.minimal-gce-private.example.com
networking:
cni: {}
nonMasqueradeCIDR: 100.64.0.0/10
project: testproject
sshAccess:
- 0.0.0.0/0
subnets:
- name: us-test1
region: us-test1
type: Private
topology:
dns:
type: Public
masters: private
nodes: private
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal-gce-private.example.com
name: master-us-test1-a
spec:
image: cos-cloud/cos-stable-57-9202-64-0
machineType: n1-standard-1
maxSize: 1
minSize: 1
role: Master
subnets:
- us-test1
zones:
- us-test1-a
---
apiVersion: kops.k8s.io/v1alpha2
kind: InstanceGroup
metadata:
creationTimestamp: "2017-01-01T00:00:00Z"
labels:
kops.k8s.io/cluster: minimal-gce-private.example.com
name: nodes
spec:
image: cos-cloud/cos-stable-57-9202-64-0
machineType: n1-standard-2
maxSize: 2
minSize: 2
role: Node
subnets:
- us-test1
zones:
- us-test1-a

View File

@ -0,0 +1,347 @@
locals {
cluster_name = "minimal-gce-private.example.com"
project = "us-test1"
region = "us-test1"
}
output "cluster_name" {
value = "minimal-gce-private.example.com"
}
output "project" {
value = "us-test1"
}
output "region" {
value = "us-test1"
}
provider "google" {
region = "us-test1"
}
resource "google_compute_disk" "d1-etcd-events-minimal-gce-private-example-com" {
labels = {
"k8s-io-cluster-name" = "minimal-gce-private-example-com"
"k8s-io-etcd-events" = "1-2f1"
"k8s-io-role-master" = "master"
}
name = "d1-etcd-events-minimal-gce-private-example-com"
size = 20
type = "pd-ssd"
zone = "us-test1-a"
}
resource "google_compute_disk" "d1-etcd-main-minimal-gce-private-example-com" {
labels = {
"k8s-io-cluster-name" = "minimal-gce-private-example-com"
"k8s-io-etcd-main" = "1-2f1"
"k8s-io-role-master" = "master"
}
name = "d1-etcd-main-minimal-gce-private-example-com"
size = 20
type = "pd-ssd"
zone = "us-test1-a"
}
resource "google_compute_firewall" "cidr-to-master-minimal-gce-private-example-com" {
allow {
ports = ["443"]
protocol = "tcp"
}
allow {
ports = ["4194"]
protocol = "tcp"
}
name = "cidr-to-master-minimal-gce-private-example-com"
network = google_compute_network.default.name
source_ranges = ["100.64.0.0/10"]
target_tags = ["minimal-gce-private-example-com-k8s-io-role-master"]
}
resource "google_compute_firewall" "cidr-to-node-minimal-gce-private-example-com" {
allow {
protocol = "tcp"
}
allow {
protocol = "udp"
}
allow {
protocol = "icmp"
}
allow {
protocol = "esp"
}
allow {
protocol = "ah"
}
allow {
protocol = "sctp"
}
name = "cidr-to-node-minimal-gce-private-example-com"
network = google_compute_network.default.name
source_ranges = ["100.64.0.0/10"]
target_tags = ["minimal-gce-private-example-com-k8s-io-role-node"]
}
resource "google_compute_firewall" "kubernetes-master-https-minimal-gce-private-example-com" {
allow {
ports = ["443"]
protocol = "tcp"
}
name = "kubernetes-master-https-minimal-gce-private-example-com"
network = google_compute_network.default.name
source_ranges = ["0.0.0.0/0"]
target_tags = ["minimal-gce-private-example-com-k8s-io-role-master"]
}
resource "google_compute_firewall" "master-to-master-minimal-gce-private-example-com" {
allow {
protocol = "tcp"
}
allow {
protocol = "udp"
}
allow {
protocol = "icmp"
}
allow {
protocol = "esp"
}
allow {
protocol = "ah"
}
allow {
protocol = "sctp"
}
name = "master-to-master-minimal-gce-private-example-com"
network = google_compute_network.default.name
source_tags = ["minimal-gce-private-example-com-k8s-io-role-master"]
target_tags = ["minimal-gce-private-example-com-k8s-io-role-master"]
}
resource "google_compute_firewall" "master-to-node-minimal-gce-private-example-com" {
allow {
protocol = "tcp"
}
allow {
protocol = "udp"
}
allow {
protocol = "icmp"
}
allow {
protocol = "esp"
}
allow {
protocol = "ah"
}
allow {
protocol = "sctp"
}
name = "master-to-node-minimal-gce-private-example-com"
network = google_compute_network.default.name
source_tags = ["minimal-gce-private-example-com-k8s-io-role-master"]
target_tags = ["minimal-gce-private-example-com-k8s-io-role-node"]
}
resource "google_compute_firewall" "node-to-master-minimal-gce-private-example-com" {
allow {
ports = ["443"]
protocol = "tcp"
}
allow {
ports = ["4194"]
protocol = "tcp"
}
name = "node-to-master-minimal-gce-private-example-com"
network = google_compute_network.default.name
source_tags = ["minimal-gce-private-example-com-k8s-io-role-node"]
target_tags = ["minimal-gce-private-example-com-k8s-io-role-master"]
}
resource "google_compute_firewall" "node-to-node-minimal-gce-private-example-com" {
allow {
protocol = "tcp"
}
allow {
protocol = "udp"
}
allow {
protocol = "icmp"
}
allow {
protocol = "esp"
}
allow {
protocol = "ah"
}
allow {
protocol = "sctp"
}
name = "node-to-node-minimal-gce-private-example-com"
network = google_compute_network.default.name
source_tags = ["minimal-gce-private-example-com-k8s-io-role-node"]
target_tags = ["minimal-gce-private-example-com-k8s-io-role-node"]
}
resource "google_compute_firewall" "nodeport-external-to-node-minimal-gce-private-example-com" {
allow {
ports = ["30000-32767"]
protocol = "tcp"
}
allow {
ports = ["30000-32767"]
protocol = "udp"
}
name = "nodeport-external-to-node-minimal-gce-private-example-com"
network = google_compute_network.default.name
source_tags = ["minimal-gce-private-example-com-k8s-io-role-node"]
target_tags = ["minimal-gce-private-example-com-k8s-io-role-node"]
}
resource "google_compute_firewall" "ssh-external-to-master-minimal-gce-private-example-com" {
allow {
ports = ["22"]
protocol = "tcp"
}
name = "ssh-external-to-master-minimal-gce-private-example-com"
network = google_compute_network.default.name
source_ranges = ["0.0.0.0/0"]
target_tags = ["minimal-gce-private-example-com-k8s-io-role-master"]
}
resource "google_compute_firewall" "ssh-external-to-node-minimal-gce-private-example-com" {
allow {
ports = ["22"]
protocol = "tcp"
}
name = "ssh-external-to-node-minimal-gce-private-example-com"
network = google_compute_network.default.name
source_ranges = ["0.0.0.0/0"]
target_tags = ["minimal-gce-private-example-com-k8s-io-role-node"]
}
resource "google_compute_instance_group_manager" "a-master-us-test1-a-minimal-gce-private-example-com" {
base_instance_name = "master-us-test1-a"
name = "a-master-us-test1-a-minimal-gce-private-example-com"
target_size = 1
version {
instance_template = google_compute_instance_template.master-us-test1-a-minimal-gce-private-example-com.self_link
}
zone = "us-test1-a"
}
resource "google_compute_instance_group_manager" "a-nodes-minimal-gce-private-example-com" {
base_instance_name = "nodes"
name = "a-nodes-minimal-gce-private-example-com"
target_size = 2
version {
instance_template = google_compute_instance_template.nodes-minimal-gce-private-example-com.self_link
}
zone = "us-test1-a"
}
resource "google_compute_instance_template" "master-us-test1-a-minimal-gce-private-example-com" {
can_ip_forward = true
disk {
auto_delete = true
boot = true
device_name = "persistent-disks-0"
disk_name = ""
disk_size_gb = 64
disk_type = "pd-standard"
interface = ""
mode = "READ_WRITE"
source = ""
source_image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-57-9202-64-0"
type = "PERSISTENT"
}
machine_type = "n1-standard-1"
metadata = {
"cluster-name" = "minimal-gce-private.example.com"
"kops-k8s-io-instance-group-name" = "master-us-test1-a"
"ssh-keys" = file("${path.module}/data/google_compute_instance_template_master-us-test1-a-minimal-gce-private-example-com_metadata_ssh-keys")
"startup-script" = file("${path.module}/data/google_compute_instance_template_master-us-test1-a-minimal-gce-private-example-com_metadata_startup-script")
}
name_prefix = "master-us-test1-a-minimal-asf34c-"
network_interface {
network = google_compute_network.default.name
}
scheduling {
automatic_restart = true
on_host_maintenance = "MIGRATE"
preemptible = false
}
service_account {
email = "default"
scopes = ["https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/devstorage.read_write", "https://www.googleapis.com/auth/ndev.clouddns.readwrite"]
}
tags = ["minimal-gce-private-example-com-k8s-io-role-master"]
}
resource "google_compute_instance_template" "nodes-minimal-gce-private-example-com" {
can_ip_forward = true
disk {
auto_delete = true
boot = true
device_name = "persistent-disks-0"
disk_name = ""
disk_size_gb = 128
disk_type = "pd-standard"
interface = ""
mode = "READ_WRITE"
source = ""
source_image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-57-9202-64-0"
type = "PERSISTENT"
}
machine_type = "n1-standard-2"
metadata = {
"cluster-name" = "minimal-gce-private.example.com"
"kops-k8s-io-instance-group-name" = "nodes"
"ssh-keys" = file("${path.module}/data/google_compute_instance_template_nodes-minimal-gce-private-example-com_metadata_ssh-keys")
"startup-script" = file("${path.module}/data/google_compute_instance_template_nodes-minimal-gce-private-example-com_metadata_startup-script")
}
name_prefix = "nodes-minimal-gce-private-4aopo5-"
network_interface {
network = google_compute_network.default.name
}
scheduling {
automatic_restart = true
on_host_maintenance = "MIGRATE"
preemptible = false
}
service_account {
email = "default"
scopes = ["https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/monitoring", "https://www.googleapis.com/auth/logging.write", "https://www.googleapis.com/auth/devstorage.read_only"]
}
tags = ["minimal-gce-private-example-com-k8s-io-role-node"]
}
resource "google_compute_network" "default" {
auto_create_subnetworks = true
name = "default"
}
resource "google_compute_router_nat" "nat-minimal-gce-private-example-com" {
name = "nat-minimal-gce-private-example-com"
nat_ip_allocate_option = "AUTO_ONLY"
region = "us-test1"
router = "nat-minimal-gce-private-example-com"
source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES"
}
resource "google_compute_router" "nat-minimal-gce-private-example-com" {
name = "nat-minimal-gce-private-example-com"
network = "https://www.googleapis.com/compute/v1/projects/testproject/global/networks/default"
}
terraform {
required_version = ">= 0.12.26"
required_providers {
google = {
"source" = "hashicorp/google"
"version" = ">= 2.19.0"
}
}
}

View File

@ -20,6 +20,8 @@ go_library(
"instancetemplate_fitask.go",
"network.go",
"network_fitask.go",
"router.go",
"router_fitask.go",
"storagebucketacl.go",
"storagebucketacl_fitask.go",
"storagebucketiam.go",

View File

@ -32,8 +32,12 @@ import (
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
)
const (
// terraform 0.12 with google cloud provider 3.2 will complain if the length of the name_prefix is more than 32
InstanceTemplateNamePrefixMaxLength = 32
accessConfigOneToOneNAT = "ONE_TO_ONE_NAT"
)
// InstanceTemplate represents a GCE InstanceTemplate
// +kops:fitask
@ -64,6 +68,9 @@ type InstanceTemplate struct {
Metadata map[string]fi.Resource
MachineType *string
// HasExternalIP is set to true when an external IP is allocated to an instance.
HasExternalIP *bool
// ID is the actual name
ID *string
}
@ -133,6 +140,17 @@ func (e *InstanceTemplate) Find(c *fi.Context) (*InstanceTemplate, error) {
if ni.Subnetwork != "" {
actual.Subnet = &Subnet{Name: fi.String(lastComponent(ni.Subnetwork))}
}
acs := ni.AccessConfigs
if len(acs) > 0 {
if len(acs) != 1 {
return nil, fmt.Errorf("unexpected number of access configs in template %q: %d", *actual.Name, len(acs))
}
if acs[0].Type != accessConfigOneToOneNAT {
return nil, fmt.Errorf("unexpected access type in template %q: %s", *actual.Name, acs[0].Type)
}
actual.HasExternalIP = fi.Bool(true)
}
}
for _, serviceAccount := range p.ServiceAccounts {
@ -249,14 +267,18 @@ func (e *InstanceTemplate) mapToGCE(project string, region string) (*compute.Ins
var networkInterfaces []*compute.NetworkInterface
ni := &compute.NetworkInterface{
Kind: "compute#networkInterface",
AccessConfigs: []*compute.AccessConfig{{
Kind: "compute#accessConfig",
//NatIP: *e.IPAddress.Address,
Type: "ONE_TO_ONE_NAT",
NetworkTier: "PREMIUM",
}},
Network: e.Network.URL(project),
}
if fi.BoolValue(e.HasExternalIP) {
ni.AccessConfigs = []*compute.AccessConfig{
{
Kind: "compute#accessConfig",
Type: accessConfigOneToOneNAT,
NetworkTier: "PREMIUM",
},
}
}
if e.Subnet != nil {
ni.Subnetwork = e.Subnet.URL(project, region)
}

View File

@ -0,0 +1,207 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcetasks
import (
"fmt"
"reflect"
compute "google.golang.org/api/compute/v1"
"k8s.io/klog/v2"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
)
const (
// NATIPAllocationOptionAutoOnly is specified when NAT IPs are allocated by Google Cloud.
NATIPAllocationOptionAutoOnly = "AUTO_ONLY"
// SourceSubnetworkIPRangesAll is specified when all of the IP ranges in every subnetwork are allowed to be NAT-ed.
SourceSubnetworkIPRangesAll = "ALL_SUBNETWORKS_ALL_IP_RANGES"
)
// +kops:fitask
// Router is a GCE Cloud Router task; kops uses it to provide Cloud NAT for private topology clusters.
type Router struct {
Name *string
Lifecycle *fi.Lifecycle
Network *string
Region *string
NATIPAllocationOption *string
SourceSubnetworkIPRangesToNAT *string
}
var _ fi.CompareWithID = &Router{}
// CompareWithID returns the name of the Router.
func (r *Router) CompareWithID() *string {
return r.Name
}
// Find discovers the Router in the cloud provider.
func (r *Router) Find(c *fi.Context) (*Router, error) {
cloud := c.Cloud.(gce.GCECloud)
found, err := cloud.Compute().Routers.Get(cloud.Project(), *r.Region, *r.Name).Do()
if err != nil {
if gce.IsNotFound(err) {
return nil, nil
}
return nil, fmt.Errorf("error listing Routers: %v", err)
}
if len(found.Nats) != 1 {
return nil, fmt.Errorf("unexpected number of nats found: %+v", found.Nats)
}
nat := found.Nats[0]
if a, e := found.SelfLink, r.url(cloud.Project()); a != e {
klog.Warningf("SelfLink did not match URL: %q vs %q", a, e)
}
return &Router{
Name: &found.Name,
Lifecycle: r.Lifecycle,
Network: &found.Network,
Region: &found.Region,
NATIPAllocationOption: &nat.NatIpAllocateOption,
SourceSubnetworkIPRangesToNAT: &nat.SourceSubnetworkIpRangesToNat,
}, nil
}
func (r *Router) url(project string) string {
u := gce.GoogleCloudURL{
Version: "v1",
Project: project,
Name: *r.Name,
Type: "routers",
Region: *r.Region,
}
return u.BuildURL()
}
// Run implements fi.Task.Run.
func (r *Router) Run(c *fi.Context) error {
return fi.DefaultDeltaRunMethod(r, c)
}
// CheckChanges returns an error if a change is not allowed.
func (*Router) CheckChanges(a, e, changes *Router) error {
if a == nil {
// Check if required fields are set when a new resource is created.
if e.Name == nil {
return fi.RequiredField("Name")
}
return nil
}
// Check that immutable fields are not being changed.
if changes.Name != nil {
return fi.CannotChangeField("Name")
}
// TODO(kenji): Check more fields.
return nil
}
// RenderGCE creates or updates a Router.
func (*Router) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Router) error {
if a == nil {
klog.V(2).Infof("Creating Cloud NAT Gateway %v", e.Name)
router := &compute.Router{
Name: *e.Name,
Network: *e.Network,
Nats: []*compute.RouterNat{
{
Name: *e.Name,
NatIpAllocateOption: *e.NATIPAllocationOption,
SourceSubnetworkIpRangesToNat: *e.SourceSubnetworkIPRangesToNAT,
},
},
}
if _, err := t.Cloud.Compute().Routers.Insert(t.Cloud.Project(), *e.Region, router).Do(); err != nil {
return fmt.Errorf("error creating Router: %v", err)
}
} else {
if !reflect.DeepEqual(changes, &Router{}) {
return fmt.Errorf("applying changes to Router is unsupported: %s", *e.Name)
}
}
return nil
}
type terraformRouterNat struct {
Name *string `json:"name,omitempty" cty:"name"`
Region *string `json:"region,omitempty" cty:"region"`
Router *string `json:"router,omitempty" cty:"router"`
NATIPAllocateOption *string `json:"nat_ip_allocate_option,omitempty" cty:"nat_ip_allocate_option"`
SourceSubnetworkIPRangesToNat *string `json:"source_subnetwork_ip_ranges_to_nat,omitempty" cty:"source_subnetwork_ip_ranges_to_nat"`
}
type terraformRouter struct {
Name *string `json:"name,omitempty" cty:"name"`
Network *string `json:"network,omitempty" cty:"network"`
Region *string `json:"region,omitempty" cty:"region"`
}
// RenderTerraform renders the Terraform config.
func (*Router) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Router) error {
tr := &terraformRouter{
Name: e.Name,
Network: e.Network,
}
err := t.RenderResource("google_compute_router", *e.Name, tr)
if err != nil {
return err
}
trn := &terraformRouterNat{
Name: e.Name,
Region: e.Region,
Router: e.Name,
NATIPAllocateOption: e.NATIPAllocationOption,
SourceSubnetworkIPRangesToNat: e.SourceSubnetworkIPRangesToNAT,
}
return t.RenderResource("google_compute_router_nat", *e.Name, trn)
}
// TerraformName returns the Terraform name.
func (r *Router) TerraformName() *terraform.Literal {
return terraform.LiteralProperty("google_compute_router_nat", *r.Name, "name")
}

View File

@ -0,0 +1,51 @@
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by fitask. DO NOT EDIT.
package gcetasks
import (
"k8s.io/kops/upup/pkg/fi"
)
// Router
var _ fi.HasLifecycle = &Router{}
// GetLifecycle returns the Lifecycle of the object, implementing fi.HasLifecycle
func (o *Router) GetLifecycle() *fi.Lifecycle {
return o.Lifecycle
}
// SetLifecycle sets the Lifecycle of the object, implementing fi.SetLifecycle
func (o *Router) SetLifecycle(lifecycle fi.Lifecycle) {
o.Lifecycle = &lifecycle
}
var _ fi.HasName = &Router{}
// GetName returns the Name of the object, implementing fi.HasName
func (o *Router) GetName() *string {
return o.Name
}
// String is the stringer function for the task, producing readable output using fi.TaskAsString
func (o *Router) String() string {
return fi.TaskAsString(o)
}