From d4c2cfaae7f900586d87bd468042aa0b112b7c89 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 31 Mar 2016 14:30:23 -0400 Subject: [PATCH] Initial version of upup: cloudup & nodeup * GCE support only * Key and secret generation * "Direct mode" makes API calls * "Dry run mode" previews the changes * Terraform output (though key generation not working for master ip) * cloud-init output (though debian image does not ship with cloud-init) --- upup/.gitignore | 2 + upup/DEVELOP.md | 42 ++ upup/Makefile | 31 + upup/README.md | 51 ++ upup/cmd/cloudup/main.go | 244 ++++++++ upup/cmd/nodeup/main.go | 51 ++ upup/glide.lock | 53 ++ upup/glide.yaml | 16 + upup/models/cloudup/_gce/defaults.options | 53 ++ upup/models/cloudup/_gce/master.yaml | 44 ++ upup/models/cloudup/_gce/network.yaml | 20 + upup/models/cloudup/_gce/nodes.yaml | 48 ++ .../_gce/resources/cloudinit.yaml.template | 1 + .../_gce/resources/cluster-name.template | 1 + .../_gce/resources/config.yaml.template | 39 ++ .../cloudup/_gce/resources/kube-env.template | 150 +++++ .../cloudup/_gce/resources/nodeup.sh.template | 139 +++++ upup/models/cloudup/pki/kubecfg | 3 + upup/models/cloudup/pki/kubelet | 3 + upup/models/cloudup/pki/master | 12 + .../etc/kubernetes/kube-master-addons.sh | 66 +++ .../etc/kubernetes/kube-master-addons.sh.meta | 3 + .../services/kube-master-addons.service | 9 + .../files/srv/kubernetes/ca.crt.template | 1 + .../files/srv/kubernetes/server.cert.template | 1 + .../files/srv/kubernetes/server.key.template | 1 + .../kubernetes/manifests/etcd-events.manifest | 65 +++ .../etc/kubernetes/manifests/etcd.manifest | 65 +++ .../etcd/files/var/log/etcd-events.log | 0 .../etcd/files/var/log/etcd-events.log.meta | 3 + .../etcd/files/var/log/etcd.log | 0 .../etcd/files/var/log/etcd.log.meta | 3 + .../etc/kubernetes/addons/namespace.yaml | 4 + .../files/etc/kubernetes/kube-addon-update.sh | 514 +++++++++++++++++ .../etc/kubernetes/kube-addon-update.sh.meta | 3 + .../files/etc/kubernetes/kube-addons.sh | 125 ++++ .../files/etc/kubernetes/kube-addons.sh.meta | 3 + .../kube-addons/services/kube-addons.service | 9 + .../kube-apiserver.manifest.template | 100 ++++ .../srv/kubernetes/basic_auth.csv.template | 1 + .../kubernetes/basic_auth.csv.template.meta | 3 + .../srv/kubernetes/known_tokens.csv.template | 3 + .../kubernetes/known_tokens.csv.template.meta | 3 + .../files/var/log/kube-apiserver.log | 0 .../files/var/log/kube-apiserver.log.meta | 3 + .../options/_aws/kube-apiserver.aws | 2 + .../options/_gce/kube-apiserver.gce | 2 + .../kube-apiserver/options/kube-apiserver | 17 + .../kube-controller-manager.template | 84 +++ .../files/var/log/kube-controller-manager.log | 0 .../var/log/kube-controller-manager.log.meta | 3 + .../options/_aws/kube-controller-manager.aws | 2 + .../options/_gce/kube-controller-manager.gce | 2 + .../options/kube-controller-manager | 10 + .../addons/dns/skydns-rc.yaml.template | 119 ++++ .../addons/dns/skydns-svc.yaml.template | 20 + .../kube-dns/options/kube-dns | 4 + .../manifests/kube-scheduler.template | 48 ++ .../files/var/log/kube-scheduler.log | 0 .../files/var/log/kube-scheduler.log.meta | 3 + .../kube-scheduler/options/kube-scheduler | 4 + .../etc/kubernetes/kube-node-unpacker.sh | 46 ++ .../etc/kubernetes/kube-node-unpacker.sh.meta | 3 + .../srv/salt/kube-bins/kube-proxy.tar.asset | 2 + .../services/kube-node-unpacker.service | 9 + .../usr/share/google/safe_format_and_mount | 147 +++++ .../share/google/safe_format_and_mount.meta | 3 + .../manifests/kube-proxy.manifest.template | 40 ++ 
.../var/lib/kube-proxy/kubeconfig.template | 16 + .../lib/kube-proxy/kubeconfig.template.meta | 3 + .../kube-proxy/files/var/log/kube-proxy.log | 0 .../files/var/log/kube-proxy.log.meta | 3 + .../kube-proxy/options/kube-proxy | 10 + .../files/etc/apt/apt.conf.d/20auto-upgrades | 4 + .../packages/unattended-upgrades | 0 .../files/etc/sysctl.d/99-ip_forward.conf | 2 + .../etc/sysctl.d/99-ip_forward.conf.meta | 3 + .../files/etc/sysconfig/docker.template | 2 + .../opt/kubernetes/helpers/docker-healthcheck | 45 ++ .../helpers/docker-healthcheck.meta | 3 + .../opt/kubernetes/helpers/docker-prestart | 21 + .../kubernetes/helpers/docker-prestart.meta | 3 + .../services/docker-healthcheck.service | 9 + .../services/docker-healthcheck.service.meta | 3 + .../services/docker-healthcheck.timer | 9 + .../docker/_systemd/services/docker.service | 21 + .../_systemd/services/docker.service.meta | 3 + .../files/usr/share/doc/docker/apache.txt | 4 + .../_e2e_storage_test_environment/e2e.options | 2 + .../docker/options/_kubenet/kubenet.options | 5 + upup/models/nodeup/docker/options/default | 5 + .../docker/packages/_jessie/docker-engine | 7 + .../nodeup/docker/packages/bridge-utils | 0 .../nodeup/docker/packages/libapparmor1 | 0 upup/models/nodeup/docker/packages/perl | 0 .../files/usr/local/bin/kubectl.asset | 2 + .../files/usr/local/bin/kubectl.asset.meta | 3 + .../files/etc/sysconfig/kubelet.template | 1 + .../kubelet/files/usr/local/bin/kubelet.asset | 2 + .../files/usr/local/bin/kubelet.asset.meta | 3 + .../files/var/lib/kubelet/kubeconfig.template | 17 + .../var/lib/kubelet/kubeconfig.template.meta | 3 + .../nodeup/kubelet/options/_aws/kubelet.aws | 5 + .../nodeup/kubelet/options/_gce/kubelet.gce | 8 + .../kubelet.kubernetes_master | 7 + upup/models/nodeup/kubelet/options/kubelet | 10 + .../nodeup/kubelet/services/kubelet.service | 11 + .../logrotate/files/etc/cron.hourly/logrotate | 2 + .../files/etc/cron.hourly/logrotate.meta | 3 + .../logrotate/files/etc/logrotate.d/docker | 10 + .../files/etc/logrotate.d/docker-containers | 10 + .../files/etc/logrotate.d/kube-addons | 10 + .../files/etc/logrotate.d/kube-apiserver | 10 + .../etc/logrotate.d/kube-controller-manager | 10 + .../files/etc/logrotate.d/kube-proxy | 10 + .../files/etc/logrotate.d/kube-scheduler | 10 + .../logrotate/files/etc/logrotate.d/kubelet | 10 + .../nodeup/logrotate/packages/logrotate | 0 upup/models/nodeup/ntp/_aws/packages/ntp | 0 upup/models/nodeup/ntp/_aws/services/ntp | 0 .../packages/apt-transport-https | 0 .../top/_debian_family/packages/nfs-common | 0 .../top/_debian_family/packages/python-apt | 0 .../nodeup/top/_debian_family/packages/socat | 0 .../nodeup/top/_redhat_family/packages/git | 0 .../nodeup/top/_redhat_family/packages/python | 0 upup/models/nodeup/top/_ubuntu/packages/git | 0 .../top/_ubuntu/packages/netcat-traditional | 0 .../usr/local/share/doc/kubernetes/LICENSES | 1 + upup/models/nodeup/top/packages/curl | 0 upup/pkg/fi/assetstore.go | 283 +++++++++ upup/pkg/fi/ca.go | 238 ++++++++ upup/pkg/fi/changes.go | 133 +++++ upup/pkg/fi/cloud.go | 10 + upup/pkg/fi/cloudup/config.go | 171 ++++++ upup/pkg/fi/cloudup/gce/gce_apitarget.go | 19 + upup/pkg/fi/cloudup/gce/gce_cloud.go | 51 ++ upup/pkg/fi/cloudup/gce/gce_url.go | 82 +++ upup/pkg/fi/cloudup/gce/utils.go | 30 + upup/pkg/fi/cloudup/gcetasks/QUESTIONS.md | 5 + upup/pkg/fi/cloudup/gcetasks/disk.go | 131 +++++ upup/pkg/fi/cloudup/gcetasks/firewall_rule.go | 181 ++++++ upup/pkg/fi/cloudup/gcetasks/instance.go | 425 ++++++++++++++ 
.../fi/cloudup/gcetasks/instance_template.go | 420 ++++++++++++++ upup/pkg/fi/cloudup/gcetasks/ip_address.go | 105 ++++ .../gcetasks/managed_instance_group.go | 114 ++++ upup/pkg/fi/cloudup/gcetasks/network.go | 115 ++++ upup/pkg/fi/cloudup/gcetasks/subnet.go | 94 +++ upup/pkg/fi/cloudup/loader.go | 536 ++++++++++++++++++ upup/pkg/fi/cloudup/terraform/literal.go | 30 + upup/pkg/fi/cloudup/terraform/target.go | 83 +++ upup/pkg/fi/compare_with_id.go | 10 + upup/pkg/fi/context.go | 130 +++++ upup/pkg/fi/default_methods.go | 67 +++ upup/pkg/fi/dryrun_target.go | 180 ++++++ upup/pkg/fi/errors.go | 13 + upup/pkg/fi/files.go | 135 +++++ upup/pkg/fi/fitasks/pki.go | 116 ++++ upup/pkg/fi/fs_castore.go | 297 ++++++++++ upup/pkg/fi/fs_secretstore.go | 102 ++++ upup/pkg/fi/hash.go | 46 ++ upup/pkg/fi/http.go | 72 +++ upup/pkg/fi/loader/options_loader.go | 114 ++++ upup/pkg/fi/loader/tree_walker.go | 151 +++++ upup/pkg/fi/nodeup/README.md | 73 +++ upup/pkg/fi/nodeup/build_flags.go | 58 ++ .../fi/nodeup/cloudinit/cloud_init_target.go | 171 ++++++ upup/pkg/fi/nodeup/command.go | 85 +++ upup/pkg/fi/nodeup/config.go | 135 +++++ upup/pkg/fi/nodeup/loader.go | 202 +++++++ upup/pkg/fi/nodeup/local/local_target.go | 12 + upup/pkg/fi/nodeup/nodetasks/asset.go | 6 + upup/pkg/fi/nodeup/nodetasks/file.go | 149 +++++ upup/pkg/fi/nodeup/nodetasks/package.go | 170 ++++++ upup/pkg/fi/nodeup/nodetasks/service.go | 285 ++++++++++ .../fi/nodeup/nodetasks/update_packages.go | 55 ++ upup/pkg/fi/options.go | 117 ++++ upup/pkg/fi/resources.go | 195 +++++++ upup/pkg/fi/secrets.go | 43 ++ upup/pkg/fi/target.go | 6 + upup/pkg/fi/task.go | 5 + upup/pkg/fi/topological_sort.go | 162 ++++++ upup/pkg/fi/utils/json.go | 14 + upup/pkg/fi/utils/marshal.go | 220 +++++++ upup/pkg/fi/utils/reflect.go | 166 ++++++ upup/pkg/fi/utils/sanitize.go | 19 + upup/pkg/fi/utils/uris.go | 69 +++ upup/pkg/fi/utils/yaml.go | 20 + upup/pkg/fi/values.go | 58 ++ 189 files changed, 10086 insertions(+) create mode 100644 upup/.gitignore create mode 100644 upup/DEVELOP.md create mode 100644 upup/Makefile create mode 100644 upup/README.md create mode 100644 upup/cmd/cloudup/main.go create mode 100644 upup/cmd/nodeup/main.go create mode 100644 upup/glide.lock create mode 100644 upup/glide.yaml create mode 100644 upup/models/cloudup/_gce/defaults.options create mode 100644 upup/models/cloudup/_gce/master.yaml create mode 100644 upup/models/cloudup/_gce/network.yaml create mode 100644 upup/models/cloudup/_gce/nodes.yaml create mode 100644 upup/models/cloudup/_gce/resources/cloudinit.yaml.template create mode 100644 upup/models/cloudup/_gce/resources/cluster-name.template create mode 100644 upup/models/cloudup/_gce/resources/config.yaml.template create mode 100644 upup/models/cloudup/_gce/resources/kube-env.template create mode 100755 upup/models/cloudup/_gce/resources/nodeup.sh.template create mode 100644 upup/models/cloudup/pki/kubecfg create mode 100644 upup/models/cloudup/pki/kubelet create mode 100644 upup/models/cloudup/pki/master create mode 100644 upup/models/nodeup/_kubernetes_master/_kube-master-addons/files/etc/kubernetes/kube-master-addons.sh create mode 100644 upup/models/nodeup/_kubernetes_master/_kube-master-addons/files/etc/kubernetes/kube-master-addons.sh.meta create mode 100644 upup/models/nodeup/_kubernetes_master/_kube-master-addons/services/kube-master-addons.service create mode 100644 upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/ca.crt.template create mode 100644 
upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/server.cert.template create mode 100644 upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/server.key.template create mode 100644 upup/models/nodeup/_kubernetes_master/etcd/files/etc/kubernetes/manifests/etcd-events.manifest create mode 100644 upup/models/nodeup/_kubernetes_master/etcd/files/etc/kubernetes/manifests/etcd.manifest create mode 100644 upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd-events.log create mode 100644 upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd-events.log.meta create mode 100644 upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd.log create mode 100644 upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd.log.meta create mode 100644 upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/addons/namespace.yaml create mode 100644 upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addon-update.sh create mode 100644 upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addon-update.sh.meta create mode 100644 upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addons.sh create mode 100644 upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addons.sh.meta create mode 100644 upup/models/nodeup/_kubernetes_master/kube-addons/services/kube-addons.service create mode 100644 upup/models/nodeup/_kubernetes_master/kube-apiserver/files/etc/kubernetes/manifests/kube-apiserver.manifest.template create mode 100644 upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/basic_auth.csv.template create mode 100644 upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/basic_auth.csv.template.meta create mode 100644 upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/known_tokens.csv.template create mode 100644 upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/known_tokens.csv.template.meta create mode 100644 upup/models/nodeup/_kubernetes_master/kube-apiserver/files/var/log/kube-apiserver.log create mode 100644 upup/models/nodeup/_kubernetes_master/kube-apiserver/files/var/log/kube-apiserver.log.meta create mode 100644 upup/models/nodeup/_kubernetes_master/kube-apiserver/options/_aws/kube-apiserver.aws create mode 100644 upup/models/nodeup/_kubernetes_master/kube-apiserver/options/_gce/kube-apiserver.gce create mode 100644 upup/models/nodeup/_kubernetes_master/kube-apiserver/options/kube-apiserver create mode 100644 upup/models/nodeup/_kubernetes_master/kube-controller-manager/files/etc/kubernetes/manifests/kube-controller-manager.template create mode 100644 upup/models/nodeup/_kubernetes_master/kube-controller-manager/files/var/log/kube-controller-manager.log create mode 100644 upup/models/nodeup/_kubernetes_master/kube-controller-manager/files/var/log/kube-controller-manager.log.meta create mode 100644 upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/_aws/kube-controller-manager.aws create mode 100644 upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/_gce/kube-controller-manager.gce create mode 100644 upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/kube-controller-manager create mode 100644 upup/models/nodeup/_kubernetes_master/kube-dns/files/etc/kubernetes/addons/dns/skydns-rc.yaml.template create mode 100644 
upup/models/nodeup/_kubernetes_master/kube-dns/files/etc/kubernetes/addons/dns/skydns-svc.yaml.template create mode 100644 upup/models/nodeup/_kubernetes_master/kube-dns/options/kube-dns create mode 100644 upup/models/nodeup/_kubernetes_master/kube-scheduler/files/etc/kubernetes/manifests/kube-scheduler.template create mode 100644 upup/models/nodeup/_kubernetes_master/kube-scheduler/files/var/log/kube-scheduler.log create mode 100644 upup/models/nodeup/_kubernetes_master/kube-scheduler/files/var/log/kube-scheduler.log.meta create mode 100644 upup/models/nodeup/_kubernetes_master/kube-scheduler/options/kube-scheduler create mode 100644 upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/etc/kubernetes/kube-node-unpacker.sh create mode 100644 upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/etc/kubernetes/kube-node-unpacker.sh.meta create mode 100644 upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/srv/salt/kube-bins/kube-proxy.tar.asset create mode 100644 upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/services/kube-node-unpacker.service create mode 100644 upup/models/nodeup/_kubernetes_pool/helpers/_aws/files/usr/share/google/safe_format_and_mount create mode 100644 upup/models/nodeup/_kubernetes_pool/helpers/_aws/files/usr/share/google/safe_format_and_mount.meta create mode 100644 upup/models/nodeup/_kubernetes_pool/kube-proxy/files/etc/kubernetes/manifests/kube-proxy.manifest.template create mode 100644 upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/lib/kube-proxy/kubeconfig.template create mode 100644 upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/lib/kube-proxy/kubeconfig.template.meta create mode 100644 upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/log/kube-proxy.log create mode 100644 upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/log/kube-proxy.log.meta create mode 100644 upup/models/nodeup/_kubernetes_pool/kube-proxy/options/kube-proxy create mode 100644 upup/models/nodeup/auto-upgrades/_debian_family/files/etc/apt/apt.conf.d/20auto-upgrades create mode 100644 upup/models/nodeup/auto-upgrades/_debian_family/packages/unattended-upgrades create mode 100644 upup/models/nodeup/docker/_gce/files/etc/sysctl.d/99-ip_forward.conf create mode 100644 upup/models/nodeup/docker/_gce/files/etc/sysctl.d/99-ip_forward.conf.meta create mode 100644 upup/models/nodeup/docker/_systemd/files/etc/sysconfig/docker.template create mode 100644 upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-healthcheck create mode 100644 upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-healthcheck.meta create mode 100644 upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-prestart create mode 100644 upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-prestart.meta create mode 100644 upup/models/nodeup/docker/_systemd/services/docker-healthcheck.service create mode 100644 upup/models/nodeup/docker/_systemd/services/docker-healthcheck.service.meta create mode 100644 upup/models/nodeup/docker/_systemd/services/docker-healthcheck.timer create mode 100644 upup/models/nodeup/docker/_systemd/services/docker.service create mode 100644 upup/models/nodeup/docker/_systemd/services/docker.service.meta create mode 100644 upup/models/nodeup/docker/files/usr/share/doc/docker/apache.txt create mode 100644 upup/models/nodeup/docker/options/_e2e_storage_test_environment/e2e.options create mode 100644 upup/models/nodeup/docker/options/_kubenet/kubenet.options create mode 
100644 upup/models/nodeup/docker/options/default create mode 100644 upup/models/nodeup/docker/packages/_jessie/docker-engine create mode 100644 upup/models/nodeup/docker/packages/bridge-utils create mode 100644 upup/models/nodeup/docker/packages/libapparmor1 create mode 100644 upup/models/nodeup/docker/packages/perl create mode 100644 upup/models/nodeup/kube-client-tools/files/usr/local/bin/kubectl.asset create mode 100644 upup/models/nodeup/kube-client-tools/files/usr/local/bin/kubectl.asset.meta create mode 100644 upup/models/nodeup/kubelet/files/etc/sysconfig/kubelet.template create mode 100644 upup/models/nodeup/kubelet/files/usr/local/bin/kubelet.asset create mode 100644 upup/models/nodeup/kubelet/files/usr/local/bin/kubelet.asset.meta create mode 100644 upup/models/nodeup/kubelet/files/var/lib/kubelet/kubeconfig.template create mode 100644 upup/models/nodeup/kubelet/files/var/lib/kubelet/kubeconfig.template.meta create mode 100644 upup/models/nodeup/kubelet/options/_aws/kubelet.aws create mode 100644 upup/models/nodeup/kubelet/options/_gce/kubelet.gce create mode 100644 upup/models/nodeup/kubelet/options/_kubernetes_master/kubelet.kubernetes_master create mode 100644 upup/models/nodeup/kubelet/options/kubelet create mode 100644 upup/models/nodeup/kubelet/services/kubelet.service create mode 100644 upup/models/nodeup/logrotate/files/etc/cron.hourly/logrotate create mode 100644 upup/models/nodeup/logrotate/files/etc/cron.hourly/logrotate.meta create mode 100644 upup/models/nodeup/logrotate/files/etc/logrotate.d/docker create mode 100644 upup/models/nodeup/logrotate/files/etc/logrotate.d/docker-containers create mode 100644 upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-addons create mode 100644 upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-apiserver create mode 100644 upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-controller-manager create mode 100644 upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-proxy create mode 100644 upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-scheduler create mode 100644 upup/models/nodeup/logrotate/files/etc/logrotate.d/kubelet create mode 100644 upup/models/nodeup/logrotate/packages/logrotate create mode 100644 upup/models/nodeup/ntp/_aws/packages/ntp create mode 100644 upup/models/nodeup/ntp/_aws/services/ntp create mode 100644 upup/models/nodeup/top/_debian_family/packages/apt-transport-https create mode 100644 upup/models/nodeup/top/_debian_family/packages/nfs-common create mode 100644 upup/models/nodeup/top/_debian_family/packages/python-apt create mode 100644 upup/models/nodeup/top/_debian_family/packages/socat create mode 100644 upup/models/nodeup/top/_redhat_family/packages/git create mode 100644 upup/models/nodeup/top/_redhat_family/packages/python create mode 100644 upup/models/nodeup/top/_ubuntu/packages/git create mode 100644 upup/models/nodeup/top/_ubuntu/packages/netcat-traditional create mode 100644 upup/models/nodeup/top/files/usr/local/share/doc/kubernetes/LICENSES create mode 100644 upup/models/nodeup/top/packages/curl create mode 100644 upup/pkg/fi/assetstore.go create mode 100644 upup/pkg/fi/ca.go create mode 100644 upup/pkg/fi/changes.go create mode 100644 upup/pkg/fi/cloud.go create mode 100644 upup/pkg/fi/cloudup/config.go create mode 100644 upup/pkg/fi/cloudup/gce/gce_apitarget.go create mode 100644 upup/pkg/fi/cloudup/gce/gce_cloud.go create mode 100644 upup/pkg/fi/cloudup/gce/gce_url.go create mode 100644 upup/pkg/fi/cloudup/gce/utils.go create mode 100644 
upup/pkg/fi/cloudup/gcetasks/QUESTIONS.md create mode 100644 upup/pkg/fi/cloudup/gcetasks/disk.go create mode 100644 upup/pkg/fi/cloudup/gcetasks/firewall_rule.go create mode 100644 upup/pkg/fi/cloudup/gcetasks/instance.go create mode 100644 upup/pkg/fi/cloudup/gcetasks/instance_template.go create mode 100644 upup/pkg/fi/cloudup/gcetasks/ip_address.go create mode 100644 upup/pkg/fi/cloudup/gcetasks/managed_instance_group.go create mode 100644 upup/pkg/fi/cloudup/gcetasks/network.go create mode 100644 upup/pkg/fi/cloudup/gcetasks/subnet.go create mode 100644 upup/pkg/fi/cloudup/loader.go create mode 100644 upup/pkg/fi/cloudup/terraform/literal.go create mode 100644 upup/pkg/fi/cloudup/terraform/target.go create mode 100644 upup/pkg/fi/compare_with_id.go create mode 100644 upup/pkg/fi/context.go create mode 100644 upup/pkg/fi/default_methods.go create mode 100644 upup/pkg/fi/dryrun_target.go create mode 100644 upup/pkg/fi/errors.go create mode 100644 upup/pkg/fi/files.go create mode 100644 upup/pkg/fi/fitasks/pki.go create mode 100644 upup/pkg/fi/fs_castore.go create mode 100644 upup/pkg/fi/fs_secretstore.go create mode 100644 upup/pkg/fi/hash.go create mode 100644 upup/pkg/fi/http.go create mode 100644 upup/pkg/fi/loader/options_loader.go create mode 100644 upup/pkg/fi/loader/tree_walker.go create mode 100644 upup/pkg/fi/nodeup/README.md create mode 100644 upup/pkg/fi/nodeup/build_flags.go create mode 100644 upup/pkg/fi/nodeup/cloudinit/cloud_init_target.go create mode 100644 upup/pkg/fi/nodeup/command.go create mode 100644 upup/pkg/fi/nodeup/config.go create mode 100644 upup/pkg/fi/nodeup/loader.go create mode 100644 upup/pkg/fi/nodeup/local/local_target.go create mode 100644 upup/pkg/fi/nodeup/nodetasks/asset.go create mode 100644 upup/pkg/fi/nodeup/nodetasks/file.go create mode 100644 upup/pkg/fi/nodeup/nodetasks/package.go create mode 100644 upup/pkg/fi/nodeup/nodetasks/service.go create mode 100644 upup/pkg/fi/nodeup/nodetasks/update_packages.go create mode 100644 upup/pkg/fi/options.go create mode 100644 upup/pkg/fi/resources.go create mode 100644 upup/pkg/fi/secrets.go create mode 100644 upup/pkg/fi/target.go create mode 100644 upup/pkg/fi/task.go create mode 100644 upup/pkg/fi/topological_sort.go create mode 100644 upup/pkg/fi/utils/json.go create mode 100644 upup/pkg/fi/utils/marshal.go create mode 100644 upup/pkg/fi/utils/reflect.go create mode 100644 upup/pkg/fi/utils/sanitize.go create mode 100644 upup/pkg/fi/utils/uris.go create mode 100644 upup/pkg/fi/utils/yaml.go create mode 100644 upup/pkg/fi/values.go diff --git a/upup/.gitignore b/upup/.gitignore new file mode 100644 index 0000000000..7c2a5cdd67 --- /dev/null +++ b/upup/.gitignore @@ -0,0 +1,2 @@ +vendor/ +.build/ diff --git a/upup/DEVELOP.md b/upup/DEVELOP.md new file mode 100644 index 0000000000..8386c37825 --- /dev/null +++ b/upup/DEVELOP.md @@ -0,0 +1,42 @@ +Random scribblings useful for development... + + +## Developing nodeup + +ssh ${HOST} sudo mkdir -p /opt/nodeup/state +ssh ${HOST} sudo chown -R ${USER} /opt/nodeup + +go install k8s.io/kube-deploy/upup/... 
&& rsync ~/k8s/bin/nodeup ${HOST}:/opt/nodeup/nodeup && rsync --delete -avz trees/ ${HOST}:/opt/nodeup/trees/ \
+&& rsync state/node.yaml ${HOST}:/opt/nodeup/state/node.yaml \
+&& ssh ${HOST} sudo /opt/nodeup/nodeup --v=2 --template=/opt/nodeup/trees/nodeup --state=/opt/nodeup/state --tags=kubernetes_pool,debian_family,gce,systemd
+
+
+# Random misc
+
+Extract the master node config from a terraform output
+
+cat tf/k8s.tf.json | jq -r '.resource.google_compute_instance["kubernetes-master"].metadata.config' > state/node.yaml
+
+
+
+TODOS
+======
+
+* Implement number-of-tags prioritization
+* Allow files ending in .md to be ignored. Useful for comments.
+* Better dependency tracking on systemd services?
+* Automatically use a different file mode if the file starts with #!?
+* Support .static under files to allow for files ending in .template?
+* How to inherit options?
+* Allow customization of ordering? Maybe prefix-based.
+* Cache hashes in-process (along with a timestamp?) so we don't hash the kubernetes binary bundle repeatedly
+* Fix the fact that we hash assets twice
+* Confirm that we drop support for init.d
+* Can we just use JSON custom marshalling instead of all our reflection stuff (or at least lighten the load)?
+
+* Do we officially publish https://storage.googleapis.com/kubernetes-release/release/v1.2.2/kubernetes-server-linux-amd64.tar.gz (i.e. just the server tar.gz)?
+
+* Need to start docker-healthcheck once
+
+* Can we replace some or all of the nodeup config with pkg/apis/componentconfig/types.go?
+
\ No newline at end of file
diff --git a/upup/Makefile b/upup/Makefile
new file mode 100644
index 0000000000..eb03165b24
--- /dev/null
+++ b/upup/Makefile
@@ -0,0 +1,31 @@
+gocode:
+	glide install
+	go install k8s.io/kube-deploy/upup/cmd/...
+
+tar: gocode
+	rm -rf .build/tar
+	mkdir -p .build/tar/nodeup/root
+	cp ${GOPATH}/bin/nodeup .build/tar/nodeup/root
+	cp -r models/nodeup/ .build/tar/nodeup/root/model/
+	tar czvf .build/nodeup.tar.gz -C .build/tar/ .
+	tar tvf .build/nodeup.tar.gz
+	(sha1sum .build/nodeup.tar.gz | cut -d' ' -f1) > .build/nodeup.tar.gz.sha1
+
+upload: tar
+	rm -rf .build/s3
+	mkdir -p .build/s3/nodeup
+	cp .build/nodeup.tar.gz .build/s3/nodeup/
+	cp .build/nodeup.tar.gz.sha1 .build/s3/nodeup/
+	aws s3 sync .build/s3/ s3://kubeupv2/
+	aws s3api put-object-acl --bucket kubeupv2 --key nodeup/nodeup.tar.gz --acl public-read
+	aws s3api put-object-acl --bucket kubeupv2 --key nodeup/nodeup.tar.gz.sha1 --acl public-read
+
+push: tar
+	scp .build/nodeup.tar.gz ${TARGET}:/tmp/
+	ssh ${TARGET} sudo tar zxf /tmp/nodeup.tar.gz -C /var/cache/kubernetes-install
+
+push-dry: push
+	ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /var/cache/kubernetes-install/nodeup/root/nodeup --conf=metadata://gce/config --dryrun --v=8 --template=/var/cache/kubernetes-install/nodeup/root/model
+
+push-run: push
+	ssh ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /var/cache/kubernetes-install/nodeup/root/nodeup --conf=metadata://gce/config --v=8 --template=/var/cache/kubernetes-install/nodeup/root/model
diff --git a/upup/README.md b/upup/README.md
new file mode 100644
index 0000000000..17aedcffd1
--- /dev/null
+++ b/upup/README.md
@@ -0,0 +1,51 @@
+## UpUp - CloudUp & NodeUp
+
+CloudUp and NodeUp are two tools that aim to replace kube-up:
+the easiest way to get a production Kubernetes cluster up and running.
+
+(Currently work in progress, but working. Some of these statements are forward-looking.)
+
+Some of the more interesting features:
+
+* Written in Go, so hopefully easier to maintain and extend, as complexity inevitably increases
+* Uses a state-sync model, so we get things like a dry-run mode and idempotency automatically
+* Based on a simple meta-model defined in a directory tree
+* Can produce configurations in other formats (currently Terraform & Cloud-Init), so that we can have working
+  configurations for other tools also.
+
+## Bringing up a cluster
+
+Set `YOUR_GCE_PROJECT`, then:
+
+```
+cd upup
+make
+${GOPATH}/bin/cloudup --v=0 --logtostderr -cloud=gce -zone=us-central1-f -project=$YOUR_GCE_PROJECT -name=kubernetes -kubernetes-version=1.2.2
+```
+
+If you have problems, please set `--v=8 --logtostderr` and open an issue, and ping justinsb on Slack!
+
+For now, we don't build a local kubectl configuration file. So just ssh to the master, and run kubectl from there:
+
+```
+gcloud compute ssh kubernetes-master
+...
+kubectl get nodes
+kubectl get pods --all-namespaces
+```
+
+## Other interesting modes
+
+See changes that would be applied: `${GOPATH}/bin/cloudup --dryrun`
+
+Build a terraform model: `${GOPATH}/bin/cloudup $NORMAL_ARGS --target=terraform > tf/k8s.tf.json`
+
+## How it works
+
+Everything is driven by a local configuration directory tree, called the "model". The model represents
+the desired state of the world.
+
+Each file in the tree describes a Task.
+
+On the nodeup side, Tasks can manage files, systemd services, packages etc.
+On the cloudup side, Tasks manage cloud resources: instances, networks, disks etc.
diff --git a/upup/cmd/cloudup/main.go b/upup/cmd/cloudup/main.go
new file mode 100644
index 0000000000..0c48721086
--- /dev/null
+++ b/upup/cmd/cloudup/main.go
@@ -0,0 +1,244 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"github.com/golang/glog"
+	"io/ioutil"
+	"k8s.io/kube-deploy/upup/pkg/fi"
+	"k8s.io/kube-deploy/upup/pkg/fi/cloudup"
+	"k8s.io/kube-deploy/upup/pkg/fi/cloudup/gce"
+	"k8s.io/kube-deploy/upup/pkg/fi/cloudup/gcetasks"
+	"k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform"
+	"k8s.io/kube-deploy/upup/pkg/fi/loader"
+	"k8s.io/kube-deploy/upup/pkg/fi/utils"
+	"os"
+	"path"
+	"strings"
+)
+
+func main() {
+	dryrun := false
+	flag.BoolVar(&dryrun, "dryrun", false, "Don't create cloud resources; just show what would be done")
+	target := "direct"
+	flag.StringVar(&target, "target", target, "Target - direct, terraform")
+	configFile := ""
+	flag.StringVar(&configFile, "conf", configFile, "Configuration file to load")
+	modelDir := "models/cloudup"
+	flag.StringVar(&modelDir, "model", modelDir, "Source directory to use as model")
+	stateDir := "./state"
+	flag.StringVar(&stateDir, "state", stateDir, "Directory to use to store local state")
+	nodeModelDir := "models/nodeup"
+	flag.StringVar(&nodeModelDir, "nodemodel", nodeModelDir, "Source directory to use as model for node configuration")
+
+	// TODO: Replace all these with a direct binding to the CloudConfig
+	// (we have plenty of reflection helpers if one isn't already available!)
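+	//
+	// One possible shape for that binding (hypothetical helper; BindFlags does
+	// not exist yet, this is only a sketch):
+	//
+	//   utils.BindFlags(flag.CommandLine, config) // reflect over CloudConfig fields
+	//
+	// which would avoid hand-writing a flag.StringVar call for every new field.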
+ config := &cloudup.CloudConfig{} + flag.StringVar(&config.CloudProvider, "cloud", config.CloudProvider, "Cloud provider to use - gce, aws") + flag.StringVar(&config.Zone, "zone", config.Zone, "Cloud zone to target (warning - will be replaced by region)") + flag.StringVar(&config.Project, "project", config.Project, "Project to use (must be set on GCE)") + flag.StringVar(&config.ClusterName, "name", config.ClusterName, "Name for cluster") + flag.StringVar(&config.KubernetesVersion, "kubernetes-version", config.KubernetesVersion, "Version of kubernetes to run") + //flag.StringVar(&config.Region, "region", config.Region, "Cloud region to target") + + flag.Parse() + + if dryrun { + target = "dryrun" + } + + cmd := &CreateClusterCmd{ + Config: config, + ModelDir: modelDir, + StateDir: stateDir, + Target: target, + NodeModelDir: nodeModelDir, + } + + if configFile != "" { + //confFile := path.Join(cmd.StateDir, "kubernetes.yaml") + err := cmd.LoadConfig(configFile) + if err != nil { + glog.Errorf("error loading config: %v", err) + os.Exit(1) + } + } + + err := cmd.Run() + if err != nil { + glog.Errorf("error running command: %v", err) + os.Exit(1) + } + + glog.Infof("Completed successfully") +} + +type CreateClusterCmd struct { + // Config is the cluster configuration + Config *cloudup.CloudConfig + // ModelDir is the directory in which the cloudup model is found + ModelDir string + // StateDir is a directory in which we store state (such as the PKI tree) + StateDir string + // Target specifies how we are operating e.g. direct to GCE, or AWS, or dry-run, or terraform + Target string + // The directory in which the node model is found + NodeModelDir string +} + +func (c *CreateClusterCmd) LoadConfig(configFile string) error { + conf, err := ioutil.ReadFile(configFile) + if err != nil { + return fmt.Errorf("error loading configuration file %q: %v", configFile, err) + } + err = utils.YamlUnmarshal(conf, c.Config) + if err != nil { + return fmt.Errorf("error parsing configuration file %q: %v", configFile, err) + } + return nil +} + +func (c *CreateClusterCmd) Run() error { + if c.StateDir == "" { + return fmt.Errorf("state dir is required") + } + + if c.Config.CloudProvider == "" { + return fmt.Errorf("must specify CloudProvider. 
Specify with -cloud")
+	}
+
+	tags := make(map[string]struct{})
+
+	l := &cloudup.Loader{}
+	l.Init()
+
+	caStore, err := fi.NewFilesystemCAStore(path.Join(c.StateDir, "pki"))
+	if err != nil {
+		return fmt.Errorf("error building CA store: %v", err)
+	}
+	secretStore, err := fi.NewFilesystemSecretStore(path.Join(c.StateDir, "secrets"))
+	if err != nil {
+		return fmt.Errorf("error building secret store: %v", err)
+	}
+
+	if len(c.Config.Assets) == 0 {
+		if c.Config.KubernetesVersion == "" {
+			return fmt.Errorf("Must either specify a KubernetesVersion (-kubernetes-version) or provide an asset with the release bundle")
+		}
+		defaultReleaseAsset := fmt.Sprintf("https://storage.googleapis.com/kubernetes-release/release/v%s/kubernetes-server-linux-amd64.tar.gz", c.Config.KubernetesVersion)
+		glog.Infof("Adding default kubernetes release asset: %s", defaultReleaseAsset)
+		// TODO: Verify it exists, get the hash (that will check that KubernetesVersion is valid)
+		c.Config.Assets = append(c.Config.Assets, defaultReleaseAsset)
+	}
+
+	if c.Config.NodeUp.Location == "" {
+		location := "https://kubeupv2.s3.amazonaws.com/nodeup/nodeup.tar.gz"
+		glog.Infof("Using default nodeup location: %q", location)
+		c.Config.NodeUp.Location = location
+	}
+
+	var cloud fi.Cloud
+
+	var project string
+	var region string
+
+	checkExisting := true
+
+	switch c.Config.CloudProvider {
+	case "gce":
+		tags["_gce"] = struct{}{}
+		l.AddTypes(map[string]interface{}{
+			"persistentDisk":       &gcetasks.PersistentDisk{},
+			"instance":             &gcetasks.Instance{},
+			"instanceTemplate":     &gcetasks.InstanceTemplate{},
+			"network":              &gcetasks.Network{},
+			"managedInstanceGroup": &gcetasks.ManagedInstanceGroup{},
+			"firewallRule":         &gcetasks.FirewallRule{},
+			"ipAddress":            &gcetasks.IPAddress{},
+		})
+
+		// For now a zone must be specified...
+		// This will be replaced with a region when we go full HA
+		zone := c.Config.Zone
+		if zone == "" {
+			return fmt.Errorf("Must specify a zone (use -zone)")
+		}
+		tokens := strings.Split(zone, "-")
+		if len(tokens) <= 2 {
+			return fmt.Errorf("Invalid Zone: %v", zone)
+		}
+		region = tokens[0] + "-" + tokens[1]
+
+		project = c.Config.Project
+		if project == "" {
+			return fmt.Errorf("project is required for GCE")
+		}
+		gceCloud, err := gce.NewGCECloud(region, project)
+		if err != nil {
+			return err
+		}
+		cloud = gceCloud
+
+	default:
+		return fmt.Errorf("unknown CloudProvider %q", c.Config.CloudProvider)
+	}
+
+	l.Tags = tags
+	l.CAStore = caStore
+	l.SecretStore = secretStore
+	l.StateDir = c.StateDir
+	l.NodeModelDir = c.NodeModelDir
+	l.OptionsLoader = loader.NewOptionsLoader(c.Config)
+
+	taskMap, err := l.Build(c.ModelDir)
+	if err != nil {
+		glog.Exitf("error building: %v", err)
+	}
+
+	if c.Config.ClusterName == "" {
+		return fmt.Errorf("ClusterName is required")
+	}
+
+	if c.Config.Zone == "" {
+		return fmt.Errorf("Zone is required")
+	}
+
+	var target fi.Target
+
+	switch c.Target {
+	case "direct":
+		switch c.Config.CloudProvider {
+		case "gce":
+			target = gce.NewGCEAPITarget(cloud.(*gce.GCECloud))
+		default:
+			return fmt.Errorf("direct configuration not supported with CloudProvider:%q", c.Config.CloudProvider)
+		}
+
+	case "terraform":
+		checkExisting = false
+		target = terraform.NewTerraformTarget(region, project, os.Stdout)
+
+	case "dryrun":
+		target = fi.NewDryRunTarget(os.Stdout)
+	default:
+		return fmt.Errorf("unsupported target type %q", c.Target)
+	}
+
+	context, err := fi.NewContext(target, cloud, caStore, checkExisting)
+	if err != nil {
+		glog.Exitf("error building context: %v", err)
+	}
+	defer context.Close()
+
+	err = context.RunTasks(taskMap)
+	if err != nil {
+		glog.Exitf("error running tasks: %v", err)
+	}
+
+	err = target.Finish(taskMap)
+	if err != nil {
+		glog.Exitf("error closing target: %v", err)
+	}
+
+	return nil
+}
diff --git a/upup/cmd/nodeup/main.go b/upup/cmd/nodeup/main.go
new file mode 100644
index 0000000000..1b195ce173
--- /dev/null
+++ b/upup/cmd/nodeup/main.go
@@ -0,0 +1,51 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"github.com/golang/glog"
+	"k8s.io/kube-deploy/upup/pkg/fi/nodeup"
+	"os"
+)
+
+func main() {
+	flagModel := "model"
+	flag.StringVar(&flagModel, "model", flagModel, "directory to use as model for desired configuration")
+	var flagConf string
+	flag.StringVar(&flagConf, "conf", "node.yaml", "configuration location")
+	var flagAssetDir string
+	flag.StringVar(&flagAssetDir, "assets", "/var/cache/nodeup", "the location for the local asset cache")
+
+	dryrun := false
+	flag.BoolVar(&dryrun, "dryrun", false, "Don't create cloud resources; just show what would be done")
+	target := "direct"
+	flag.StringVar(&target, "target", target, "Target - direct, cloudinit")
+
+	flag.Parse()
+
+	if dryrun {
+		target = "dryrun"
+	}
+
+	flag.Set("logtostderr", "true")
+	flag.Parse()
+
+	if flagConf == "" {
+		glog.Exitf("--conf is required")
+	}
+
+	config := &nodeup.NodeConfig{}
+	cmd := &nodeup.NodeUpCommand{
+		Config:         config,
+		ConfigLocation: flagConf,
+		ModelDir:       flagModel,
+		Target:         target,
+		AssetDir:       flagAssetDir,
+	}
+	err := cmd.Run(os.Stdout)
+	if err != nil {
+		glog.Exitf("error running nodeup: %v", err)
+		os.Exit(1)
+	}
+	fmt.Printf("success")
+}
diff --git a/upup/glide.lock b/upup/glide.lock
new file mode 100644
index 0000000000..921cd12095
--- /dev/null
+++ b/upup/glide.lock
@@ -0,0 +1,53 @@
+hash: 
67b60195692c44c9e3be82ced106118324e5de46357a1c11a2942aef8675816e +updated: 2016-05-06T15:48:38.735466083-04:00 +imports: +- name: github.com/cloudfoundry-incubator/candiedyaml + version: 99c3df83b51532e3615f851d8c2dbb638f5313bf +- name: github.com/ghodss/yaml + version: e8e0db9016175449df0e9c4b6e6995a9433a395c +- name: github.com/golang/glog + version: 23def4e6c14b4da8ac2ed8007337bc5eb5007998 +- name: github.com/golang/protobuf + version: 7cc19b78d562895b13596ddce7aafb59dd789318 + subpackages: + - proto +- name: golang.org/x/net + version: 7e42c0e1329bb108f7376a7618a2871ab90f1c4d + subpackages: + - context + - context/ctxhttp +- name: golang.org/x/oauth2 + version: e86e2718db89775a4604abc10a5d3a5672e7336e + subpackages: + - google + - internal + - jws + - jwt +- name: google.golang.org/api + version: f9a4669e07732c84854dce1f5c451c22427228fb + subpackages: + - compute/v1 + - googleapi + - storage/v1 + - gensupport + - googleapi/internal/uritemplates +- name: google.golang.org/appengine + version: e234e71924d4aa52444bc76f2f831f13fa1eca60 + subpackages: + - urlfetch + - internal + - internal/app_identity + - internal/modules + - internal/urlfetch + - internal/base + - internal/datastore + - internal/log + - internal/remote_api +- name: google.golang.org/cloud + version: 200292f09e3aaa34878d801ab71fe823b1f7d36a + subpackages: + - compute/metadata + - internal +- name: google.golang.org/grpc + version: 9604a2bb7dd81d87c2873a9580258465f3c311c8 +devImports: [] diff --git a/upup/glide.yaml b/upup/glide.yaml new file mode 100644 index 0000000000..2b1cab8162 --- /dev/null +++ b/upup/glide.yaml @@ -0,0 +1,16 @@ +package: k8s.io/kube-deploy/upup +import: +- package: github.com/ghodss/yaml +- package: github.com/golang/glog +- package: golang.org/x/net + subpackages: + - context +- package: golang.org/x/oauth2 + subpackages: + - google +- package: google.golang.org/api + subpackages: + - compute/v1 + - googleapi + - storage/v1 +- package: google.golang.org/grpc diff --git a/upup/models/cloudup/_gce/defaults.options b/upup/models/cloudup/_gce/defaults.options new file mode 100644 index 0000000000..f7c9e4e91a --- /dev/null +++ b/upup/models/cloudup/_gce/defaults.options @@ -0,0 +1,53 @@ +ClusterName: {{ .InstancePrefix }} +InstancePrefix: kubernetes +AllocateNodeCIDRs: true +Multizone: true + +ServiceClusterIPRange: 10.0.0.0/16 +ClusterIPRange: 10.244.0.0/16 +MasterInternalIP: 172.20.0.9 +MasterIPRange: 10.246.0.0/24 +NetworkProvider: none + +AdmissionControl: NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,PersistentVolumeLabel + +EnableClusterMonitoring: none +EnableL7LoadBalancing: none +EnableClusterUI: true + +EnableClusterDNS: true +DNSReplicas: 1 +DNSServerIP: 10.0.0.10 +DNSDomain: cluster.local + +EnableClusterLogging: true +EnableNodeLogging: true +LoggingDestination: elasticsearch +ElasticsearchLoggingReplicas: 1 + +MasterImage: k8s-1-2-debian-jessie-amd64-2016-04-17 +MasterName: {{ .InstancePrefix }}-master +MasterTag: {{ .InstancePrefix }}-master +{{ if gt .NodeCount 500 }} +MasterMachineType: n1-standard-32 +{{ else if gt .NodeCount 250 }} +MasterMachineType: n1-standard-16 +{{ else if gt .NodeCount 100 }} +MasterMachineType: n1-standard-8 +{{ else if gt .NodeCount 10 }} +MasterMachineType: n1-standard-4 +{{ else if gt .NodeCount 5 }} +MasterMachineType: n1-standard-2 +{{ else }} +MasterMachineType: n1-standard-1 +{{ end }} +MasterVolumeType: pd-ssd +MasterVolumeSize: 20 + +NodeImage: k8s-1-2-debian-jessie-amd64-2016-04-17 +NodeCount: 2 +NodeTag: {{ 
.InstancePrefix }}-minion +NodeInstancePrefix: {{ .InstancePrefix }}-minion +NodeMachineType: n1-standard-2 + +KubeUser: admin \ No newline at end of file diff --git a/upup/models/cloudup/_gce/master.yaml b/upup/models/cloudup/_gce/master.yaml new file mode 100644 index 0000000000..89488352d4 --- /dev/null +++ b/upup/models/cloudup/_gce/master.yaml @@ -0,0 +1,44 @@ +# TODO: Support multiple masters + +persistentDisk/{{ .MasterName }}-pd: + zone: {{ .Zone }} + sizeGB: {{ or .MasterVolumeSize 20 }} + volumeType: {{ or .MasterVolumeType "pd-ssd" }} + +# Open master HTTPS +firewallRule/{{ .MasterName }}-https: + network: network/default + sourceRanges: 0.0.0.0/0 + targetTags: {{ .MasterTag }} + allowed: tcp:443 + +# Allocate master IP +ipAddress/{{ .MasterName }}-ip: + address: {{ .MasterPublicIP }} + +# Master instance +instance/{{ .MasterName }}: + ipaddress: ipAddress/{{ .MasterName }}-ip + zone: {{ .Zone }} + machineType: {{ .MasterMachineType }} + image: {{ .MasterImage }} + tags: {{ .MasterTag }} + network: network/default + scopes: + - storage-ro + - compute-rw + - monitoring + - logging-write + canIpForward: true + disks: + master-pd: persistentDisk/{{ .MasterName }}-pd + metadata: + #kube-env: resources/kube-env +{{ if eq .NodeInit "cloudinit" }} + config: resources/cloudinit.yaml _kubernetes_master +{{ else }} + startup-script: resources/nodeup.sh + config: resources/config.yaml _kubernetes_master +{{ end }} + cluster-name: resources/cluster-name + preemptible: false \ No newline at end of file diff --git a/upup/models/cloudup/_gce/network.yaml b/upup/models/cloudup/_gce/network.yaml new file mode 100644 index 0000000000..6aaab69e30 --- /dev/null +++ b/upup/models/cloudup/_gce/network.yaml @@ -0,0 +1,20 @@ +{{ $networkName := "default" }} + +network/{{ $networkName }}: + cidr: 10.240.0.0/16 + +# Allow all internal traffic +firewallRule/{{ $networkName }}-default-internal: + network: network/{{$networkName}} + sourceRanges: 10.0.0.0/8 + allowed: + - tcp:1-65535 + - udp:1-65535 + - icmp + +# SSH is open to the world +firewallRule/{{ $networkName }}-default-ssh: + network: network/default + sourceRanges: 0.0.0.0/0 + allowed: tcp:22 + diff --git a/upup/models/cloudup/_gce/nodes.yaml b/upup/models/cloudup/_gce/nodes.yaml new file mode 100644 index 0000000000..9b34518aa9 --- /dev/null +++ b/upup/models/cloudup/_gce/nodes.yaml @@ -0,0 +1,48 @@ +# TODO: Support multiple instance groups + +instanceTemplate/{{ .NodeInstancePrefix }}-template: + network: network/default + machineType: {{ .NodeMachineType }} + # TODO: Make configurable + bootDiskType: pd-standard + bootDiskSizeGB: 100 + bootDiskImage: {{ .NodeImage }} + canIpForward: true + # TODO: Support preemptible nodes? 
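+  # (One possible shape if that option were added; "NodePreemptible" is a
+  #  hypothetical option name, not defined in defaults.options yet:
+  #    preemptible: {{ or .NodePreemptible false }} )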
+ preemptible: false + scopes: + - compute-rw + - monitoring + - logging-write + - storage-ro + metadata: + # kube-env: resources/kube-env +{{ if eq .NodeInit "cloudinit" }} + # TODO: we should probably always store the config somewhere + config: resources/cloudinit.yaml _kubernetes_master +{{ else }} + startup-script: resources/nodeup.sh + config: resources/config.yaml _kubernetes_pool +{{ end }} + cluster-name: resources/cluster-name + tags: + - {{ .NodeTag }} + +managedInstanceGroup/{{ .NodeInstancePrefix }}-group: + zone: {{ .Zone }} + baseInstanceName: {{ .NodeInstancePrefix }} + targetSize: {{ .NodeCount }} + instanceTemplate: instanceTemplate/{{ .NodeInstancePrefix }}-template + +# Allow traffic from nodes -> nodes +firewallRule/{{ .NodeTag }}-all: + network: network/default + sourceRanges: {{ .ClusterIPRange }} + targetTags: {{ .NodeTag }} + allowed: + - tcp + - udp + - icmp + - esp + - ah + - sctp diff --git a/upup/models/cloudup/_gce/resources/cloudinit.yaml.template b/upup/models/cloudup/_gce/resources/cloudinit.yaml.template new file mode 100644 index 0000000000..2dcb3a1a11 --- /dev/null +++ b/upup/models/cloudup/_gce/resources/cloudinit.yaml.template @@ -0,0 +1 @@ +{{ BuildNodeConfig "cloudinit" "resources/config.yaml.template" Args }} \ No newline at end of file diff --git a/upup/models/cloudup/_gce/resources/cluster-name.template b/upup/models/cloudup/_gce/resources/cluster-name.template new file mode 100644 index 0000000000..4422d72df9 --- /dev/null +++ b/upup/models/cloudup/_gce/resources/cluster-name.template @@ -0,0 +1 @@ +{{ .ClusterName }} \ No newline at end of file diff --git a/upup/models/cloudup/_gce/resources/config.yaml.template b/upup/models/cloudup/_gce/resources/config.yaml.template new file mode 100644 index 0000000000..c118162145 --- /dev/null +++ b/upup/models/cloudup/_gce/resources/config.yaml.template @@ -0,0 +1,39 @@ +Kubelet: + Certificate: {{ Base64Encode (CA.Cert "kubelet").AsString }} + Key: {{ Base64Encode (CA.PrivateKey "kubelet").AsString }} + +NodeUp: + Location: https://kubeupv2.s3.amazonaws.com/nodeup/nodeup.tar.gz + +CACertificate: {{ Base64Encode (CA.Cert "ca").AsString }} + +APIServer: + Certificate: {{ Base64Encode (CA.Cert "master").AsString }} + Key: {{ Base64Encode (CA.PrivateKey "master").AsString }} + +KubeUser: {{ .KubeUser }} +KubePassword: {{ (Secrets.Secret "kube").AsString }} + +Tokens: + admin: {{ (Secrets.Secret "admin").AsString }} + kubelet: {{ (Secrets.Secret "kubelet").AsString }} + kube-proxy: {{ (Secrets.Secret "kube-proxy").AsString }} + "system:scheduler": {{ (Secrets.Secret "system:scheduler").AsString }} + "system:controller_manager": {{ (Secrets.Secret "system:controller_manager").AsString }} + "system:logging": {{ (Secrets.Secret "system:logging").AsString }} + "system:monitoring": {{ (Secrets.Secret "system:monitoring").AsString }} + "system:dns": {{ (Secrets.Secret "system:dns").AsString }} + +Tags: +{{ range $tag := Args }} + - {{ $tag }} +{{ end }} + - _gce + - _jessie + - _debian_family + - _systemd + +Assets: +{{ range $asset := .Assets }} + - {{ $asset }} +{{ end }} diff --git a/upup/models/cloudup/_gce/resources/kube-env.template b/upup/models/cloudup/_gce/resources/kube-env.template new file mode 100644 index 0000000000..39ef90ce64 --- /dev/null +++ b/upup/models/cloudup/_gce/resources/kube-env.template @@ -0,0 +1,150 @@ +INSTANCE_PREFIX: {{ .InstancePrefix }} +NODE_INSTANCE_PREFIX: {{ .NodeInstancePrefix }} +CLUSTER_IP_RANGE: {{ .ClusterIPRange }} + +#{ +#url, hash, err := 
k.ServerBinaryTar.Resolve(fi.HashAlgorithmSHA1) +#if err != nil { +#return nil, err +#} +#SERVER_BINARY_TAR_URL"] = url +#SERVER_BINARY_TAR_HASH"] = hash +#} + +#{ +#url, hash, err := k.SaltTar.Resolve(fi.HashAlgorithmSHA1) +#if err != nil { +#return nil, err +#} +#SALT_TAR_URL"] = url +#SALT_TAR_HASH"] = hash +#} + +SERVICE_CLUSTER_IP_RANGE: {{ .ServiceClusterIPRange }} + +KUBERNETES_MASTER_NAME: {{ .MasterName }} + +ALLOCATE_NODE_CIDRS: {{ .AllocateNodeCIDRs }} + +ENABLE_CLUSTER_MONITORING: {{ .EnableClusterMonitoring }} +ENABLE_L7_LOADBALANCING: {{ .EnableL7LoadBalancing }} +ENABLE_CLUSTER_LOGGING: {{ .EnableClusterLogging }} +ENABLE_CLUSTER_UI: {{ .EnableClusterUI }} +ENABLE_NODE_LOGGING: {{ .EnableNodeLogging }} +LOGGING_DESTINATION: {{ .LoggingDestination }} +ELASTICSEARCH_LOGGING_REPLICAS: {{ .ElasticsearchLoggingReplicas }} +ENABLE_CLUSTER_DNS: {{ .EnableClusterDNS }} +ENABLE_CLUSTER_REGISTRY: {{ .EnableClusterRegistry }} +CLUSTER_REGISTRY_DISK: {{ .ClusterRegistryDisk }} +CLUSTER_REGISTRY_DISK_SIZE: {{ .ClusterRegistryDiskSize }} +DNS_REPLICAS: {{.DNSReplicas }} +DNS_SERVER_IP: {{ .DNSServerIP }} +DNS_DOMAIN: {{ .DNSDomain }} + +KUBELET_TOKEN: {{ .KubeletToken }} +KUBE_PROXY_TOKEN: {{ .KubeProxyToken }} +ADMISSION_CONTROL: {{ .AdmissionControl }} +MASTER_IP_RANGE: {{ .MasterIPRange }} +RUNTIME_CONFIG: {{ .RuntimeConfig }} + +CA_CERT: {{ Base64Encode (CA.Cert "ca").AsString }} +KUBELET_CERT: {{ Base64Encode (CA.Cert "kubelet").AsString }} +KUBELET_KEY: {{ Base64Encode (CA.PrivateKey "kubelet").AsString }} + +NETWORK_PROVIDER: {{ .NetworkProvider }} +HAIRPIN_MODE: {{ .HairpinMode }} +OPENCONTRAIL_TAG: {{ .OpencontrailTag }} +OPENCONTRAIL_KUBERNETES_TAG: {{ .OpencontrailKubernetesTag }} +OPENCONTRAIL_PUBLIC_SUBNET: {{ .OpencontrailPublicSubnet }} +E2E_STORAGE_TEST_ENVIRONMENT: {{ .E2EStorageTestEnvironment }} +KUBE_IMAGE_TAG: {{ .KubeImageTag }} +KUBE_DOCKER_REGISTRY: {{ .KubeDockerRegistry }} +KUBE_ADDON_REGISTRY: {{ .KubeAddonRegistry }} +MULTIZONE: {{ .Multizone }} +NON_MASQUERADE_CIDR: {{ .NonMasqueradeCidr }} + +KUBELET_PORT: {{ .KubeletPort }} + +KUBE_APISERVER_REQUEST_TIMEOUT: {{ .KubeApiserverRequestTimeout }} + +TERMINATED_POD_GC_THRESHOLD: {{ .TerminatedPodGcThreshold }} + +#if k.OsDistribution == "trusty" { +#KUBE_MANIFESTS_TAR_URL: .KubeManifestsTarURL }} +#KUBE_MANIFESTS_TAR_HASH: .KubeManifestsTarSha256 }} +#} + +TEST_CLUSTER: {{ .TestCluster }} + +KUBELET_TEST_ARGS: {{ .KubeletTestArgs }} + +KUBELET_TEST_LOG_LEVEL: {{ .KubeletTestLogLevel }} + +DOCKER_TEST_LOG_LEVEL: {{ .DockerTestLogLevel }} + +ENABLE_CUSTOM_METRICS: {{ .EnableCustomMetrics }} + +# if .Target.IsMaster + +# If the user requested that the master be part of the cluster, set the +# environment variable to program the master kubelet to register itself. 
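+#
+# (For example: with the GCE defaults, InstancePrefix is "kubernetes", so
+# MasterName is "kubernetes-master", and when RegisterMasterKubelet is set
+# this section renders as "KUBELET_APISERVER: kubernetes-master".)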
+{{ if .RegisterMasterKubelet }} +KUBELET_APISERVER: {{ .MasterName }} +{{ end }} + +KUBERNETES_MASTER: true +KUBE_USER: {{ .KubeUser }} +KUBE_PASSWORD: {{ .KubePassword }} +KUBE_BEARER_TOKEN: {{ .BearerToken }} +MASTER_CERT: {{ Base64Encode (CA.Cert "master").AsString }} +MASTER_KEY: {{ Base64Encode (CA.PrivateKey "master").AsString }} +KUBECFG_CERT: {{ Base64Encode (CA.Cert "kubecfg").AsString }} +KUBECFG_KEY: {{ Base64Encode (CA.PrivateKey "kubecfg").AsString }} + +ENABLE_MANIFEST_URL: {{ .EnableManifestURL }} +MANIFEST_URL: {{ .ManifestURL }} +MANIFEST_URL_HEADER: {{ .ManifestURLHeader }} +NUM_NODES: {{.NodeCount }} + +APISERVER_TEST_ARGS: {{ .ApiserverTestArgs }} + +APISERVER_TEST_LOG_LEVEL: {{ .ApiserverTestLogLevel }} + +CONTROLLER_MANAGER_TEST_ARGS: {{ .ControllerManagerTestArgs }} + +CONTROLLER_MANAGER_TEST_LOG_LEVEL: {{ .ControllerManagerTestLogLevel }} + +SCHEDULER_TEST_ARGS: {{ .SchedulerTestArgs }} + +SCHEDULER_TEST_LOG_LEVEL: {{ .SchedulerTestLogLevel }} + +# else + +# Node-only vars + +KUBERNETES_MASTER: false +ZONE: {{ .Zone }} +EXTRA_DOCKER_OPTS: {{ .ExtraDockerOpts }} +MANIFEST_URL: {{ .ManifestURL }} + +KUBEPROXY_TEST_ARGS: {{ .KubeProxyTestArgs }} + +KUBEPROXY_TEST_LOG_LEVEL: {{ .KubeProxyTestLogLevel }} + +# end + +NODE_LABELS: {{ .NodeLabels }} + +#if k.OsDistribution == "coreos" { +#// CoreOS-only env vars. TODO(yifan): Make them available on other distros. +#KUBE_MANIFESTS_TAR_URL: .KubeManifestsTarURL }} +#KUBE_MANIFESTS_TAR_HASH: .KubeManifestsTarSha256 }} +#KUBERNETES_CONTAINER_RUNTIME: .ContainerRuntime }} +#RKT_VERSION: .RktVersion }} +#RKT_PATH: .RktPath }} +#KUBERNETES_CONFIGURE_CBR0: .KubernetesConfigureCbr0 }} +#} + +# This next bit for changes vs kube-up: +# https://github.com/kubernetes/kubernetes/issues/23264 +CA_KEY: {{ Base64Encode (CA.PrivateKey "ca").AsString }} diff --git a/upup/models/cloudup/_gce/resources/nodeup.sh.template b/upup/models/cloudup/_gce/resources/nodeup.sh.template new file mode 100755 index 0000000000..801fa9b542 --- /dev/null +++ b/upup/models/cloudup/_gce/resources/nodeup.sh.template @@ -0,0 +1,139 @@ +#!/bin/bash +# Copyright 2016 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +NODEUP_TAR_URL={{ .NodeUp.Location }} +NODEUP_TAR_HASH={{ .NodeUp.Hash }} + +function ensure-basic-networking() { + # Deal with GCE networking bring-up race. (We rely on DNS for a lot, + # and it's just not worth doing a whole lot of startup work if this + # isn't ready yet.) + until getent hosts metadata.google.internal &>/dev/null; do + echo 'Waiting for functional DNS (trying to resolve metadata.google.internal)...' + sleep 3 + done + until getent hosts $(hostname -f || echo _error_) &>/dev/null; do + echo 'Waiting for functional DNS (trying to resolve my own FQDN)...' + sleep 3 + done + until getent hosts $(hostname -i || echo _error_) &>/dev/null; do + echo 'Waiting for functional DNS (trying to resolve my own IP)...' 
+    sleep 3
+  done
+
+  echo "Networking functional on $(hostname) ($(hostname -i))"
+}
+
+function ensure-install-dir() {
+  INSTALL_DIR="/var/cache/kubernetes-install"
+  mkdir -p ${INSTALL_DIR}
+  cd ${INSTALL_DIR}
+}
+
+function curl-metadata() {
+  curl --fail --retry 5 --silent -H 'Metadata-Flavor: Google' "http://metadata/computeMetadata/v1/instance/attributes/${1}"
+}
+
+# Retry a download until we get it. Takes a hash and a set of URLs.
+#
+# $1 is the sha1 of the URL. Can be "" if the sha1 is unknown.
+# $2+ are the URLs to download.
+download-or-bust() {
+  local -r hash="$1"
+  shift 1
+
+  urls=( $* )
+  while true; do
+    for url in "${urls[@]}"; do
+      local file="${url##*/}"
+      rm -f "${file}"
+      if ! curl -f --ipv4 -Lo "${file}" --connect-timeout 20 --retry 6 --retry-delay 10 "${url}"; then
+        echo "== Failed to download ${url}. Retrying. =="
+      elif [[ -n "${hash}" ]] && ! validate-hash "${file}" "${hash}"; then
+        echo "== Hash validation of ${url} failed. Retrying. =="
+      else
+        if [[ -n "${hash}" ]]; then
+          echo "== Downloaded ${url} (SHA1 = ${hash}) =="
+        else
+          echo "== Downloaded ${url} =="
+        fi
+        return
+      fi
+    done
+  done
+}
+
+validate-hash() {
+  local -r file="$1"
+  local -r expected="$2"
+  local actual
+
+  actual=$(sha1sum ${file} | awk '{ print $1 }') || true
+  if [[ "${actual}" != "${expected}" ]]; then
+    echo "== ${file} corrupted, sha1 ${actual} doesn't match expected ${expected} =="
+    return 1
+  fi
+}
+
+function split-commas() {
+  echo $1 | tr "," "\n"
+}
+
+function try-download-release() {
+  # TODO(zmerlynn): Now we REALLY have no excuse not to do the reboot
+  # optimization.
+
+  local -r nodeup_tar_urls=( $(split-commas "${NODEUP_TAR_URL}") )
+  local -r nodeup_tar="${nodeup_tar_urls[0]##*/}"
+  if [[ -n "${NODEUP_TAR_HASH:-}" ]]; then
+    local -r nodeup_tar_hash="${NODEUP_TAR_HASH}"
+  else
+    # TODO: Remove?
+    echo "Downloading binary release sha1 (not found in env)"
+    download-or-bust "" "${nodeup_tar_urls[@]/.tar.gz/.tar.gz.sha1}"
+    local -r nodeup_tar_hash=$(cat "${nodeup_tar}.sha1")
+  fi
+
+  echo "Downloading binary release tar (${nodeup_tar_urls[@]})"
+  download-or-bust "${nodeup_tar_hash}" "${nodeup_tar_urls[@]}"
+
+  echo "Unpacking and checking integrity of nodeup"
+  rm -rf nodeup
+  tar xzf "${nodeup_tar}" && tar tzf "${nodeup_tar}" > /dev/null
+}
+
+function download-release() {
+  # In case of failure checking integrity of release, retry.
+  until try-download-release; do
+    sleep 15
+    echo "Couldn't download release. Retrying..."
+  done
+
+  echo "Running release install script"
+  ( cd nodeup/root; ./nodeup --conf=metadata://{{ .CloudProvider }}/config --v=8 )
+}
+
+####################################################################################
+
+echo "== nodeup node config starting =="
+ensure-basic-networking
+ensure-install-dir
+download-release
+echo "== nodeup node config done =="
diff --git a/upup/models/cloudup/pki/kubecfg b/upup/models/cloudup/pki/kubecfg
new file mode 100644
index 0000000000..9f9b49b5cf
--- /dev/null
+++ b/upup/models/cloudup/pki/kubecfg
@@ -0,0 +1,3 @@
+subject:
+  CommonName: kubecfg
+type: client
diff --git a/upup/models/cloudup/pki/kubelet b/upup/models/cloudup/pki/kubelet
new file mode 100644
index 0000000000..ce9bfc902c
--- /dev/null
+++ b/upup/models/cloudup/pki/kubelet
@@ -0,0 +1,3 @@
+subject:
+  CommonName: kubelet
+type: client
diff --git a/upup/models/cloudup/pki/master b/upup/models/cloudup/pki/master
new file mode 100644
index 0000000000..32daaef18e
--- /dev/null
+++ b/upup/models/cloudup/pki/master
@@ -0,0 +1,12 @@
+subject:
+  CommonName: kubernetes-master
+type: server
+alternateNames:
+  - kubernetes
+  - kubernetes.default
+  - kubernetes.default.svc
+  - kubernetes.default.svc.{{ .DNSDomain }}
+  - {{ .MasterName }}
+  - {{ .MasterPublicIP }}
+  - {{ .MasterInternalIP }}
+  - {{ .WellKnownServiceIP 1 }}
diff --git a/upup/models/nodeup/_kubernetes_master/_kube-master-addons/files/etc/kubernetes/kube-master-addons.sh b/upup/models/nodeup/_kubernetes_master/_kube-master-addons/files/etc/kubernetes/kube-master-addons.sh
new file mode 100644
index 0000000000..94c301fe12
--- /dev/null
+++ b/upup/models/nodeup/_kubernetes_master/_kube-master-addons/files/etc/kubernetes/kube-master-addons.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+# Copyright 2014 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# loadedImageFlags is a bit-flag to track which docker images loaded successfully.
+let loadedImageFlags=0
+
+while true; do
+  restart_docker=false
+
+  if which docker 1>/dev/null 2>&1; then
+
+    timeout 30 docker load -i /srv/salt/kube-bins/kube-apiserver.tar 1>/dev/null 2>&1
+    rc=$?
+    if [[ $rc == 0 ]]; then
+      let loadedImageFlags="$loadedImageFlags|1"
+    elif [[ $rc == 124 ]]; then
+      restart_docker=true
+    fi
+
+    timeout 30 docker load -i /srv/salt/kube-bins/kube-scheduler.tar 1>/dev/null 2>&1
+    rc=$?
+    if [[ $rc == 0 ]]; then
+      let loadedImageFlags="$loadedImageFlags|2"
+    elif [[ $rc == 124 ]]; then
+      restart_docker=true
+    fi
+
+    timeout 30 docker load -i /srv/salt/kube-bins/kube-controller-manager.tar 1>/dev/null 2>&1
+    rc=$?
+    if [[ $rc == 0 ]]; then
+      let loadedImageFlags="$loadedImageFlags|4"
+    elif [[ $rc == 124 ]]; then
+      restart_docker=true
+    fi
+  fi
+
+  # All required docker images are loaded; exit the while loop.
+  if [[ $loadedImageFlags == 7 ]]; then break; fi
+
+  # Sometimes "docker load" hangs; restarting the docker daemon resolves the issue.
+  if [[ "${restart_docker}" == "true" ]]; then
+    if ! service docker restart; then # Try systemctl if there's no service command.
+ systemctl restart docker + fi + fi + + # sleep for 15 seconds before attempting to load docker images again + sleep 15 + +done + +# Now exit. After kube-push, salt will notice that the service is down and it +# will start it and new docker images will be loaded. \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/_kube-master-addons/files/etc/kubernetes/kube-master-addons.sh.meta b/upup/models/nodeup/_kubernetes_master/_kube-master-addons/files/etc/kubernetes/kube-master-addons.sh.meta new file mode 100644 index 0000000000..aac9c2ebd8 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/_kube-master-addons/files/etc/kubernetes/kube-master-addons.sh.meta @@ -0,0 +1,3 @@ +{ + "mode": "0755" +} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/_kube-master-addons/services/kube-master-addons.service b/upup/models/nodeup/_kubernetes_master/_kube-master-addons/services/kube-master-addons.service new file mode 100644 index 0000000000..f0502bdc78 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/_kube-master-addons/services/kube-master-addons.service @@ -0,0 +1,9 @@ +[Unit] +Description=Kubernetes-Master Addon Object Manager +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +ExecStart=/etc/kubernetes/kube-master-addons.sh + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/ca.crt.template b/upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/ca.crt.template new file mode 100644 index 0000000000..4059aa9e3e --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/ca.crt.template @@ -0,0 +1 @@ +{{ .CACertificate.AsString }} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/server.cert.template b/upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/server.cert.template new file mode 100644 index 0000000000..41910eade9 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/server.cert.template @@ -0,0 +1 @@ +{{ .APIServer.Certificate.AsString }} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/server.key.template b/upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/server.key.template new file mode 100644 index 0000000000..c49bfd269b --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/certs/files/srv/kubernetes/server.key.template @@ -0,0 +1 @@ +{{ .APIServer.Key.AsString }} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/etcd/files/etc/kubernetes/manifests/etcd-events.manifest b/upup/models/nodeup/_kubernetes_master/etcd/files/etc/kubernetes/manifests/etcd-events.manifest new file mode 100644 index 0000000000..a9677f6c02 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/etcd/files/etc/kubernetes/manifests/etcd-events.manifest @@ -0,0 +1,65 @@ +{ +"apiVersion": "v1", +"kind": "Pod", +"metadata": { + "name":"etcd-server-events", + "namespace": "kube-system" +}, +"spec":{ +"hostNetwork": true, +"containers":[ + { + "name": "etcd-container", + "image": "gcr.io/google_containers/etcd:2.2.1", + "resources": { + "requests": { + "cpu": "100m" + } + }, + "command": [ + "/bin/sh", + "-c", + "/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:2381 --addr 127.0.0.1:4002 --bind-addr 127.0.0.1:4002 --data-dir /var/etcd/data-events 1>>/var/log/etcd-events.log 2>&1" + ], + 
"livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "port": 4002, + "path": "/health" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + }, + "ports":[ + { "name": "serverport", + "containerPort": 2381, + "hostPort": 2381 + },{ + "name": "clientport", + "containerPort": 4002, + "hostPort": 4002 + } + ], + "volumeMounts": [ + {"name": "varetcd", + "mountPath": "/var/etcd", + "readOnly": false + }, + {"name": "varlogetcd", + "mountPath": "/var/log/etcd-events.log", + "readOnly": false + } + ] + } +], +"volumes":[ + { "name": "varetcd", + "hostPath": { + "path": "/mnt/master-pd/var/etcd"} + }, + { "name": "varlogetcd", + "hostPath": { + "path": "/var/log/etcd-events.log"} + } +] +}} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/etcd/files/etc/kubernetes/manifests/etcd.manifest b/upup/models/nodeup/_kubernetes_master/etcd/files/etc/kubernetes/manifests/etcd.manifest new file mode 100644 index 0000000000..541acc9eaa --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/etcd/files/etc/kubernetes/manifests/etcd.manifest @@ -0,0 +1,65 @@ +{ +"apiVersion": "v1", +"kind": "Pod", +"metadata": { + "name":"etcd-server", + "namespace": "kube-system" +}, +"spec":{ +"hostNetwork": true, +"containers":[ + { + "name": "etcd-container", + "image": "gcr.io/google_containers/etcd:2.2.1", + "resources": { + "requests": { + "cpu": "200m" + } + }, + "command": [ + "/bin/sh", + "-c", + "/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:2380 --addr 127.0.0.1:4001 --bind-addr 127.0.0.1:4001 --data-dir /var/etcd/data 1>>/var/log/etcd.log 2>&1" + ], + "livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "port": 4001, + "path": "/health" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + }, + "ports":[ + { "name": "serverport", + "containerPort": 2380, + "hostPort": 2380 + },{ + "name": "clientport", + "containerPort": 4001, + "hostPort": 4001 + } + ], + "volumeMounts": [ + {"name": "varetcd", + "mountPath": "/var/etcd", + "readOnly": false + }, + {"name": "varlogetcd", + "mountPath": "/var/log/etcd.log", + "readOnly": false + } + ] + } +], +"volumes":[ + { "name": "varetcd", + "hostPath": { + "path": "/mnt/master-pd/var/etcd"} + }, + { "name": "varlogetcd", + "hostPath": { + "path": "/var/log/etcd.log"} + } +] +}} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd-events.log b/upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd-events.log new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd-events.log.meta b/upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd-events.log.meta new file mode 100644 index 0000000000..56d0e34103 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd-events.log.meta @@ -0,0 +1,3 @@ +{ + "ifNotExists": true +} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd.log b/upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd.log new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd.log.meta b/upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd.log.meta new file mode 100644 index 0000000000..56d0e34103 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/etcd/files/var/log/etcd.log.meta @@ -0,0 +1,3 @@ +{ + "ifNotExists": true +} \ No newline at end of file diff --git 
a/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/addons/namespace.yaml b/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/addons/namespace.yaml
new file mode 100644
index 0000000000..986f4b4822
--- /dev/null
+++ b/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/addons/namespace.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: kube-system
diff --git a/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addon-update.sh b/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addon-update.sh
new file mode 100644
index 0000000000..5f20bffa7c
--- /dev/null
+++ b/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addon-update.sh
@@ -0,0 +1,514 @@
+#!/bin/bash
+
+# Copyright 2015 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The business logic for whether a given object should be created
+# was already enforced by salt, and /etc/kubernetes/addons is the
+# managed result of that. Start everything below that directory.
+
+# Parameters
+# $1 path to add-ons
+
+
+# LIMITATIONS
+# 1. Controllers are not updated unless their name is changed.
+# 2. Services will not be updated unless their name is changed,
+#    but for services we actually want updates without a name change.
+# 3. JSON files are not handled at all; currently add-ons must be
+#    in YAML files.
+# 4. The exit code is probably not always correct (it has not been checked
+#    carefully whether it works in 100% of cases).
+# 5. There are no unit tests.
+# 6. Will not work if the total length of paths to add-ons is greater than
+#    bash can handle. This is probably not a problem: ARG_MAX=2097152 on GCE.
+# 7. Performance issue: YAML files are read many times in a single execution.
+
+# Cosmetic improvements to be done:
+# 1. Improve the log function; add timestamp, file name, etc.
+# 2. Logging doesn't work from functions that print things out.
+# 3. kubectl prints its output to stderr (the output should be captured and
+#    then logged).
+
+
+
+# global config
+KUBECTL=${TEST_KUBECTL:-}   # substitute for tests
+KUBECTL=${KUBECTL:-${KUBECTL_BIN:-}}
+KUBECTL=${KUBECTL:-/usr/local/bin/kubectl}
+if [[ ! -x ${KUBECTL} ]]; then
+  echo "ERROR: kubectl command (${KUBECTL}) not found or is not executable" 1>&2
+  exit 1
+fi
+
+# If an add-on definition is incorrect, or a definition has just disappeared
+# from the local directory, the script will still keep on retrying.
+# The script does not end until all retries are done, so
+# one invalid manifest may block updates of other add-ons.
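+# For example, with DELAY_AFTER_ERROR_SEC=10 and a retry period of 60 seconds
+# (see NUM_TRIES at the bottom of this file), NUM_TRIES=60/10=6, so a single
+# broken manifest can stall a reconcile cycle for about a minute.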
+# Be careful how you set these parameters +NUM_TRIES=1 # will be updated based on input parameters +DELAY_AFTER_ERROR_SEC=${TEST_DELAY_AFTER_ERROR_SEC:=10} + + +# remember that you can't log from functions that print some output (because +# logs are also printed on stdout) +# $1 level +# $2 message +function log() { + # manage log levels manually here + + # add the timestamp if you find it useful + case $1 in + DB3 ) +# echo "$1: $2" + ;; + DB2 ) +# echo "$1: $2" + ;; + DBG ) +# echo "$1: $2" + ;; + INFO ) + echo "$1: $2" + ;; + WRN ) + echo "$1: $2" + ;; + ERR ) + echo "$1: $2" + ;; + * ) + echo "INVALID_LOG_LEVEL $1: $2" + ;; + esac +} + +#$1 yaml file path +function get-object-kind-from-file() { + # prints to stdout, so log cannot be used + #WARNING: only yaml is supported + cat $1 | ${PYTHON} -c ''' +try: + import pipes,sys,yaml + y = yaml.load(sys.stdin) + labels = y["metadata"]["labels"] + if ("kubernetes.io/cluster-service", "true") not in labels.iteritems(): + # all add-ons must have the label "kubernetes.io/cluster-service". + # Otherwise we are ignoring them (the update will not work anyway) + print "ERROR" + else: + print y["kind"] +except Exception, ex: + print "ERROR" + ''' +} + +# $1 yaml file path +# returns a string of the form / (we call it nsnames) +function get-object-nsname-from-file() { + # prints to stdout, so log cannot be used + #WARNING: only yaml is supported + #addons that do not specify a namespace are assumed to be in "default". + cat $1 | ${PYTHON} -c ''' +try: + import pipes,sys,yaml + y = yaml.load(sys.stdin) + labels = y["metadata"]["labels"] + if ("kubernetes.io/cluster-service", "true") not in labels.iteritems(): + # all add-ons must have the label "kubernetes.io/cluster-service". + # Otherwise we are ignoring them (the update will not work anyway) + print "ERROR" + else: + try: + print "%s/%s" % (y["metadata"]["namespace"], y["metadata"]["name"]) + except Exception, ex: + print "default/%s" % y["metadata"]["name"] +except Exception, ex: + print "ERROR" + ''' +} + +# $1 addon directory path +# $2 addon type (e.g. ReplicationController) +# echoes the string with paths to files containing addon for the given type +# works only for yaml files (!) (ignores json files) +function get-addon-paths-from-disk() { + # prints to stdout, so log cannot be used + local -r addon_dir=$1 + local -r obj_type=$2 + local kind + local file_path + for file_path in $(find ${addon_dir} -name \*.yaml); do + kind=$(get-object-kind-from-file ${file_path}) + # WARNING: assumption that the topmost indentation is zero (I'm not sure yaml allows for topmost indentation) + if [[ "${kind}" == "${obj_type}" ]]; then + echo ${file_path} + fi + done +} + +# waits for all subprocesses +# returns 0 if all of them were successful and 1 otherwise +function wait-for-jobs() { + local rv=0 + local pid + for pid in $(jobs -p); do + wait ${pid} + if [[ $? -ne 0 ]]; then + rv=1; + log ERR "error in pid ${pid}" + fi + log DB2 "pid ${pid} completed, current error code: ${rv}" + done + return ${rv} +} + + +function run-until-success() { + local -r command=$1 + local tries=$2 + local -r delay=$3 + local -r command_name=$1 + while [ ${tries} -gt 0 ]; do + log DBG "executing: '$command'" + # let's give the command as an argument to bash -c, so that we can use + # && and || inside the command itself + /bin/bash -c "${command}" && \ + log DB3 "== Successfully executed ${command_name} at $(date -Is) ==" && \ + return 0 + let tries=tries-1 + log INFO "== Failed to execute ${command_name} at $(date -Is). 
${tries} tries remaining. ==" + sleep ${delay} + done + return 1 +} + +# $1 object type +# returns a list of / pairs (nsnames) +function get-addon-nsnames-from-server() { + local -r obj_type=$1 + "${KUBECTL}" get "${obj_type}" --all-namespaces -o go-template="{{range.items}}{{.metadata.namespace}}/{{.metadata.name}} {{end}}" --api-version=v1 -l kubernetes.io/cluster-service=true +} + +# returns the characters after the last separator (including) +# If the separator is empty or if it doesn't appear in the string, +# an empty string is printed +# $1 input string +# $2 separator (must be single character, or empty) +function get-suffix() { + # prints to stdout, so log cannot be used + local -r input_string=$1 + local -r separator=$2 + local suffix + + if [[ "${separator}" == "" ]]; then + echo "" + return + fi + + if [[ "${input_string}" == *"${separator}"* ]]; then + suffix=$(echo "${input_string}" | rev | cut -d "${separator}" -f1 | rev) + echo "${separator}${suffix}" + else + echo "" + fi +} + +# returns the characters up to the last '-' (without it) +# $1 input string +# $2 separator +function get-basename() { + # prints to stdout, so log cannot be used + local -r input_string=$1 + local -r separator=$2 + local suffix + suffix="$(get-suffix ${input_string} ${separator})" + # this will strip the suffix (if matches) + echo ${input_string%$suffix} +} + +function delete-object() { + local -r obj_type=$1 + local -r namespace=$2 + local -r obj_name=$3 + log INFO "Deleting ${obj_type} ${namespace}/${obj_name}" + + run-until-success "${KUBECTL} delete --namespace=${namespace} ${obj_type} ${obj_name}" ${NUM_TRIES} ${DELAY_AFTER_ERROR_SEC} +} + +function create-object() { + local -r obj_type=$1 + local -r file_path=$2 + + local nsname_from_file + nsname_from_file=$(get-object-nsname-from-file ${file_path}) + if [[ "${nsname_from_file}" == "ERROR" ]]; then + log INFO "Cannot read object name from ${file_path}. Ignoring" + return 1 + fi + IFS='/' read namespace obj_name <<< "${nsname_from_file}" + + log INFO "Creating new ${obj_type} from file ${file_path} in namespace ${namespace}, name: ${obj_name}" + # this will keep on failing if the ${file_path} disappeared in the meantime. + # Do not use too many retries. + run-until-success "${KUBECTL} create --namespace=${namespace} -f ${file_path}" ${NUM_TRIES} ${DELAY_AFTER_ERROR_SEC} +} + +function update-object() { + local -r obj_type=$1 + local -r namespace=$2 + local -r obj_name=$3 + local -r file_path=$4 + log INFO "updating the ${obj_type} ${namespace}/${obj_name} with the new definition ${file_path}" + delete-object ${obj_type} ${namespace} ${obj_name} + create-object ${obj_type} ${file_path} +} + +# deletes the objects from the server +# $1 object type +# $2 a list of object nsnames +function delete-objects() { + local -r obj_type=$1 + local -r obj_nsnames=$2 + local namespace + local obj_name + for nsname in ${obj_nsnames}; do + IFS='/' read namespace obj_name <<< "${nsname}" + delete-object ${obj_type} ${namespace} ${obj_name} & + done +} + +# creates objects from the given files +# $1 object type +# $2 a list of paths to definition files +function create-objects() { + local -r obj_type=$1 + local -r file_paths=$2 + local file_path + for file_path in ${file_paths}; do + # Remember that the file may have disappear by now + # But we don't want to check it here because + # such race condition may always happen after + # we check it. Let's have the race + # condition happen a bit more often so that + # we see that our tests pass anyway. 
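+    # Each create-object below is a background job; the caller's wait-for-jobs
+    # is what eventually collects the exit codes.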
+ create-object ${obj_type} ${file_path} & + done +} + +# updates objects +# $1 object type +# $2 a list of update specifications +# each update specification is a ';' separated pair: ; +function update-objects() { + local -r obj_type=$1 # ignored + local -r update_spec=$2 + local objdesc + local nsname + local obj_name + local namespace + + for objdesc in ${update_spec}; do + IFS=';' read nsname file_path <<< "${objdesc}" + IFS='/' read namespace obj_name <<< "${nsname}" + + update-object ${obj_type} ${namespace} ${obj_name} ${file_path} & + done +} + +# Global variables set by function match-objects. +nsnames_for_delete="" # a list of object nsnames to be deleted +for_update="" # a list of pairs ; for objects that should be updated +nsnames_for_ignore="" # a list of object nsnames that will be ignored +new_files="" # a list of file paths that weren't matched by any existing objects (these objects must be created now) + + +# $1 path to files with objects +# $2 object type in the API (ReplicationController or Service) +# $3 name separator (single character or empty) +function match-objects() { + local -r addon_dir=$1 + local -r obj_type=$2 + local -r separator=$3 + + # output variables (globals) + nsnames_for_delete="" + for_update="" + nsnames_for_ignore="" + new_files="" + + addon_nsnames_on_server=$(get-addon-nsnames-from-server "${obj_type}") + # if the api server is unavailable then abandon the update for this cycle + if [[ $? -ne 0 ]]; then + log ERR "unable to query ${obj_type} - exiting" + exit 1 + fi + + addon_paths_in_files=$(get-addon-paths-from-disk "${addon_dir}" "${obj_type}") + + log DB2 "addon_nsnames_on_server=${addon_nsnames_on_server}" + log DB2 "addon_paths_in_files=${addon_paths_in_files}" + + local matched_files="" + + local basensname_on_server="" + local nsname_on_server="" + local suffix_on_server="" + local nsname_from_file="" + local suffix_from_file="" + local found=0 + local addon_path="" + + # objects that were moved between namespaces will have different nsname + # because the namespace is included. So they will be treated + # like different objects and not updated but deleted and created again + # (in the current version update is also delete+create, so it does not matter) + for nsname_on_server in ${addon_nsnames_on_server}; do + basensname_on_server=$(get-basename ${nsname_on_server} ${separator}) + suffix_on_server="$(get-suffix ${nsname_on_server} ${separator})" + + log DB3 "Found existing addon ${nsname_on_server}, basename=${basensname_on_server}" + + # check if the addon is present in the directory and decide + # what to do with it + # this is not optimal because we're reading the files over and over + # again. But for small number of addons it doesn't matter so much. + found=0 + for addon_path in ${addon_paths_in_files}; do + nsname_from_file=$(get-object-nsname-from-file ${addon_path}) + if [[ "${nsname_from_file}" == "ERROR" ]]; then + log INFO "Cannot read object name from ${addon_path}. 
Ignoring" + continue + else + log DB2 "Found object name '${nsname_from_file}' in file ${addon_path}" + fi + suffix_from_file="$(get-suffix ${nsname_from_file} ${separator})" + + log DB3 "matching: ${basensname_on_server}${suffix_from_file} == ${nsname_from_file}" + if [[ "${basensname_on_server}${suffix_from_file}" == "${nsname_from_file}" ]]; then + log DB3 "matched existing ${obj_type} ${nsname_on_server} to file ${addon_path}; suffix_on_server=${suffix_on_server}, suffix_from_file=${suffix_from_file}" + found=1 + matched_files="${matched_files} ${addon_path}" + if [[ "${suffix_on_server}" == "${suffix_from_file}" ]]; then + nsnames_for_ignore="${nsnames_for_ignore} ${nsname_from_file}" + else + for_update="${for_update} ${nsname_on_server};${addon_path}" + fi + break + fi + done + if [[ ${found} -eq 0 ]]; then + log DB2 "No definition file found for replication controller ${nsname_on_server}. Scheduling for deletion" + nsnames_for_delete="${nsnames_for_delete} ${nsname_on_server}" + fi + done + + log DB3 "matched_files=${matched_files}" + + + # note that if the addon file is invalid (or got removed after listing files + # but before we managed to match it) it will not be matched to any + # of the existing objects. So we will treat it as a new file + # and try to create its object. + for addon_path in ${addon_paths_in_files}; do + echo ${matched_files} | grep "${addon_path}" >/dev/null + if [[ $? -ne 0 ]]; then + new_files="${new_files} ${addon_path}" + fi + done +} + + + +function reconcile-objects() { + local -r addon_path=$1 + local -r obj_type=$2 + local -r separator=$3 # name separator + match-objects ${addon_path} ${obj_type} ${separator} + + log DBG "${obj_type}: nsnames_for_delete=${nsnames_for_delete}" + log DBG "${obj_type}: for_update=${for_update}" + log DBG "${obj_type}: nsnames_for_ignore=${nsnames_for_ignore}" + log DBG "${obj_type}: new_files=${new_files}" + + delete-objects "${obj_type}" "${nsnames_for_delete}" + # wait for jobs below is a protection against changing the basename + # of a replication controllerm without changing the selector. + # If we don't wait, the new rc may be created before the old one is deleted + # In such case the old one will wait for all its pods to be gone, but the pods + # are created by the new replication controller. + # passing --cascade=false could solve the problem, but we want + # all orphan pods to be deleted. + wait-for-jobs + deleteResult=$? + + create-objects "${obj_type}" "${new_files}" + update-objects "${obj_type}" "${for_update}" + + local nsname + for nsname in ${nsnames_for_ignore}; do + log DB2 "The ${obj_type} ${nsname} is already up to date" + done + + wait-for-jobs + createUpdateResult=$? + + if [[ ${deleteResult} -eq 0 ]] && [[ ${createUpdateResult} -eq 0 ]]; then + return 0 + else + return 1 + fi +} + +function update-addons() { + local -r addon_path=$1 + # be careful, reconcile-objects uses global variables + reconcile-objects ${addon_path} ReplicationController "-" & + reconcile-objects ${addon_path} Deployment "-" & + + # We don't expect names to be versioned for the following kinds, so + # we match the entire name, ignoring version suffix. + # That's why we pass an empty string as the version separator. + # If the description differs on disk, the object should be recreated. + # This is not implemented in this version. + reconcile-objects ${addon_path} Service "" & + reconcile-objects ${addon_path} PersistentVolume "" & + reconcile-objects ${addon_path} PersistentVolumeClaim "" & + + wait-for-jobs + if [[ $? 
-eq 0 ]]; then
+    log INFO "== Kubernetes addon update completed successfully at $(date -Is) =="
+  else
+    log WRN "== Kubernetes addon update completed with errors at $(date -Is) =="
+  fi
+}
+
+# input parameters:
+# $1 input directory
+# $2 retry period in seconds - the script will retry api-server errors for approximately
+#    this amount of time (it is not very precise), at an interval equal to $DELAY_AFTER_ERROR_SEC.
+#
+
+if [[ $# -ne 2 ]]; then
+  echo "Illegal number of parameters. Usage: $0 <addon-dir> <retry-period-sec>" 1>&2
+  exit 1
+fi
+
+NUM_TRIES=$(($2 / ${DELAY_AFTER_ERROR_SEC}))
+if [[ ${NUM_TRIES} -le 0 ]]; then
+  NUM_TRIES=1
+fi
+
+addon_path=$1
+update-addons ${addon_path}
diff --git a/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addon-update.sh.meta b/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addon-update.sh.meta
new file mode 100644
index 0000000000..aac9c2ebd8
--- /dev/null
+++ b/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addon-update.sh.meta
@@ -0,0 +1,3 @@
+{
+  "mode": "0755"
+}
\ No newline at end of file
diff --git a/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addons.sh b/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addons.sh
new file mode 100644
index 0000000000..24db74af53
--- /dev/null
+++ b/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addons.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+# Copyright 2014 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# The business logic for whether a given object should be created
+# was already enforced by salt, and /etc/kubernetes/addons is the
+# managed result of that. Start everything below that directory.
+KUBECTL=${KUBECTL_BIN:-/usr/local/bin/kubectl}
+
+ADDON_CHECK_INTERVAL_SEC=${TEST_ADDON_CHECK_INTERVAL_SEC:-600}
+
+SYSTEM_NAMESPACE=kube-system
+trusty_master=${TRUSTY_MASTER:-false}
+
+function ensure_python() {
+  if ! python --version > /dev/null 2>&1; then
+    echo "No python on the machine, will use a python image"
+    local -r PYTHON_IMAGE=gcr.io/google_containers/python:v1
+    export PYTHON="docker run --interactive --rm --net=none ${PYTHON_IMAGE} python"
+  else
+    export PYTHON=python
+  fi
+}
+
+# $1 filename of addon to start.
+# $2 count of tries to start the addon.
+# $3 delay in seconds between two consecutive tries
+# $4 namespace
+function start_addon() {
+  local -r addon_filename=$1;
+  local -r tries=$2;
+  local -r delay=$3;
+  local -r namespace=$4
+
+  create-resource-from-string "$(cat ${addon_filename})" "${tries}" "${delay}" "${addon_filename}" "${namespace}"
+}
+
+# $1 string with json or yaml.
+# $2 count of tries to start the addon.
+# $3 delay in seconds between two consecutive tries
+# $4 name of this object to use when logging about it.
+# $5 namespace for this object
+function create-resource-from-string() {
+  local -r config_string=$1;
+  local tries=$2;
+  local -r delay=$3;
+  local -r config_name=$4;
+  local -r namespace=$5;
+  while [ ${tries} -gt 0 ]; do
+    echo "${config_string}" | ${KUBECTL} --namespace="${namespace}" apply -f - && \
+      echo "== Successfully started ${config_name} in namespace ${namespace} at $(date -Is)" && \
+      return 0;
+    let tries=tries-1;
+    echo "== Failed to start ${config_name} in namespace ${namespace} at $(date -Is). ${tries} tries remaining. =="
+    sleep ${delay};
+  done
+  return 1;
+}
+
+# The business logic for whether a given object should be created
+# was already enforced by salt, and /etc/kubernetes/addons is the
+# managed result of that. Start everything below that directory.
+echo "== Kubernetes addon manager started at $(date -Is) with ADDON_CHECK_INTERVAL_SEC=${ADDON_CHECK_INTERVAL_SEC} =="
+
+ensure_python
+
+# Load the kube-env, which has all the environment variables we care
+# about, in a flat yaml format.
+kube_env_yaml="/var/cache/kubernetes-install/kube_env.yaml"
+if [ -e "${kube_env_yaml}" ]; then
+  eval $(${PYTHON} -c '''
+import pipes,sys,yaml
+
+for k,v in yaml.load(sys.stdin).iteritems():
+  print("readonly {var}={value}".format(var = k, value = pipes.quote(str(v))))
+''' < "${kube_env_yaml}")
+fi
+
+
+# Create the namespace that will be used to host the cluster-level add-ons.
+start_addon /etc/kubernetes/addons/namespace.yaml 100 10 "" &
+
+# Wait for the default service account to be created in the kube-system namespace.
+token_found=""
+while [ -z "${token_found}" ]; do
+  sleep .5
+  token_found=$(${KUBECTL} get --namespace="${SYSTEM_NAMESPACE}" serviceaccount default -o go-template="{{with index .secrets 0}}{{.name}}{{end}}" || true)
+done
+
+echo "== default service account in the ${SYSTEM_NAMESPACE} namespace has token ${token_found} =="
+
+# Create admission_control objects, if defined, before any other addon services. If the limits
+# are defined in a namespace other than default, we should still create the limits for the
+# default namespace.
+for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \)); do
+  start_addon "${obj}" 100 10 default &
+  echo "++ obj ${obj} is created ++"
+done
+
+# Check if the configuration has changed recently - in case the user
+# created/updated/deleted the files on the master.
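+# The loop below keeps each iteration close to ADDON_CHECK_INTERVAL_SEC: if a
+# reconcile pass takes 40s against the default 600s interval, the script
+# sleeps for the remaining 560s before starting the next pass.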
+while true; do + start_sec=$(date +"%s") + #kube-addon-update.sh must be deployed in the same directory as this file + `dirname $0`/kube-addon-update.sh /etc/kubernetes/addons ${ADDON_CHECK_INTERVAL_SEC} + end_sec=$(date +"%s") + len_sec=$((${end_sec}-${start_sec})) + # subtract the time passed from the sleep time + if [[ ${len_sec} -lt ${ADDON_CHECK_INTERVAL_SEC} ]]; then + sleep_time=$((${ADDON_CHECK_INTERVAL_SEC}-${len_sec})) + sleep ${sleep_time} + fi +done diff --git a/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addons.sh.meta b/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addons.sh.meta new file mode 100644 index 0000000000..aac9c2ebd8 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-addons/files/etc/kubernetes/kube-addons.sh.meta @@ -0,0 +1,3 @@ +{ + "mode": "0755" +} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/kube-addons/services/kube-addons.service b/upup/models/nodeup/_kubernetes_master/kube-addons/services/kube-addons.service new file mode 100644 index 0000000000..293ca10ba7 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-addons/services/kube-addons.service @@ -0,0 +1,9 @@ +[Unit] +Description=Kubernetes Addon Object Manager +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +ExecStart=/etc/kubernetes/kube-addons.sh + +[Install] +WantedBy=multi-user.target diff --git a/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/etc/kubernetes/manifests/kube-apiserver.manifest.template b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/etc/kubernetes/manifests/kube-apiserver.manifest.template new file mode 100644 index 0000000000..5f3b6eb2e0 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/etc/kubernetes/manifests/kube-apiserver.manifest.template @@ -0,0 +1,100 @@ +{ +"apiVersion": "v1", +"kind": "Pod", +"metadata": { + "name":"kube-apiserver", + "namespace": "kube-system" +}, +"spec":{ +"hostNetwork": true, +"containers":[ + { + "name": "kube-apiserver", + "image": "{{ .APIServer.Image }}", + "resources": { + "requests": { + "cpu": "250m" + } + }, + "command": [ + "/bin/sh", + "-c", + "/usr/local/bin/kube-apiserver {{ BuildFlags .APIServer }} 1>>/var/log/kube-apiserver.log 2>&1" + ], + "livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "port": 8080, + "path": "/healthz" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + }, + "ports":[ + { "name": "https", + "containerPort": {{ .APIServer.SecurePort }}, + "hostPort": {{ .APIServer.SecurePort }} },{ + "name": "local", + "containerPort": 8080, + "hostPort": 8080} + ], + "volumeMounts": [ + {"name": "usrsharessl","mountPath": "/usr/share/ssl", "readOnly": true}, {"name": "usrssl","mountPath": "/usr/ssl", "readOnly": true}, {"name": "usrlibssl","mountPath": "/usr/lib/ssl", "readOnly": true}, {"name": "usrlocalopenssl","mountPath": "/usr/local/openssl", "readOnly": true}, + + { "name": "srvkube", + "mountPath": "{{ .APIServer.PathSrvKubernetes }}", + "readOnly": true}, + { "name": "logfile", + "mountPath": "/var/log/kube-apiserver.log", + "readOnly": false}, + { "name": "etcssl", + "mountPath": "/etc/ssl", + "readOnly": true}, + { "name": "varssl", + "mountPath": "/var/ssl", + "readOnly": true}, + { "name": "etcopenssl", + "mountPath": "/etc/openssl", + "readOnly": true}, + { "name": "etcpkitls", + "mountPath": "/etc/pki/tls", + "readOnly": true}, + { "name": "srvsshproxy", + "mountPath": "{{ .APIServer.PathSrvSshproxy }}", + 
"readOnly": false} + ] + } +], +"volumes":[ + {"name": "usrsharessl","hostPath": {"path": "/usr/share/ssl"}}, {"name": "usrssl","hostPath": {"path": "/usr/ssl"}}, {"name": "usrlibssl","hostPath": {"path": "/usr/lib/ssl"}}, {"name": "usrlocalopenssl","hostPath": {"path": "/usr/local/openssl"}}, + + { "name": "srvkube", + "hostPath": { + "path": "{{ .APIServer.PathSrvKubernetes }}"} + }, + { "name": "logfile", + "hostPath": { + "path": "/var/log/kube-apiserver.log"} + }, + { "name": "etcssl", + "hostPath": { + "path": "/etc/ssl"} + }, + { "name": "varssl", + "hostPath": { + "path": "/var/ssl"} + }, + { "name": "etcopenssl", + "hostPath": { + "path": "/etc/openssl"} + }, + { "name": "etcpkitls", + "hostPath": { + "path": "/etc/pki/tls"} + }, + { "name": "srvsshproxy", + "hostPath": { + "path": "{{ .APIServer.PathSrvSshproxy }}"} + } +] +}} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/basic_auth.csv.template b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/basic_auth.csv.template new file mode 100644 index 0000000000..c1776e01fc --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/basic_auth.csv.template @@ -0,0 +1 @@ +{{ .KubePassword }},{{ .KubeUser }},admin diff --git a/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/basic_auth.csv.template.meta b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/basic_auth.csv.template.meta new file mode 100644 index 0000000000..26aab40054 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/basic_auth.csv.template.meta @@ -0,0 +1,3 @@ +{ + "mode": "0600" +} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/known_tokens.csv.template b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/known_tokens.csv.template new file mode 100644 index 0000000000..20c427c8c2 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/known_tokens.csv.template @@ -0,0 +1,3 @@ +{{ range $id, $token := .Tokens }} +{{ $token }},{{ $id }},{{ $id }} +{{ end }} diff --git a/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/known_tokens.csv.template.meta b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/known_tokens.csv.template.meta new file mode 100644 index 0000000000..26aab40054 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/srv/kubernetes/known_tokens.csv.template.meta @@ -0,0 +1,3 @@ +{ + "mode": "0600" +} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/var/log/kube-apiserver.log b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/var/log/kube-apiserver.log new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/var/log/kube-apiserver.log.meta b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/var/log/kube-apiserver.log.meta new file mode 100644 index 0000000000..56d0e34103 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-apiserver/files/var/log/kube-apiserver.log.meta @@ -0,0 +1,3 @@ +{ + "ifNotExists": true +} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/kube-apiserver/options/_aws/kube-apiserver.aws 
b/upup/models/nodeup/_kubernetes_master/kube-apiserver/options/_aws/kube-apiserver.aws new file mode 100644 index 0000000000..dfa9e70039 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-apiserver/options/_aws/kube-apiserver.aws @@ -0,0 +1,2 @@ +APIServer: + CloudProvider: aws diff --git a/upup/models/nodeup/_kubernetes_master/kube-apiserver/options/_gce/kube-apiserver.gce b/upup/models/nodeup/_kubernetes_master/kube-apiserver/options/_gce/kube-apiserver.gce new file mode 100644 index 0000000000..2bc27bcbf1 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-apiserver/options/_gce/kube-apiserver.gce @@ -0,0 +1,2 @@ +APIServer: + CloudProvider: gce diff --git a/upup/models/nodeup/_kubernetes_master/kube-apiserver/options/kube-apiserver b/upup/models/nodeup/_kubernetes_master/kube-apiserver/options/kube-apiserver new file mode 100644 index 0000000000..9e3996a2f9 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-apiserver/options/kube-apiserver @@ -0,0 +1,17 @@ +APIServer: + SecurePort: 443 + PathSrvKubernetes: /srv/kubernetes + PathSrvSshproxy: /srv/sshproxy + Image: gcr.io/google_containers/kube-apiserver:v1.2.2 + Address: 127.0.0.1 + EtcdServers: http://127.0.0.1:4001 + EtcdServersOverrides: /events#http://127.0.0.1:4002 + AdmissionControl: NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,PersistentVolumeLabel + ServiceClusterIPRange: 10.0.0.0/16 + ClientCAFile: /srv/kubernetes/ca.crt + BasicAuthFile: /srv/kubernetes/basic_auth.csv + TLSCertFile: /srv/kubernetes/server.cert + TLSPrivateKeyFile: /srv/kubernetes/server.key + TokenAuthFile: /srv/kubernetes/known_tokens.csv + LogLevel: 2 + AllowPrivileged: true diff --git a/upup/models/nodeup/_kubernetes_master/kube-controller-manager/files/etc/kubernetes/manifests/kube-controller-manager.template b/upup/models/nodeup/_kubernetes_master/kube-controller-manager/files/etc/kubernetes/manifests/kube-controller-manager.template new file mode 100644 index 0000000000..b40ac8ccf4 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-controller-manager/files/etc/kubernetes/manifests/kube-controller-manager.template @@ -0,0 +1,84 @@ +{ +"apiVersion": "v1", +"kind": "Pod", +"metadata": { + "name":"kube-controller-manager", + "namespace": "kube-system" +}, +"spec":{ +"hostNetwork": true, +"containers":[ + { + "name": "kube-controller-manager", + "image": "{{ .KubeControllerManager.Image }}", + "resources": { + "requests": { + "cpu": "200m" + } + }, + "command": [ + "/bin/sh", + "-c", + "/usr/local/bin/kube-controller-manager {{ BuildFlags .KubeControllerManager }} 1>>/var/log/kube-controller-manager.log 2>&1" + ], + "livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "port": 10252, + "path": "/healthz" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + }, + "volumeMounts": [ + {"name": "usrsharessl","mountPath": "/usr/share/ssl", "readOnly": true}, {"name": "usrssl","mountPath": "/usr/ssl", "readOnly": true}, {"name": "usrlibssl","mountPath": "/usr/lib/ssl", "readOnly": true}, {"name": "usrlocalopenssl","mountPath": "/usr/local/openssl", "readOnly": true}, + { "name": "srvkube", + "mountPath": "{{ .KubeControllerManager.PathSrvKubernetes }}", + "readOnly": true}, + { "name": "logfile", + "mountPath": "/var/log/kube-controller-manager.log", + "readOnly": false}, + { "name": "etcssl", + "mountPath": "/etc/ssl", + "readOnly": true}, + { "name": "varssl", + "mountPath": "/var/ssl", + "readOnly": true}, + { "name": "etcopenssl", + "mountPath": "/etc/openssl", + "readOnly": true}, + { 
"name": "etcpkitls", + "mountPath": "/etc/pki/tls", + "readOnly": true} + ] + } +], +"volumes":[ + {"name": "usrsharessl","hostPath": {"path": "/usr/share/ssl"}}, {"name": "usrssl","hostPath": {"path": "/usr/ssl"}}, {"name": "usrlibssl","hostPath": {"path": "/usr/lib/ssl"}}, {"name": "usrlocalopenssl","hostPath": {"path": "/usr/local/openssl"}}, + + { "name": "srvkube", + "hostPath": { + "path": "{{ .KubeControllerManager.PathSrvKubernetes }}"} + }, + { "name": "logfile", + "hostPath": { + "path": "/var/log/kube-controller-manager.log"} + }, + { "name": "etcssl", + "hostPath": { + "path": "/etc/ssl"} + }, + { "name": "varssl", + "hostPath": { + "path": "/var/ssl"} + }, + { "name": "etcopenssl", + "hostPath": { + "path": "/etc/openssl"} + }, + { "name": "etcpkitls", + "hostPath": { + "path": "/etc/pki/tls"} + } +] +}} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/kube-controller-manager/files/var/log/kube-controller-manager.log b/upup/models/nodeup/_kubernetes_master/kube-controller-manager/files/var/log/kube-controller-manager.log new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/_kubernetes_master/kube-controller-manager/files/var/log/kube-controller-manager.log.meta b/upup/models/nodeup/_kubernetes_master/kube-controller-manager/files/var/log/kube-controller-manager.log.meta new file mode 100644 index 0000000000..56d0e34103 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-controller-manager/files/var/log/kube-controller-manager.log.meta @@ -0,0 +1,3 @@ +{ + "ifNotExists": true +} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/_aws/kube-controller-manager.aws b/upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/_aws/kube-controller-manager.aws new file mode 100644 index 0000000000..f718055e2b --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/_aws/kube-controller-manager.aws @@ -0,0 +1,2 @@ +KubeControllerManager: + CloudProvider: aws diff --git a/upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/_gce/kube-controller-manager.gce b/upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/_gce/kube-controller-manager.gce new file mode 100644 index 0000000000..cc74380ac9 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/_gce/kube-controller-manager.gce @@ -0,0 +1,2 @@ +KubeControllerManager: + CloudProvider: gce diff --git a/upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/kube-controller-manager b/upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/kube-controller-manager new file mode 100644 index 0000000000..f9da8f6972 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-controller-manager/options/kube-controller-manager @@ -0,0 +1,10 @@ +KubeControllerManager: + PathSrvKubernetes: /srv/kubernetes + Image: gcr.io/google_containers/kube-controller-manager:v1.2.2 + Master: 127.0.0.1:8080 + ClusterName: kubernetes + ClusterCIDR: 10.244.0.0/16 + AllocateNodeCIDRs: true + ServiceAccountPrivateKeyFile: /srv/kubernetes/server.key + LogLevel: 2 + RootCAFile: /srv/kubernetes/ca.crt \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/kube-dns/files/etc/kubernetes/addons/dns/skydns-rc.yaml.template b/upup/models/nodeup/_kubernetes_master/kube-dns/files/etc/kubernetes/addons/dns/skydns-rc.yaml.template new file mode 100644 index 0000000000..615b8a9e39 --- 
/dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-dns/files/etc/kubernetes/addons/dns/skydns-rc.yaml.template @@ -0,0 +1,119 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: kube-dns-v10 + namespace: kube-system + labels: + k8s-app: kube-dns + version: v10 + kubernetes.io/cluster-service: "true" +spec: + replicas: {{ .DNS.Replicas }} + selector: + k8s-app: kube-dns + version: v10 + template: + metadata: + labels: + k8s-app: kube-dns + version: v10 + kubernetes.io/cluster-service: "true" + spec: + containers: + - name: etcd + image: gcr.io/google_containers/etcd:2.0.9 + resources: + # keep request = limit to keep this container in guaranteed class + limits: + cpu: 100m + memory: 50Mi + requests: + cpu: 100m + memory: 50Mi + command: + - /usr/local/bin/etcd + - -data-dir + - /var/etcd/data + - -listen-client-urls + - http://127.0.0.1:2379,http://127.0.0.1:4001 + - -advertise-client-urls + - http://127.0.0.1:2379,http://127.0.0.1:4001 + - -initial-cluster-token + - skydns-etcd + volumeMounts: + - name: etcd-storage + mountPath: /var/etcd/data + - name: kube2sky + image: gcr.io/google_containers/kube2sky:1.12 + resources: + # keep request = limit to keep this container in guaranteed class + limits: + cpu: 100m + memory: 50Mi + requests: + cpu: 100m + memory: 50Mi + command: + - /kube2sky + args: + - -domain={{ .DNS.Domain }} + - name: skydns + image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c + resources: + # keep request = limit to keep this container in guaranteed class + limits: + cpu: 100m + memory: 50Mi + requests: + cpu: 100m + memory: 50Mi + command: + - /skydns + args: + - -machines=http://127.0.0.1:4001 + - -addr=0.0.0.0:53 + - -ns-rotate=false + - -domain={{ .DNS.Domain }}. + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 1 + timeoutSeconds: 5 + - name: healthz + image: gcr.io/google_containers/exechealthz:1.0 + resources: + # keep request = limit to keep this container in guaranteed class + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + command: + - /exechealthz + args: + - -cmd=nslookup kubernetes.default.svc.{{ .DNS.Domain }} 127.0.0.1 >/dev/null + - -port=8080 + ports: + - containerPort: 8080 + protocol: TCP + volumes: + - name: etcd-storage + emptyDir: {} + dnsPolicy: Default # Don't use cluster DNS. 
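+# .DNS.Replicas, .DNS.Domain and (in the service template) .DNS.ServerIP are
+# filled in from the kube-dns options file later in this change
+# (Replicas: 1, ServerIP: 10.0.0.10, Domain: cluster.local by default).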
\ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/kube-dns/files/etc/kubernetes/addons/dns/skydns-svc.yaml.template b/upup/models/nodeup/_kubernetes_master/kube-dns/files/etc/kubernetes/addons/dns/skydns-svc.yaml.template new file mode 100644 index 0000000000..0f27040d6b --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-dns/files/etc/kubernetes/addons/dns/skydns-svc.yaml.template @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: {{ .DNS.ServerIP }} + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP diff --git a/upup/models/nodeup/_kubernetes_master/kube-dns/options/kube-dns b/upup/models/nodeup/_kubernetes_master/kube-dns/options/kube-dns new file mode 100644 index 0000000000..1384f2eab9 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-dns/options/kube-dns @@ -0,0 +1,4 @@ +DNS: + Replicas: 1 + ServerIP: 10.0.0.10 + Domain: cluster.local diff --git a/upup/models/nodeup/_kubernetes_master/kube-scheduler/files/etc/kubernetes/manifests/kube-scheduler.template b/upup/models/nodeup/_kubernetes_master/kube-scheduler/files/etc/kubernetes/manifests/kube-scheduler.template new file mode 100644 index 0000000000..8907f0dfce --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-scheduler/files/etc/kubernetes/manifests/kube-scheduler.template @@ -0,0 +1,48 @@ +{ +"apiVersion": "v1", +"kind": "Pod", +"metadata": { + "name":"kube-scheduler", + "namespace": "kube-system" +}, +"spec":{ +"hostNetwork": true, +"containers":[ + { + "name": "kube-scheduler", + "image": "{{ .KubeScheduler.Image }}", + "resources": { + "requests": { + "cpu": "100m" + } + }, + "command": [ + "/bin/sh", + "-c", + "/usr/local/bin/kube-scheduler {{ BuildFlags .KubeScheduler }} 1>>/var/log/kube-scheduler.log 2>&1" + ], + "livenessProbe": { + "httpGet": { + "host": "127.0.0.1", + "port": 10251, + "path": "/healthz" + }, + "initialDelaySeconds": 15, + "timeoutSeconds": 15 + }, + "volumeMounts": [ + { + "name": "logfile", + "mountPath": "/var/log/kube-scheduler.log", + "readOnly": false + } + ] + } +], +"volumes":[ + { "name": "logfile", + "hostPath": { + "path": "/var/log/kube-scheduler.log"} + } +] +}} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/kube-scheduler/files/var/log/kube-scheduler.log b/upup/models/nodeup/_kubernetes_master/kube-scheduler/files/var/log/kube-scheduler.log new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/_kubernetes_master/kube-scheduler/files/var/log/kube-scheduler.log.meta b/upup/models/nodeup/_kubernetes_master/kube-scheduler/files/var/log/kube-scheduler.log.meta new file mode 100644 index 0000000000..56d0e34103 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-scheduler/files/var/log/kube-scheduler.log.meta @@ -0,0 +1,3 @@ +{ + "ifNotExists": true +} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_master/kube-scheduler/options/kube-scheduler b/upup/models/nodeup/_kubernetes_master/kube-scheduler/options/kube-scheduler new file mode 100644 index 0000000000..502307d07f --- /dev/null +++ b/upup/models/nodeup/_kubernetes_master/kube-scheduler/options/kube-scheduler @@ -0,0 +1,4 @@ +KubeScheduler: + Image: gcr.io/google_containers/kube-scheduler:v1.2.2 + Master: 127.0.0.1:8080 + LogLevel: 2 diff --git 
a/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/etc/kubernetes/kube-node-unpacker.sh b/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/etc/kubernetes/kube-node-unpacker.sh
new file mode 100644
index 0000000000..adfba2f33b
--- /dev/null
+++ b/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/etc/kubernetes/kube-node-unpacker.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Copyright 2015 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# loadedImageFlags is a bit-flag to track which docker images loaded successfully.
+let loadedImageFlags=0
+
+while true; do
+  restart_docker=false
+
+  if which docker 1>/dev/null 2>&1; then
+
+    timeout 30 docker load -i /srv/salt/kube-bins/kube-proxy.tar 1>/dev/null 2>&1
+    rc=$?
+    if [[ "${rc}" == 0 ]]; then
+      let loadedImageFlags="${loadedImageFlags}|1"
+    elif [[ "${rc}" == 124 ]]; then
+      restart_docker=true
+    fi
+  fi
+
+  # All required docker images are loaded; exit the while loop.
+  if [[ "${loadedImageFlags}" == 1 ]]; then break; fi
+
+  # Sometimes "docker load" hangs; restarting the docker daemon resolves the issue.
+  if [[ "${restart_docker}" == "true" ]]; then service docker restart; fi
+
+  # sleep for 15 seconds before attempting to load docker images again
+  sleep 15
+
+done
+
+# Now exit. After kube-push, salt will notice that the service is down and it
+# will start it and new docker images will be loaded.
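+# With a single image the flag arithmetic is trivial (0|1 == 1), but the same
+# bit-flag pattern extends to several tarballs, e.g. 1|2|4 == 7 in the master
+# variant of this script (kube-master-addons.sh).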
\ No newline at end of file
diff --git a/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/etc/kubernetes/kube-node-unpacker.sh.meta b/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/etc/kubernetes/kube-node-unpacker.sh.meta
new file mode 100644
index 0000000000..aac9c2ebd8
--- /dev/null
+++ b/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/etc/kubernetes/kube-node-unpacker.sh.meta
@@ -0,0 +1,3 @@
+{
+  "mode": "0755"
+}
\ No newline at end of file
diff --git a/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/srv/salt/kube-bins/kube-proxy.tar.asset b/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/srv/salt/kube-bins/kube-proxy.tar.asset
new file mode 100644
index 0000000000..2c63c08510
--- /dev/null
+++ b/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/files/srv/salt/kube-bins/kube-proxy.tar.asset
@@ -0,0 +1,2 @@
+{
+}
diff --git a/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/services/kube-node-unpacker.service b/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/services/kube-node-unpacker.service
new file mode 100644
index 0000000000..42641a810d
--- /dev/null
+++ b/upup/models/nodeup/_kubernetes_pool/_kube-node-unpacker/services/kube-node-unpacker.service
@@ -0,0 +1,9 @@
+[Unit]
+Description=Kubernetes Node Unpacker
+Documentation=https://github.com/kubernetes/kubernetes
+
+[Service]
+ExecStart=/etc/kubernetes/kube-node-unpacker.sh
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
diff --git a/upup/models/nodeup/_kubernetes_pool/helpers/_aws/files/usr/share/google/safe_format_and_mount b/upup/models/nodeup/_kubernetes_pool/helpers/_aws/files/usr/share/google/safe_format_and_mount
new file mode 100644
index 0000000000..ba3a3c3d77
--- /dev/null
+++ b/upup/models/nodeup/_kubernetes_pool/helpers/_aws/files/usr/share/google/safe_format_and_mount
@@ -0,0 +1,147 @@
+#! /bin/bash
+# Copyright 2013 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Mount a disk, formatting it if necessary. If the disk looks like it may
+# have been formatted before, we will not format it.
+#
+# This script uses blkid and file to search for magic "formatted" bytes
+# at the beginning of the disk. Furthermore, it attempts to use fsck to
+# repair the filesystem before formatting it.
+
+FSCK=fsck.ext4
+MOUNT_OPTIONS="discard,defaults"
+MKFS="mkfs.ext4 -E lazy_itable_init=0,lazy_journal_init=0 -F"
+if [ -e /etc/redhat-release ]; then
+  if grep -q '6\..' /etc/redhat-release; then
+    # lazy_journal_init is not recognized in redhat 6
+    MKFS="mkfs.ext4 -E lazy_itable_init=0 -F"
+  elif grep -q '7\..' /etc/redhat-release; then
+    FSCK=fsck.xfs
+    MKFS=mkfs.xfs
+  fi
+fi
+
+LOGTAG=safe_format_and_mount
+LOGFACILITY=user
+
+function log() {
+  local -r severity=$1; shift;
+  logger -t ${LOGTAG} -p ${LOGFACILITY}.${severity} -s "$@"
+}
+
+function log_command() {
+  local -r log_file=$(mktemp)
+  local retcode  # assigned after the command runs, so it cannot be declared -r
+  log info "Running: $*"
+  $* > ${log_file} 2>&1
+  retcode=$?
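+  # Note: $* above is intentionally left unquoted, so the logged command is
+  # re-split by the shell; callers must not pass arguments that contain spaces.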
+ # only return the last 1000 lines of the logfile, just in case it's HUGE. + tail -1000 ${log_file} | logger -t ${LOGTAG} -p ${LOGFACILITY}.info -s + rm -f ${log_file} + return ${retcode} +} + +function help() { + cat >&2 <<EOF +Usage: $0 [-f fsck_cmd] [-m mkfs_cmd] [-o mount_opts] <device> <mountpoint> +EOF + exit 0 +} + +while getopts ":hf:o:m:" opt; do + case $opt in + h) help;; + f) FSCK=$OPTARG;; + o) MOUNT_OPTIONS=$OPTARG;; + m) MKFS=$OPTARG;; + -) break;; + \?) log error "Invalid option: -${OPTARG}"; exit 1;; + :) log error "Option -${OPTARG} requires an argument."; exit 1;; + esac +done + +shift $(($OPTIND - 1)) +readonly DISK=$1 +readonly MOUNTPOINT=$2 + +[[ -z ${DISK} ]] && help +[[ -z ${MOUNTPOINT} ]] && help + +function disk_looks_unformatted() { + blkid ${DISK} + if [[ $? == 0 ]]; then + return 0 + fi + + local -r file_type=$(file --special-files ${DISK}) + case ${file_type} in + *filesystem*) + return 0;; + esac + + return 1 +} + +function format_disk() { + log_command ${MKFS} ${DISK} +} + +function try_repair_disk() { + log_command ${FSCK} -a ${DISK} + local -r fsck_return=$? + if [[ ${fsck_return} -ge 8 ]]; then + log error "Fsck could not correct errors on ${DISK}" + return 1 + fi + if [[ ${fsck_return} -gt 0 ]]; then + log warning "Fsck corrected errors on ${DISK}" + fi + return 0 +} + +function try_mount() { + local mount_retcode + try_repair_disk + + log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT} + mount_retcode=$? + if [[ ${mount_retcode} == 0 ]]; then + return 0 + fi + + # Check to see if it looks like a filesystem before formatting it. + disk_looks_unformatted ${DISK} + if [[ $? == 0 ]]; then + log error "Disk ${DISK} looks formatted but won't mount. Giving up." + return ${mount_retcode} + fi + + # The disk looks like it's not been formatted before. + format_disk + if [[ $? != 0 ]]; then + log error "Format of ${DISK} failed." + fi + + log_command mount -o ${MOUNT_OPTIONS} ${DISK} ${MOUNTPOINT} + mount_retcode=$? + if [[ ${mount_retcode} == 0 ]]; then + return 0 + fi + log error "Tried everything we could, but could not mount ${DISK}." + return ${mount_retcode} +} + +try_mount +exit $?
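The ladder in try_mount is: best-effort fsck, attempt the mount, refuse to touch anything that already looks formatted, otherwise mkfs and mount again. The same control flow as a self-contained Go sketch (all helpers stubbed; the bash above is the actual implementation):

    package main

    import (
        "errors"
        "fmt"
    )

    var formatted = false // toy disk state

    func tryRepairDisk()           {}                                // fsck -a, best effort
    func diskLooksFormatted() bool { return formatted }              // blkid / file(1) probe
    func formatDisk() error        { formatted = true; return nil }  // mkfs
    func mountDisk() error {
        if !formatted {
            return errors.New("no filesystem")
        }
        return nil
    }

    func tryMount() error {
        tryRepairDisk()
        if mountDisk() == nil {
            return nil
        }
        if diskLooksFormatted() {
            return errors.New("disk looks formatted but won't mount; giving up")
        }
        if err := formatDisk(); err != nil {
            return err
        }
        return mountDisk()
    }

    func main() {
        fmt.Println(tryMount()) // <nil>: the fresh disk is formatted, then mounted
    }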
\ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_pool/helpers/_aws/files/usr/share/google/safe_format_and_mount.meta b/upup/models/nodeup/_kubernetes_pool/helpers/_aws/files/usr/share/google/safe_format_and_mount.meta new file mode 100644 index 0000000000..aac9c2ebd8 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_pool/helpers/_aws/files/usr/share/google/safe_format_and_mount.meta @@ -0,0 +1,3 @@ +{ + "mode": "0755" +} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/etc/kubernetes/manifests/kube-proxy.manifest.template b/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/etc/kubernetes/manifests/kube-proxy.manifest.template new file mode 100644 index 0000000000..0fd15f2e2b --- /dev/null +++ b/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/etc/kubernetes/manifests/kube-proxy.manifest.template @@ -0,0 +1,40 @@ +# kube-proxy podspec +apiVersion: v1 +kind: Pod +metadata: + name: kube-proxy + namespace: kube-system +spec: + hostNetwork: true + containers: + - name: kube-proxy + image: {{ .KubeProxy.Image }} + resources: + requests: + cpu: {{ .KubeProxy.CPURequest }} + command: + - /bin/sh + - -c + - kube-proxy --kubeconfig=/var/lib/kube-proxy/kubeconfig --resource-container="" {{ BuildFlags .KubeProxy }} 1>>/var/log/kube-proxy.log 2>&1 + securityContext: + privileged: true + volumeMounts: + - mountPath: /etc/ssl/certs + name: ssl-certs-host + readOnly: true + - mountPath: /var/log + name: varlog + readOnly: false + - mountPath: /var/lib/kube-proxy/kubeconfig + name: kubeconfig + readOnly: false + volumes: + - hostPath: + path: /usr/share/ca-certificates + name: ssl-certs-host + - hostPath: + path: /var/lib/kube-proxy/kubeconfig + name: kubeconfig + - hostPath: + path: /var/log + name: varlog diff --git a/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/lib/kube-proxy/kubeconfig.template b/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/lib/kube-proxy/kubeconfig.template new file mode 100644 index 0000000000..dcaee723cd --- /dev/null +++ b/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/lib/kube-proxy/kubeconfig.template @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Config +users: +- name: kube-proxy + user: + token: {{ .GetToken "kube-proxy" }} +clusters: +- name: local + cluster: + certificate-authority-data: {{ Base64Encode .CACertificate.AsString }} +contexts: +- context: + cluster: local + user: kube-proxy + name: service-account-context +current-context: service-account-context \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/lib/kube-proxy/kubeconfig.template.meta b/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/lib/kube-proxy/kubeconfig.template.meta new file mode 100644 index 0000000000..a617cf1f45 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/lib/kube-proxy/kubeconfig.template.meta @@ -0,0 +1,3 @@ +{ + "mode": "0400" +} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/log/kube-proxy.log b/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/log/kube-proxy.log new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/log/kube-proxy.log.meta b/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/log/kube-proxy.log.meta new file mode 100644 index 0000000000..56d0e34103 --- /dev/null +++ b/upup/models/nodeup/_kubernetes_pool/kube-proxy/files/var/log/kube-proxy.log.meta @@ -0,0 +1,3 @@ 
+{ + "ifNotExists": true +} \ No newline at end of file diff --git a/upup/models/nodeup/_kubernetes_pool/kube-proxy/options/kube-proxy b/upup/models/nodeup/_kubernetes_pool/kube-proxy/options/kube-proxy new file mode 100644 index 0000000000..6f01a570bb --- /dev/null +++ b/upup/models/nodeup/_kubernetes_pool/kube-proxy/options/kube-proxy @@ -0,0 +1,10 @@ +KubeProxy: + Image: gcr.io/google_containers/kube-proxy:v1.2.2 + Master: https://kubernetes-master + LogLevel: 2 + # 20m might cause kube-proxy CPU starvation on full nodes, resulting in + # delayed service updates. But, giving it more would be a breaking change + # to the overhead requirements for existing clusters. + # Any change here should be accompanied by a proportional change in CPU + # requests of other per-node add-ons (e.g. fluentd). + CPURequest: 20m \ No newline at end of file diff --git a/upup/models/nodeup/auto-upgrades/_debian_family/files/etc/apt/apt.conf.d/20auto-upgrades b/upup/models/nodeup/auto-upgrades/_debian_family/files/etc/apt/apt.conf.d/20auto-upgrades new file mode 100644 index 0000000000..2bb25d7053 --- /dev/null +++ b/upup/models/nodeup/auto-upgrades/_debian_family/files/etc/apt/apt.conf.d/20auto-upgrades @@ -0,0 +1,4 @@ +APT::Periodic::Update-Package-Lists "1"; +APT::Periodic::Unattended-Upgrade "1"; + +APT::Periodic::AutocleanInterval "7"; diff --git a/upup/models/nodeup/auto-upgrades/_debian_family/packages/unattended-upgrades b/upup/models/nodeup/auto-upgrades/_debian_family/packages/unattended-upgrades new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/docker/_gce/files/etc/sysctl.d/99-ip_forward.conf b/upup/models/nodeup/docker/_gce/files/etc/sysctl.d/99-ip_forward.conf new file mode 100644 index 0000000000..d801c31a48 --- /dev/null +++ b/upup/models/nodeup/docker/_gce/files/etc/sysctl.d/99-ip_forward.conf @@ -0,0 +1,2 @@ +# Kubernetes +net.ipv4.ip_forward=1 diff --git a/upup/models/nodeup/docker/_gce/files/etc/sysctl.d/99-ip_forward.conf.meta b/upup/models/nodeup/docker/_gce/files/etc/sysctl.d/99-ip_forward.conf.meta new file mode 100644 index 0000000000..7301e8af13 --- /dev/null +++ b/upup/models/nodeup/docker/_gce/files/etc/sysctl.d/99-ip_forward.conf.meta @@ -0,0 +1,3 @@ +{ + "onChangeExecute": [ "sysctl", "--system" ] +} \ No newline at end of file diff --git a/upup/models/nodeup/docker/_systemd/files/etc/sysconfig/docker.template b/upup/models/nodeup/docker/_systemd/files/etc/sysconfig/docker.template new file mode 100644 index 0000000000..662fc7ede5 --- /dev/null +++ b/upup/models/nodeup/docker/_systemd/files/etc/sysconfig/docker.template @@ -0,0 +1,2 @@ +DOCKER_OPTS="{{ BuildFlags .Docker }}" +DOCKER_NOFILE=1000000 diff --git a/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-healthcheck b/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-healthcheck new file mode 100644 index 0000000000..5391fc430b --- /dev/null +++ b/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-healthcheck @@ -0,0 +1,45 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script is intended to be run periodically, to check the health +# of docker. If it detects a failure, it will restart docker using systemctl. + +if timeout 10 docker version > /dev/null; then + echo "docker healthy" + exit 0 +fi + +echo "docker failed" +echo "Giving docker 30 seconds grace before restarting" +sleep 30 + +if timeout 10 docker version > /dev/null; then + echo "docker recovered" + exit 0 +fi + +echo "docker still down; triggering docker restart" +systemctl restart docker + +echo "Waiting 60 seconds to give docker time to start" +sleep 60 + +if timeout 10 docker version > /dev/null; then + echo "docker recovered" + exit 0 +fi + +echo "docker still failing" \ No newline at end of file diff --git a/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-healthcheck.meta b/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-healthcheck.meta new file mode 100644 index 0000000000..aac9c2ebd8 --- /dev/null +++ b/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-healthcheck.meta @@ -0,0 +1,3 @@ +{ + "mode": "0755" +} \ No newline at end of file diff --git a/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-prestart b/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-prestart new file mode 100644 index 0000000000..97a31ce2fb --- /dev/null +++ b/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-prestart @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script is intended to be run before we start Docker. 
+ +# Clean up the docker network checkpoint to avoid running into a known +# docker issue (https://github.com/docker/docker/issues/18283) +rm -rf /var/lib/docker/network diff --git a/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-prestart.meta b/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-prestart.meta new file mode 100644 index 0000000000..aac9c2ebd8 --- /dev/null +++ b/upup/models/nodeup/docker/_systemd/files/opt/kubernetes/helpers/docker-prestart.meta @@ -0,0 +1,3 @@ +{ + "mode": "0755" +} \ No newline at end of file diff --git a/upup/models/nodeup/docker/_systemd/services/docker-healthcheck.service b/upup/models/nodeup/docker/_systemd/services/docker-healthcheck.service new file mode 100644 index 0000000000..1fb10cacd1 --- /dev/null +++ b/upup/models/nodeup/docker/_systemd/services/docker-healthcheck.service @@ -0,0 +1,9 @@ +[Unit] +Description=Run docker-healthcheck once + +[Service] +Type=oneshot +ExecStart=/opt/kubernetes/helpers/docker-healthcheck + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/upup/models/nodeup/docker/_systemd/services/docker-healthcheck.service.meta b/upup/models/nodeup/docker/_systemd/services/docker-healthcheck.service.meta new file mode 100644 index 0000000000..6c615f549a --- /dev/null +++ b/upup/models/nodeup/docker/_systemd/services/docker-healthcheck.service.meta @@ -0,0 +1,3 @@ +{ + "manageState": false +} \ No newline at end of file diff --git a/upup/models/nodeup/docker/_systemd/services/docker-healthcheck.timer b/upup/models/nodeup/docker/_systemd/services/docker-healthcheck.timer new file mode 100644 index 0000000000..9697f9a9d5 --- /dev/null +++ b/upup/models/nodeup/docker/_systemd/services/docker-healthcheck.timer @@ -0,0 +1,9 @@ +[Unit] +Description=Trigger docker-healthcheck periodically + +[Timer] +OnUnitInactiveSec=10s +Unit=docker-healthcheck.service + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/upup/models/nodeup/docker/_systemd/services/docker.service b/upup/models/nodeup/docker/_systemd/services/docker.service new file mode 100644 index 0000000000..197fd15396 --- /dev/null +++ b/upup/models/nodeup/docker/_systemd/services/docker.service @@ -0,0 +1,21 @@ +[Unit] +Description=Docker Application Container Engine +Documentation=https://docs.docker.com +After=network.target docker.socket +Requires=docker.socket + +[Service] +Type=notify +EnvironmentFile=/etc/sysconfig/docker +ExecStart=/usr/bin/docker daemon -H fd:// "$DOCKER_OPTS" +MountFlags=slave +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +Restart=always +RestartSec=2s +StartLimitInterval=0 +ExecStartPre=/opt/kubernetes/helpers/docker-prestart + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/upup/models/nodeup/docker/_systemd/services/docker.service.meta b/upup/models/nodeup/docker/_systemd/services/docker.service.meta new file mode 100644 index 0000000000..6c615f549a --- /dev/null +++ b/upup/models/nodeup/docker/_systemd/services/docker.service.meta @@ -0,0 +1,3 @@ +{ + "manageState": false +} \ No newline at end of file diff --git a/upup/models/nodeup/docker/files/usr/share/doc/docker/apache.txt b/upup/models/nodeup/docker/files/usr/share/doc/docker/apache.txt new file mode 100644 index 0000000000..d7af995ffe --- /dev/null +++ b/upup/models/nodeup/docker/files/usr/share/doc/docker/apache.txt @@ -0,0 +1,4 @@ +{ + "source": "https://storage.googleapis.com/kubernetes-release/docker/apache2.txt", + "hash":
"2b8b815229aa8a61e483fb4ba0588b8b6c491890" +} \ No newline at end of file diff --git a/upup/models/nodeup/docker/options/_e2e_storage_test_environment/e2e.options b/upup/models/nodeup/docker/options/_e2e_storage_test_environment/e2e.options new file mode 100644 index 0000000000..51325680f2 --- /dev/null +++ b/upup/models/nodeup/docker/options/_e2e_storage_test_environment/e2e.options @@ -0,0 +1,2 @@ +Docker: + Storage: devicemapper \ No newline at end of file diff --git a/upup/models/nodeup/docker/options/_kubenet/kubenet.options b/upup/models/nodeup/docker/options/_kubenet/kubenet.options new file mode 100644 index 0000000000..f6b5beef32 --- /dev/null +++ b/upup/models/nodeup/docker/options/_kubenet/kubenet.options @@ -0,0 +1,5 @@ +{% set log_level = "--log-level=warn" -%} +{% if pillar['docker_test_log_level'] is defined -%} + {% set log_level = pillar['docker_test_log_level'] -%} +{% endif -%} +docker.bridge= \ No newline at end of file diff --git a/upup/models/nodeup/docker/options/default b/upup/models/nodeup/docker/options/default new file mode 100644 index 0000000000..223605cf4e --- /dev/null +++ b/upup/models/nodeup/docker/options/default @@ -0,0 +1,5 @@ +Docker: + Bridge: cbr0 + LogLevel: warn + IPTables: false + IPMasq: false diff --git a/upup/models/nodeup/docker/packages/_jessie/docker-engine b/upup/models/nodeup/docker/packages/_jessie/docker-engine new file mode 100644 index 0000000000..6664f249e7 --- /dev/null +++ b/upup/models/nodeup/docker/packages/_jessie/docker-engine @@ -0,0 +1,7 @@ +{ + "version": "1.9.1-0~jessie", + "source": "http://apt.dockerproject.org/repo/pool/main/d/docker-engine/docker-engine_1.9.1-0~jessie_amd64.deb", + "hash": "c58c39008fd6399177f6b2491222e4438f518d78", + + "preventStart": true +} \ No newline at end of file diff --git a/upup/models/nodeup/docker/packages/bridge-utils b/upup/models/nodeup/docker/packages/bridge-utils new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/docker/packages/libapparmor1 b/upup/models/nodeup/docker/packages/libapparmor1 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/docker/packages/perl b/upup/models/nodeup/docker/packages/perl new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/kube-client-tools/files/usr/local/bin/kubectl.asset b/upup/models/nodeup/kube-client-tools/files/usr/local/bin/kubectl.asset new file mode 100644 index 0000000000..7a73a41bfd --- /dev/null +++ b/upup/models/nodeup/kube-client-tools/files/usr/local/bin/kubectl.asset @@ -0,0 +1,2 @@ +{ +} \ No newline at end of file diff --git a/upup/models/nodeup/kube-client-tools/files/usr/local/bin/kubectl.asset.meta b/upup/models/nodeup/kube-client-tools/files/usr/local/bin/kubectl.asset.meta new file mode 100644 index 0000000000..aac9c2ebd8 --- /dev/null +++ b/upup/models/nodeup/kube-client-tools/files/usr/local/bin/kubectl.asset.meta @@ -0,0 +1,3 @@ +{ + "mode": "0755" +} \ No newline at end of file diff --git a/upup/models/nodeup/kubelet/files/etc/sysconfig/kubelet.template b/upup/models/nodeup/kubelet/files/etc/sysconfig/kubelet.template new file mode 100644 index 0000000000..0e740af1f6 --- /dev/null +++ b/upup/models/nodeup/kubelet/files/etc/sysconfig/kubelet.template @@ -0,0 +1 @@ +DAEMON_ARGS="{{ BuildFlags .Kubelet }}" diff --git a/upup/models/nodeup/kubelet/files/usr/local/bin/kubelet.asset b/upup/models/nodeup/kubelet/files/usr/local/bin/kubelet.asset new file mode 100644 index 0000000000..2c63c08510 --- /dev/null +++ 
b/upup/models/nodeup/kubelet/files/usr/local/bin/kubelet.asset @@ -0,0 +1,2 @@ +{ +} diff --git a/upup/models/nodeup/kubelet/files/usr/local/bin/kubelet.asset.meta b/upup/models/nodeup/kubelet/files/usr/local/bin/kubelet.asset.meta new file mode 100644 index 0000000000..5d42912612 --- /dev/null +++ b/upup/models/nodeup/kubelet/files/usr/local/bin/kubelet.asset.meta @@ -0,0 +1,3 @@ +{ + "mode": "0755" +} diff --git a/upup/models/nodeup/kubelet/files/var/lib/kubelet/kubeconfig.template b/upup/models/nodeup/kubelet/files/var/lib/kubelet/kubeconfig.template new file mode 100644 index 0000000000..0663831772 --- /dev/null +++ b/upup/models/nodeup/kubelet/files/var/lib/kubelet/kubeconfig.template @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Config +users: +- name: kubelet + user: + client-certificate-data: {{ Base64Encode .Kubelet.Certificate.AsString }} + client-key-data: {{ Base64Encode .Kubelet.Key.AsString }} +clusters: +- name: local + cluster: + certificate-authority-data: {{ Base64Encode (or .Kubelet.CACertificate .CACertificate).AsString }} +contexts: +- context: + cluster: local + user: kubelet + name: service-account-context +current-context: service-account-context diff --git a/upup/models/nodeup/kubelet/files/var/lib/kubelet/kubeconfig.template.meta b/upup/models/nodeup/kubelet/files/var/lib/kubelet/kubeconfig.template.meta new file mode 100644 index 0000000000..a617cf1f45 --- /dev/null +++ b/upup/models/nodeup/kubelet/files/var/lib/kubelet/kubeconfig.template.meta @@ -0,0 +1,3 @@ +{ + "mode": "0400" +} \ No newline at end of file diff --git a/upup/models/nodeup/kubelet/options/_aws/kubelet.aws b/upup/models/nodeup/kubelet/options/_aws/kubelet.aws new file mode 100644 index 0000000000..c34a76a76f --- /dev/null +++ b/upup/models/nodeup/kubelet/options/_aws/kubelet.aws @@ -0,0 +1,5 @@ +Kubelet: + CloudProvider: aws + CgroupRoot: docker + NonMasqueradeCidr: 10.0.0.0/8 + APIServers: https://172.20.0.9 diff --git a/upup/models/nodeup/kubelet/options/_gce/kubelet.gce b/upup/models/nodeup/kubelet/options/_gce/kubelet.gce new file mode 100644 index 0000000000..9f8996f787 --- /dev/null +++ b/upup/models/nodeup/kubelet/options/_gce/kubelet.gce @@ -0,0 +1,8 @@ +Kubelet: + CloudProvider: gce + APIServers: https://kubernetes-master + HairpinMode: promiscuous-bridge + RuntimeCgroups: /docker-daemon + KubeletCgroups: /kubelet + SystemCgroups: /system + CgroupRoot: / diff --git a/upup/models/nodeup/kubelet/options/_kubernetes_master/kubelet.kubernetes_master b/upup/models/nodeup/kubelet/options/_kubernetes_master/kubelet.kubernetes_master new file mode 100644 index 0000000000..a37eaf463f --- /dev/null +++ b/upup/models/nodeup/kubelet/options/_kubernetes_master/kubelet.kubernetes_master @@ -0,0 +1,7 @@ +Kubelet: + RegisterSchedulable: false + ReconcileCIDR: false + EnableDebuggingHandlers: false + HairpinMode: none + PodCIDR: 10.123.45.0/30 + diff --git a/upup/models/nodeup/kubelet/options/kubelet b/upup/models/nodeup/kubelet/options/kubelet new file mode 100644 index 0000000000..627af280d3 --- /dev/null +++ b/upup/models/nodeup/kubelet/options/kubelet @@ -0,0 +1,10 @@ +Kubelet: + EnableDebuggingHandlers: true + Config: /etc/kubernetes/manifests + AllowPrivileged: true + Verbosity: 2 + ClusterDNS: 10.0.0.10 + ClusterDomain: cluster.local + ConfigureCBR0: true + BabysitDaemons: true + diff --git a/upup/models/nodeup/kubelet/services/kubelet.service b/upup/models/nodeup/kubelet/services/kubelet.service new file mode 100644 index 0000000000..b266e03fb5 --- /dev/null +++
b/upup/models/nodeup/kubelet/services/kubelet.service @@ -0,0 +1,11 @@ +[Unit] +Description=Kubernetes Kubelet Server +Documentation=https://github.com/kubernetes/kubernetes + +[Service] +EnvironmentFile=/etc/sysconfig/kubelet +ExecStart=/usr/local/bin/kubelet "$DAEMON_ARGS" +Restart=always + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/upup/models/nodeup/logrotate/files/etc/cron.hourly/logrotate b/upup/models/nodeup/logrotate/files/etc/cron.hourly/logrotate new file mode 100644 index 0000000000..67f96fe969 --- /dev/null +++ b/upup/models/nodeup/logrotate/files/etc/cron.hourly/logrotate @@ -0,0 +1,2 @@ +#!/bin/sh +logrotate /etc/logrotate.conf \ No newline at end of file diff --git a/upup/models/nodeup/logrotate/files/etc/cron.hourly/logrotate.meta b/upup/models/nodeup/logrotate/files/etc/cron.hourly/logrotate.meta new file mode 100644 index 0000000000..aac9c2ebd8 --- /dev/null +++ b/upup/models/nodeup/logrotate/files/etc/cron.hourly/logrotate.meta @@ -0,0 +1,3 @@ +{ + "mode": "0755" +} \ No newline at end of file diff --git a/upup/models/nodeup/logrotate/files/etc/logrotate.d/docker b/upup/models/nodeup/logrotate/files/etc/logrotate.d/docker new file mode 100644 index 0000000000..c2d1955c9e --- /dev/null +++ b/upup/models/nodeup/logrotate/files/etc/logrotate.d/docker @@ -0,0 +1,10 @@ +/var/log/docker.log { + rotate 5 + copytruncate + missingok + notifempty + compress + maxsize 100M + daily + create 0644 root root +} diff --git a/upup/models/nodeup/logrotate/files/etc/logrotate.d/docker-containers b/upup/models/nodeup/logrotate/files/etc/logrotate.d/docker-containers new file mode 100644 index 0000000000..c8f221ffcf --- /dev/null +++ b/upup/models/nodeup/logrotate/files/etc/logrotate.d/docker-containers @@ -0,0 +1,10 @@ +/var/lib/docker/containers/*/*-json.log { + rotate 5 + copytruncate + missingok + notifempty + compress + maxsize 10M + daily + create 0644 root root +} \ No newline at end of file diff --git a/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-addons b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-addons new file mode 100644 index 0000000000..111f650078 --- /dev/null +++ b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-addons @@ -0,0 +1,10 @@ +/var/log/kube-addons.log { + rotate 5 + copytruncate + missingok + notifempty + compress + maxsize 100M + daily + create 0644 root root +} diff --git a/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-apiserver b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-apiserver new file mode 100644 index 0000000000..0e949d6dac --- /dev/null +++ b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-apiserver @@ -0,0 +1,10 @@ +/var/log/kube-apiserver.log { + rotate 5 + copytruncate + missingok + notifempty + compress + maxsize 100M + daily + create 0644 root root +} diff --git a/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-controller-manager b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-controller-manager new file mode 100644 index 0000000000..1041054114 --- /dev/null +++ b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-controller-manager @@ -0,0 +1,10 @@ +/var/log/kube-controller-manager.log { + rotate 5 + copytruncate + missingok + notifempty + compress + maxsize 100M + daily + create 0644 root root +} diff --git a/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-proxy b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-proxy new file mode 100644 index 0000000000..d195834bc4 --- /dev/null +++ 
b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-proxy @@ -0,0 +1,10 @@ +/var/log/kube-proxy.log { + rotate 5 + copytruncate + missingok + notifempty + compress + maxsize 100M + daily + create 0644 root root +} diff --git a/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-scheduler b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-scheduler new file mode 100644 index 0000000000..50b6e5f069 --- /dev/null +++ b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kube-scheduler @@ -0,0 +1,10 @@ +/var/log/kube-scheduler.log { + rotate 5 + copytruncate + missingok + notifempty + compress + maxsize 100M + daily + create 0644 root root +} diff --git a/upup/models/nodeup/logrotate/files/etc/logrotate.d/kubelet b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kubelet new file mode 100644 index 0000000000..825c164161 --- /dev/null +++ b/upup/models/nodeup/logrotate/files/etc/logrotate.d/kubelet @@ -0,0 +1,10 @@ +/var/log/kubelet.log { + rotate 5 + copytruncate + missingok + notifempty + compress + maxsize 100M + daily + create 0644 root root +} diff --git a/upup/models/nodeup/logrotate/packages/logrotate b/upup/models/nodeup/logrotate/packages/logrotate new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/ntp/_aws/packages/ntp b/upup/models/nodeup/ntp/_aws/packages/ntp new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/ntp/_aws/services/ntp b/upup/models/nodeup/ntp/_aws/services/ntp new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/top/_debian_family/packages/apt-transport-https b/upup/models/nodeup/top/_debian_family/packages/apt-transport-https new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/top/_debian_family/packages/nfs-common b/upup/models/nodeup/top/_debian_family/packages/nfs-common new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/top/_debian_family/packages/python-apt b/upup/models/nodeup/top/_debian_family/packages/python-apt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/top/_debian_family/packages/socat b/upup/models/nodeup/top/_debian_family/packages/socat new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/top/_redhat_family/packages/git b/upup/models/nodeup/top/_redhat_family/packages/git new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/top/_redhat_family/packages/python b/upup/models/nodeup/top/_redhat_family/packages/python new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/top/_ubuntu/packages/git b/upup/models/nodeup/top/_ubuntu/packages/git new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/top/_ubuntu/packages/netcat-traditional b/upup/models/nodeup/top/_ubuntu/packages/netcat-traditional new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/models/nodeup/top/files/usr/local/share/doc/kubernetes/LICENSES b/upup/models/nodeup/top/files/usr/local/share/doc/kubernetes/LICENSES new file mode 100644 index 0000000000..6db8ff074b --- /dev/null +++ b/upup/models/nodeup/top/files/usr/local/share/doc/kubernetes/LICENSES @@ -0,0 +1 @@ +TODO - where is this sourced from? 
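The .meta sidecars in this tree ({ "mode": "0755" }, { "ifNotExists": true }, { "onChangeExecute": [...] }) attach per-file directives for nodeup to apply. A minimal sketch of decoding one such sidecar (key names taken from the files above; the actual loader is not part of this hunk):

    package main

    import (
        "encoding/json"
        "fmt"
        "os"
        "strconv"
    )

    // fileMeta mirrors the keys that appear in the .meta sidecars above.
    type fileMeta struct {
        Mode            string   `json:"mode"`
        IfNotExists     bool     `json:"ifNotExists"`
        OnChangeExecute []string `json:"onChangeExecute"`
    }

    func main() {
        var m fileMeta
        if err := json.Unmarshal([]byte(`{ "mode": "0755" }`), &m); err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        if m.Mode != "" {
            bits, _ := strconv.ParseUint(m.Mode, 8, 32) // octal, as chmod reads it
            fmt.Printf("would chmod the managed file to %#o\n", os.FileMode(bits))
        }
    }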
\ No newline at end of file diff --git a/upup/models/nodeup/top/packages/curl b/upup/models/nodeup/top/packages/curl new file mode 100644 index 0000000000..e69de29bb2 diff --git a/upup/pkg/fi/assetstore.go b/upup/pkg/fi/assetstore.go new file mode 100644 index 0000000000..6798028b9a --- /dev/null +++ b/upup/pkg/fi/assetstore.go @@ -0,0 +1,283 @@ +package fi + +import ( + "fmt" + "github.com/golang/glog" + "io" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "net/http" + "os" + "os/exec" + "path" + "path/filepath" + "strings" +) + +type asset struct { + Key string + AssetPath string + resource Resource + source *Source +} + +type Source struct { + Parent *Source + URL string + Hash string + ExtractFromArchive string +} + +// Builds a unique key for this source +func (s *Source) Key() string { + var k string + if s.Parent != nil { + k = s.Parent.Key() + "/" + } + if s.URL != "" { + k += s.URL + } else if s.ExtractFromArchive != "" { + k += s.ExtractFromArchive + } else { + glog.Fatalf("expected either URL or ExtractFromArchive to be set") + } + return k +} + +func (s *Source) String() string { + return "Source[" + s.Key() + "]" +} + +type HasSource interface { + GetSource() *Source +} + +// assetResource implements Resource, but also implements HasFetchInstructions +type assetResource struct { + asset *asset +} + +var _ Resource = &assetResource{} +var _ HasSource = &assetResource{} + +func (r *assetResource) Open() (io.ReadSeeker, error) { + return r.asset.resource.Open() +} + +func (r *assetResource) GetSource() *Source { + return r.asset.source +} + +type AssetStore struct { + assetDir string + assets []*asset +} + +func NewAssetStore(assetDir string) *AssetStore { + a := &AssetStore{ + assetDir: assetDir, + } + return a +} +func (a *AssetStore) Find(key string, assetPath string) (Resource, error) { + var matches []*asset + for _, asset := range a.assets { + if asset.Key != key { + continue + } + + if assetPath != "" { + if !strings.HasSuffix(asset.AssetPath, assetPath) { + continue + } + } + + matches = append(matches, asset) + } + + if len(matches) == 0 { + return nil, nil + } + if len(matches) == 1 { + glog.Infof("Resolved asset %s:%s to %s", key, assetPath, matches[0].AssetPath) + return &assetResource{asset: matches[0]}, nil + } + + glog.Infof("Matching assets:") + for _, match := range matches { + glog.Infof(" %s %s", match.Key, match.AssetPath) + } + return nil, fmt.Errorf("found multiple matching assets for key: %q", key) +} + +func hashFromHttpHeader(url string) (string, error) { + glog.Infof("Doing HTTP HEAD on %q", url) + response, err := http.Head(url) + if err != nil { + return "", fmt.Errorf("error doing HEAD on %q: %v", url, err) + } + defer response.Body.Close() + + etag := response.Header.Get("ETag") + etag = strings.TrimSpace(etag) + etag = strings.Trim(etag, "'\"") + + if etag != "" { + if len(etag) == 32 { + // Likely md5 + return etag, nil + } + } + + return "", fmt.Errorf("unable to determine hash from HTTP HEAD: %q", url) +} + +func (a *AssetStore) Add(id string) error { + if strings.HasPrefix(id, "http://") || strings.HasPrefix(id, "https://") { + return a.addURL(id, "") + } + // TODO: local files!
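+ // (Sketch: a local path could be routed to an addFile helper like the one
+ // commented out below, e.g. if strings.HasPrefix(id, "/") { return a.addFile(id, id) })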
+ return fmt.Errorf("unknown asset format: %q", id) +} + +func (a *AssetStore) addURL(url string, hash string) error { + var err error + + if hash == "" { + hash, err = hashFromHttpHeader(url) + if err != nil { + return err + } + } + + localFile := path.Join(a.assetDir, hash+"_"+utils.SanitizeString(url)) + _, err = DownloadURL(url, localFile, hash) + if err != nil { + return err + } + + key := path.Base(url) + assetPath := url + r := NewFileResource(localFile) + + source := &Source{URL: url, Hash: hash} + + asset := &asset{ + Key: key, + AssetPath: assetPath, + resource: r, + source: source, + } + glog.V(2).Infof("added asset %q for %q", asset.Key, asset.resource) + a.assets = append(a.assets, asset) + + if strings.HasSuffix(assetPath, ".tar.gz") { + err = a.addArchive(source, localFile) + if err != nil { + return err + } + } + + return nil +} + +//func (a *AssetStore) addFile(assetPath string, p string) error { +// r := NewFileResource(p) +// return a.addResource(assetPath, r) +//} + +//func (a *AssetStore) addResource(assetPath string, r Resource) error { +// hash, err := HashForResource(r, HashAlgorithmSHA256) +// if err != nil { +// return err +// } +// +// localFile := path.Join(a.assetDir, hash + "_" + utils.SanitizeString(assetPath)) +// hasHash, err := fileHasHash(localFile, hash) +// if err != nil { +// return err +// } +// +// if !hasHash { +// err = WriteFile(localFile, r, 0644, 0755) +// if err != nil { +// return err +// } +// } +// +// asset := &asset{ +// Key: localFile, +// AssetPath: assetPath, +// resource: r, +// } +// glog.V(2).Infof("added asset %q for %q", asset.Key, asset.resource) +// a.assets = append(a.assets, asset) +// +// if strings.HasSuffix(assetPath, ".tar.gz") { +// err = a.addArchive(localFile) +// if err != nil { +// return err +// } +// } +// +// return nil +//} + +func (a *AssetStore) addArchive(archiveSource *Source, archiveFile string) error { + extracted := path.Join(a.assetDir, "extracted/"+path.Base(archiveFile)) + + // TODO: Use a temp file so this is atomic + if _, err := os.Stat(extracted); os.IsNotExist(err) { + err := os.MkdirAll(extracted, 0755) + if err != nil { + return fmt.Errorf("error creating directories %q: %v", path.Dir(extracted), err) + } + + args := []string{"tar", "zxf", archiveFile, "-C", extracted} + glog.Infof("running extract command %s", args) + cmd := exec.Command(args[0], args[1:]...) 
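+ // CombinedOutput (below) runs the tar child and captures stdout and stderr
+ // in a single buffer, so extraction failures surface in the returned error.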
+ output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("error expanding asset file %q %v: %s", archiveFile, err, string(output)) + } + } + + localBase := extracted + assetBase := "" + + walker := func(localPath string, info os.FileInfo, err error) error { + if err != nil { + return fmt.Errorf("error descending into path %q: %v", localPath, err) + } + + if info.IsDir() { + return nil + } + + relativePath, err := filepath.Rel(localBase, localPath) + if err != nil { + return fmt.Errorf("error finding relative path for %q: %v", localPath, err) + } + + assetPath := path.Join(assetBase, relativePath) + key := info.Name() + r := NewFileResource(localPath) + + asset := &asset{ + Key: key, + AssetPath: assetPath, + resource: r, + source: &Source{Parent: archiveSource, ExtractFromArchive: assetPath}, + } + glog.V(2).Infof("added asset %q for %q", asset.Key, asset.resource) + a.assets = append(a.assets, asset) + + return nil + } + + err := filepath.Walk(localBase, walker) + if err != nil { + return fmt.Errorf("error adding expanded asset files in %q: %v", extracted, err) + } + return nil + +} diff --git a/upup/pkg/fi/ca.go b/upup/pkg/fi/ca.go new file mode 100644 index 0000000000..dc6c20c517 --- /dev/null +++ b/upup/pkg/fi/ca.go @@ -0,0 +1,238 @@ +package fi + +import ( + "bytes" + "crypto" + crypto_rand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/base64" + "encoding/json" + "encoding/pem" + "fmt" + "github.com/golang/glog" + "io" + "math/big" + "time" +) + +type Certificate struct { + Subject pkix.Name + IsCA bool + + Certificate *x509.Certificate + PublicKey crypto.PublicKey +} + +func (c *Certificate) UnmarshalJSON(b []byte) (err error) { + s := "" + if err = json.Unmarshal(b, &s); err == nil { + d, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return fmt.Errorf("error decoding certificate base64 data: %q", string(b)) + } + r, err := LoadPEMCertificate(d) + if err != nil { + glog.Infof("Invalid certificate data: %q", string(b)) + return fmt.Errorf("error parsing certificate: %v", err) + } + *c = *r + return nil + } + return fmt.Errorf("unknown format for Certificate: %q", string(b)) +} + +type CAStore interface { + Cert(id string) (*Certificate, error) + PrivateKey(id string) (*PrivateKey, error) + + FindCert(id string) (*Certificate, error) + FindPrivateKey(id string) (*PrivateKey, error) + + IssueCert(id string, privateKey *PrivateKey, template *x509.Certificate) (*Certificate, error) + CreatePrivateKey(id string) (*PrivateKey, error) +} + +func (c *Certificate) AsString() (string, error) { + // Nicer behaviour because this is called from templates + if c == nil { + return "", fmt.Errorf("AsString called on nil Certificate") + } + + var data bytes.Buffer + err := c.WriteCertificate(&data) + if err != nil { + return "", fmt.Errorf("error writing SSL certificate: %v", err) + } + return data.String(), nil +} + +type PrivateKey struct { + Key crypto.PrivateKey +} + +func (c *PrivateKey) AsString() (string, error) { + // Nicer behaviour because this is called from templates + if c == nil { + return "", fmt.Errorf("AsString called on nil PrivateKey") + } + + var data bytes.Buffer + err := WritePrivateKey(c.Key, &data) + if err != nil { + return "", fmt.Errorf("error writing SSL private key: %v", err) + } + return data.String(), nil +} + +func (c *PrivateKey) UnmarshalJSON(b []byte) (err error) { + s := "" + if err = json.Unmarshal(b, &s); err == nil { + d, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return
fmt.Errorf("error decoding private key base64 data: %q", string(b)) + } + k, err := parsePEMPrivateKey(d) + if err != nil { + return fmt.Errorf("error parsing private key: %v", err) + } + c.Key = k + return nil + } + return fmt.Errorf("unknown format for private key: %q", string(b)) +} + +func LoadPEMCertificate(pemData []byte) (*Certificate, error) { + cert, err := parsePEMCertificate(pemData) + if err != nil { + return nil, err + } + + c := &Certificate{ + Subject: cert.Subject, + Certificate: cert, + PublicKey: cert.PublicKey, + IsCA: cert.IsCA, + } + return c, nil +} + +func SignNewCertificate(privateKey *PrivateKey, template *x509.Certificate, signer *x509.Certificate, signerPrivateKey *PrivateKey) (*Certificate, error) { + if template.PublicKey == nil { + rsaPrivateKey, ok := privateKey.Key.(*rsa.PrivateKey) + if ok { + template.PublicKey = rsaPrivateKey.Public() + } + } + + if template.PublicKey == nil { + return nil, fmt.Errorf("PublicKey not set, and cannot be determined from %T", privateKey) + } + + now := time.Now() + if template.NotBefore.IsZero() { + template.NotBefore = now.Add(time.Hour * -48) + } + + if template.NotAfter.IsZero() { + template.NotAfter = now.Add(time.Hour * 10 * 365 * 24) + } + + if template.SerialNumber == nil { + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := crypto_rand.Int(crypto_rand.Reader, serialNumberLimit) + if err != nil { + return nil, fmt.Errorf("error generating certificate serial number: %s", err) + } + template.SerialNumber = serialNumber + } + var parent *x509.Certificate + if signer != nil { + parent = signer + } else { + parent = template + signerPrivateKey = privateKey + } + + if template.KeyUsage == 0 { + template.KeyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment + } + + if template.ExtKeyUsage == nil { + template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} + } + //c.SignatureAlgorithm = do we want to override?
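+ // Illustrative call shapes (hypothetical variable names): a self-signed CA
+ // when signer is nil, then a leaf certificate signed by that CA:
+ //
+ //   ca, _ := SignNewCertificate(caKey, caTemplate, nil, nil)
+ //   leaf, _ := SignNewCertificate(leafKey, leafTemplate, ca.Certificate, caKey)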
+ + certificateData, err := x509.CreateCertificate(crypto_rand.Reader, template, parent, template.PublicKey, signerPrivateKey.Key) + if err != nil { + return nil, fmt.Errorf("error creating certificate: %v", err) + } + + c := &Certificate{} + c.PublicKey = template.PublicKey + + cert, err := x509.ParseCertificate(certificateData) + if err != nil { + return nil, fmt.Errorf("error parsing certificate: %v", err) + } + c.Certificate = cert + + return c, nil +} + +func (c *Certificate) WriteCertificate(w io.Writer) error { + return pem.Encode(w, &pem.Block{Type: "CERTIFICATE", Bytes: c.Certificate.Raw}) +} + +func parsePEMCertificate(pemData []byte) (*x509.Certificate, error) { + for { + block, rest := pem.Decode(pemData) + if block == nil { + return nil, fmt.Errorf("could not parse certificate") + } + + if block.Type == "CERTIFICATE" { + glog.V(8).Infof("Parsing pem block: %q", block.Type) + return x509.ParseCertificate(block.Bytes) + } else { + glog.Infof("Ignoring unexpected PEM block: %q", block.Type) + } + + pemData = rest + } +} + +func WritePrivateKey(privateKey crypto.PrivateKey, w io.Writer) error { + rsaPrivateKey, ok := privateKey.(*rsa.PrivateKey) + if ok { + return pem.Encode(w, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(rsaPrivateKey)}) + } + + return fmt.Errorf("unknown private key type: %T", privateKey) +} + +func parsePEMPrivateKey(pemData []byte) (crypto.PrivateKey, error) { + for { + block, rest := pem.Decode(pemData) + if block == nil { + return nil, fmt.Errorf("could not parse private key") + } + + if block.Type == "RSA PRIVATE KEY" { + glog.V(8).Infof("Parsing pem block: %q", block.Type) + return x509.ParsePKCS1PrivateKey(block.Bytes) + } else if block.Type == "PRIVATE KEY" { + glog.V(8).Infof("Parsing pem block: %q", block.Type) + k, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + return k.(crypto.PrivateKey), nil + } else { + glog.Infof("Ignoring unexpected PEM block: %q", block.Type) + } + + pemData = rest + } +} diff --git a/upup/pkg/fi/changes.go b/upup/pkg/fi/changes.go new file mode 100644 index 0000000000..f5a2c7c62d --- /dev/null +++ b/upup/pkg/fi/changes.go @@ -0,0 +1,133 @@ +package fi + +import ( + "github.com/golang/glog" + "reflect" +) + +// An important part of our state synchronization is to compare two tasks, to see what has changed +// Doing so means that the tasks don't have to have this logic, and we can reuse this for dry-run. +// We do so using reflection. We have a custom notion of equality for Resources and for Tasks that implement HasID. +// A task that implements CompareWithID is compared by ID alone. + +// BuildChanges compares the values of a & e, and populates differences into changes, +// except that if a value is nil in e, the corresponding value in a is ignored. 
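+// For example, with the PersistentDisk task defined later in this patch
+// (fi.String as used there; i64 stands in for an assumed Int64 helper):
+//
+//   a := &PersistentDisk{Name: fi.String("d1"), SizeGB: i64(100), Zone: fi.String("us-central1-a")}
+//   e := &PersistentDisk{Name: fi.String("d1"), SizeGB: i64(200)}  // nil Zone: don't care
+//   changes := &PersistentDisk{}
+//   BuildChanges(a, e, changes)  // true: changes.SizeGB is set, changes.Zone stays nil
+//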
+// a, e and changes must all be of the same type +// a is the actual object found, e is the expected value +// Note that the ignore-nil-in-e logic therefore implements the idea that nil value in e means "don't care" +// If a is nil, all the non-nil values in e will be copied over to changes, because every field in e must be applied +func BuildChanges(a, e, changes interface{}) bool { + changed := false + + vc := reflect.ValueOf(changes) + vc = vc.Elem() + t := vc.Type() + + ve := reflect.ValueOf(e) + ve = ve.Elem() + if t != ve.Type() { + panic("mismatched types in BuildChanges") + } + + va := reflect.ValueOf(a) + aIsNil := false + if va.IsNil() { + aIsNil = true + } + if !aIsNil { + va = va.Elem() + + if t != va.Type() { + panic("mismatched types in BuildChanges") + } + } + + for i := 0; i < ve.NumField(); i++ { + if t.Field(i).PkgPath != "" { + // unexported: ignore + continue + } + + fve := ve.Field(i) + if fve.Kind() == reflect.Ptr && fve.IsNil() { + // Nil expected value means 'don't care' + continue + } + + if !aIsNil { + fva := va.Field(i) + + if equalFieldValues(fva, fve) { + continue + } + + glog.V(8).Infof("Field changed %q actual=%q expected=%q", t.Field(i).Name, DebugPrint(fva.Interface()), DebugPrint(fve.Interface())) + } + changed = true + vc.Field(i).Set(fve) + } + + return changed +} + +// equalFieldValues implements our equality checking, with special cases for resources and tasks +func equalFieldValues(a, e reflect.Value) bool { + if a.Kind() == reflect.Map { + return equalMapValues(a, e) + } + if (a.Kind() == reflect.Ptr || a.Kind() == reflect.Interface) && !a.IsNil() { + aHasID, ok := a.Interface().(CompareWithID) + if ok && (e.Kind() == reflect.Ptr || e.Kind() == reflect.Interface) && !e.IsNil() { + eHasID, ok := e.Interface().(CompareWithID) + if ok { + aID := aHasID.CompareWithID() + eID := eHasID.CompareWithID() + if aID != nil && eID != nil && *aID == *eID { + return true + } + } + } + + aResource, ok := a.Interface().(Resource) + if ok && (e.Kind() == reflect.Ptr || e.Kind() == reflect.Interface) && !e.IsNil() { + eResource, ok := e.Interface().(Resource) + if ok { + same, err := ResourcesMatch(aResource, eResource) + if err != nil { + glog.Fatalf("error while comparing resources: %v", err) + } else { + return same + } + } + } + } + if reflect.DeepEqual(a.Interface(), e.Interface()) { + return true + } + return false +} + +// equalMapValues performs a deep-equality check on a map, but using our custom comparison logic (equalFieldValues) +func equalMapValues(a, e reflect.Value) bool { + if a.IsNil() != e.IsNil() { + return false + } + if a.IsNil() && e.IsNil() { + return true + } + if a.Len() != e.Len() { + return false + } + for _, k := range a.MapKeys() { + valA := a.MapIndex(k) + valE := e.MapIndex(k) + + glog.Infof("comparing maps: %v %v %v", k, valA, valE) + + if !equalFieldValues(valA, valE) { + glog.Infof("unequal map value: %v %v %v", k, valA, valE) + return false + } + } + return true +} diff --git a/upup/pkg/fi/cloud.go b/upup/pkg/fi/cloud.go new file mode 100644 index 0000000000..846ae28468 --- /dev/null +++ b/upup/pkg/fi/cloud.go @@ -0,0 +1,10 @@ +package fi + +type ProviderID string + +const ProviderAWS ProviderID = "aws" +const ProviderGCE ProviderID = "gce" + +type Cloud interface { + ProviderID() ProviderID +} diff --git a/upup/pkg/fi/cloudup/config.go b/upup/pkg/fi/cloudup/config.go new file mode 100644 index 0000000000..46f07f6c54 --- /dev/null +++ b/upup/pkg/fi/cloudup/config.go @@ -0,0 +1,171 @@ +package cloudup + +import ( + "encoding/binary"
+ "fmt" + "math/big" + "net" +) + +type CloudConfig struct { + CloudProvider string + + // The version of kubernetes to install + KubernetesVersion string + + // The Node initializer technique to use: cloudinit or nodeup + NodeInit string + + InstancePrefix string + ClusterName string + AllocateNodeCIDRs bool + Zone string + Project string + + Multizone bool + + ClusterIPRange string + ServiceClusterIPRange string + MasterIPRange string + NonMasqueradeCidr string + + NetworkProvider string + + HairpinMode string + + OpencontrailTag string + OpencontrailKubernetesTag string + OpencontrailPublicSubnet string + + EnableClusterMonitoring string + EnableL7LoadBalancing string + EnableClusterUI bool + + EnableClusterDNS bool + DNSReplicas int + DNSServerIP string + DNSDomain string + + EnableClusterLogging bool + EnableNodeLogging bool + LoggingDestination string + ElasticsearchLoggingReplicas int + + EnableClusterRegistry bool + ClusterRegistryDisk string + ClusterRegistryDiskSize int + + EnableCustomMetrics bool + + MasterName string + RegisterMasterKubelet bool + MasterVolumeType string + MasterVolumeSize int + MasterTag string + MasterInternalIP string + MasterPublicIP string + MasterMachineType string + MasterImage string + + NodeImage string + NodeCount int + NodeInstancePrefix string + NodeLabels string + NodeMachineType string + NodeTag string + + KubeUser string + + // These are moved to CAStore / SecretStore + //KubePassword string + //KubeletToken string + //KubeProxyToken string + //BearerToken string + //CACert []byte + //CAKey []byte + //KubeletCert []byte + //KubeletKey []byte + //MasterCert []byte + //MasterKey []byte + //KubecfgCert []byte + //KubecfgKey []byte + + AdmissionControl string + RuntimeConfig string + + KubeImageTag string + KubeDockerRegistry string + KubeAddonRegistry string + + KubeletPort int + + KubeApiserverRequestTimeout int + + TerminatedPodGcThreshold string + + EnableManifestURL bool + ManifestURL string + ManifestURLHeader string + + TestCluster string + + DockerOptions string + DockerStorage string + ExtraDockerOpts string + + E2EStorageTestEnvironment string + KubeletTestArgs string + KubeletTestLogLevel string + DockerTestArgs string + DockerTestLogLevel string + ApiserverTestArgs string + ApiserverTestLogLevel string + ControllerManagerTestArgs string + ControllerManagerTestLogLevel string + SchedulerTestArgs string + SchedulerTestLogLevel string + KubeProxyTestArgs string + KubeProxyTestLogLevel string + + Assets []string + + NodeUp NodeUpConfig +} + +type NodeUpConfig struct { + Location string + Hash string +} + +func (c *CloudConfig) WellKnownServiceIP(id int) (net.IP, error) { + _, cidr, err := net.ParseCIDR(c.ServiceClusterIPRange) + if err != nil { + return nil, fmt.Errorf("error parsing ServiceClusterIPRange: %v", err) + } + + ip4 := cidr.IP.To4() + if ip4 != nil { + n := binary.BigEndian.Uint32(ip4) + n += uint32(id) + serviceIP := make(net.IP, len(ip4)) + binary.BigEndian.PutUint32(serviceIP, n) + return serviceIP, nil + } + + ip6 := cidr.IP.To16() + if ip6 != nil { + baseIPInt := big.NewInt(0) + baseIPInt.SetBytes(ip6) + serviceIPInt := big.NewInt(0) + serviceIPInt.Add(big.NewInt(int64(id)), baseIPInt) + serviceIP := make(net.IP, len(ip6)) + serviceIPBytes := serviceIPInt.Bytes() + for i := range serviceIPBytes { + serviceIP[len(serviceIP)-len(serviceIPBytes)+i] = serviceIPBytes[i] + } + return serviceIP, nil + } + + return nil, fmt.Errorf("Unexpected IP address type for ServiceClusterIPRange: %s", c.ServiceClusterIPRange) + +} diff --git 
a/upup/pkg/fi/cloudup/gce/gce_apitarget.go b/upup/pkg/fi/cloudup/gce/gce_apitarget.go new file mode 100644 index 0000000000..bf52f02a41 --- /dev/null +++ b/upup/pkg/fi/cloudup/gce/gce_apitarget.go @@ -0,0 +1,19 @@ +package gce + +import "k8s.io/kube-deploy/upup/pkg/fi" + +type GCEAPITarget struct { + Cloud *GCECloud +} + +var _ fi.Target = &GCEAPITarget{} + +func NewGCEAPITarget(cloud *GCECloud) *GCEAPITarget { + return &GCEAPITarget{ + Cloud: cloud, + } +} + +func (t *GCEAPITarget) Finish(taskMap map[string]fi.Task) error { + return nil +} diff --git a/upup/pkg/fi/cloudup/gce/gce_cloud.go b/upup/pkg/fi/cloudup/gce/gce_cloud.go new file mode 100644 index 0000000000..45b1e90787 --- /dev/null +++ b/upup/pkg/fi/cloudup/gce/gce_cloud.go @@ -0,0 +1,51 @@ +package gce + +import ( + "fmt" + + "golang.org/x/net/context" + "golang.org/x/oauth2/google" + "google.golang.org/api/compute/v1" + "google.golang.org/api/storage/v1" + "k8s.io/kube-deploy/upup/pkg/fi" +) + +type GCECloud struct { + Compute *compute.Service + Storage *storage.Service + + Region string + Project string + + //tags map[string]string +} + +var _ fi.Cloud = &GCECloud{} + +func (c *GCECloud) ProviderID() fi.ProviderID { + return fi.ProviderGCE +} + +func NewGCECloud(region string, project string) (*GCECloud, error) { + c := &GCECloud{Region: region, Project: project} + + ctx := context.Background() + + client, err := google.DefaultClient(ctx, compute.ComputeScope) + if err != nil { + return nil, fmt.Errorf("error building google API client: %v", err) + } + computeService, err := compute.New(client) + if err != nil { + return nil, fmt.Errorf("error building compute API client: %v", err) + } + c.Compute = computeService + + storageService, err := storage.New(client) + if err != nil { + return nil, fmt.Errorf("error building storage API client: %v", err) + } + c.Storage = storageService + + return c, nil +} diff --git a/upup/pkg/fi/cloudup/gce/gce_url.go b/upup/pkg/fi/cloudup/gce/gce_url.go new file mode 100644 index 0000000000..4c7e262ac4 --- /dev/null +++ b/upup/pkg/fi/cloudup/gce/gce_url.go @@ -0,0 +1,82 @@ +package gce + +import ( + "fmt" + "strings" +) + +type GoogleCloudURL struct { + Project string + Type string + Name string + Global bool + Zone string +} + +func (u *GoogleCloudURL) BuildURL() string { + url := "https://www.googleapis.com/compute/v1/" + if u.Project != "" { + url += "projects/" + u.Project + "/" + } + if u.Global { + url += "global/" + } + if u.Zone != "" { + url += "zones/" + u.Zone + "/" + } + url += u.Type + "/" + u.Name + return url +} + +func ParseGoogleCloudURL(u string) (*GoogleCloudURL, error) { + tokens := strings.Split(u, "/") + if len(tokens) < 3 { + return nil, fmt.Errorf("invalid google cloud URL (token count): %q", u) + } + + if tokens[0] != "https:" || tokens[1] != "" || tokens[2] != "www.googleapis.com" { + return nil, fmt.Errorf("invalid google cloud URL (schema / host): %q", u) + } + + if len(tokens) < 5 || tokens[3] != "compute" || tokens[4] != "v1" { + return nil, fmt.Errorf("invalid google cloud URL (not compute/v1): %q", u) + } + + parsed := &GoogleCloudURL{} + pos := 5 + for { + if pos >= len(tokens) { + return nil, fmt.Errorf("invalid google cloud URL (unexpected end): %q", u) + } + t := tokens[pos] + if t == "projects" { + pos++ + if pos >= len(tokens) { + return nil, fmt.Errorf("invalid google cloud URL (unexpected projects): %q", u) + } + parsed.Project = tokens[pos] + } else if t == "zones" { + pos++ + if pos >= len(tokens) { + return nil, fmt.Errorf("invalid google cloud URL 
(unexpected zones): %q", u) + } + parsed.Zone = tokens[pos] + } else if t == "global" { + parsed.Global = true + } else { + parsed.Type = tokens[pos] + pos++ + if pos >= len(tokens) { + return nil, fmt.Errorf("invalid google cloud URL (no name): %q", u) + } + parsed.Name = tokens[pos] + pos++ + if pos != len(tokens) { + return nil, fmt.Errorf("invalid google cloud URL (content after name): %q", u) + } else { + return parsed, nil + } + } + pos++ + } +} diff --git a/upup/pkg/fi/cloudup/gce/utils.go b/upup/pkg/fi/cloudup/gce/utils.go new file mode 100644 index 0000000000..3b4897f8c9 --- /dev/null +++ b/upup/pkg/fi/cloudup/gce/utils.go @@ -0,0 +1,30 @@ +package gce + +import ( + "google.golang.org/api/googleapi" +) + +func IsNotFound(err error) bool { + apiErr, ok := err.(*googleapi.Error) + if !ok { + return false + } + + // We could also check for Errors[].Resource == "notFound" + //glog.Info("apiErr: %v", apiErr) + + return apiErr.Code == 404 +} + +func IsNotReady(err error) bool { + apiErr, ok := err.(*googleapi.Error) + if !ok { + return false + } + for _, e := range apiErr.Errors { + if e.Reason == "resourceNotReady" { + return true + } + } + return false +} diff --git a/upup/pkg/fi/cloudup/gcetasks/QUESTIONS.md b/upup/pkg/fi/cloudup/gcetasks/QUESTIONS.md new file mode 100644 index 0000000000..16c73ea240 --- /dev/null +++ b/upup/pkg/fi/cloudup/gcetasks/QUESTIONS.md @@ -0,0 +1,5 @@ +Why is the default network created as 10.240.0.0/16 yet the nodes are in 10.244.0.0/16? + +Should we be using local SSDs for the nodes? + +Should we be using NVME? diff --git a/upup/pkg/fi/cloudup/gcetasks/disk.go b/upup/pkg/fi/cloudup/gcetasks/disk.go new file mode 100644 index 0000000000..bd15da00b8 --- /dev/null +++ b/upup/pkg/fi/cloudup/gcetasks/disk.go @@ -0,0 +1,131 @@ +package gcetasks + +import ( + "fmt" + + "google.golang.org/api/compute/v1" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/gce" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "strings" +) + +type PersistentDisk struct { + Name *string + VolumeType *string + SizeGB *int64 + Zone *string +} + +func (d *PersistentDisk) String() string { + return utils.JsonString(d) +} + +func (d *PersistentDisk) CompareWithID() *string { + return d.Name +} + +// Returns the last component of a URL, i.e. 
anything after the last slash +// If there is no slash, returns the whole string +func lastComponent(s string) string { + lastSlash := strings.LastIndex(s, "/") + if lastSlash != -1 { + s = s[lastSlash+1:] + } + return s +} + +func (e *PersistentDisk) Find(c *fi.Context) (*PersistentDisk, error) { + cloud := c.Cloud.(*gce.GCECloud) + + r, err := cloud.Compute.Disks.Get(cloud.Project, *e.Zone, *e.Name).Do() + if err != nil { + if gce.IsNotFound(err) { + return nil, nil + } + return nil, fmt.Errorf("error listing PersistentDisks: %v", err) + } + + actual := &PersistentDisk{} + actual.Name = &r.Name + actual.VolumeType = fi.String(lastComponent(r.Type)) + actual.Zone = fi.String(lastComponent(r.Zone)) + actual.SizeGB = &r.SizeGb + + return actual, nil +} + +func (e *PersistentDisk) URL(project string) string { + u := &gce.GoogleCloudURL{ + Project: project, + Zone: *e.Zone, + Type: "disks", + Name: *e.Name, + } + return u.BuildURL() +} + +func (e *PersistentDisk) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (s *PersistentDisk) CheckChanges(a, e, changes *PersistentDisk) error { + if a != nil { + if changes.SizeGB != nil { + return fi.CannotChangeField("SizeGB") + } + if changes.Zone != nil { + return fi.CannotChangeField("Zone") + } + if changes.VolumeType != nil { + return fi.CannotChangeField("VolumeType") + } + } else { + if e.Zone == nil { + return fi.RequiredField("Zone") + } + } + return nil +} + +func (_ *PersistentDisk) RenderGCE(t *gce.GCEAPITarget, a, e, changes *PersistentDisk) error { + typeURL := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/diskTypes/%s", + t.Cloud.Project, + *e.Zone, + *e.VolumeType) + + disk := &compute.Disk{ + Name: *e.Name, + SizeGb: *e.SizeGB, + Type: typeURL, + } + + if a == nil { + _, err := t.Cloud.Compute.Disks.Insert(t.Cloud.Project, *e.Zone, disk).Do() + if err != nil { + return fmt.Errorf("error creating PersistentDisk: %v", err) + } + } else { + return fmt.Errorf("Cannot apply changes to PersistentDisk: %v", changes) + } + + return nil +} + +type terraformDisk struct { + Name *string `json:"name"` + VolumeType *string `json:"type"` + SizeGB *int64 `json:"size"` + Zone *string `json:"zone"` +} + +func (_ *PersistentDisk) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *PersistentDisk) error { + tf := &terraformDisk{ + Name: e.Name, + VolumeType: e.VolumeType, + SizeGB: e.SizeGB, + Zone: e.Zone, + } + return t.RenderResource("google_compute_disk", *e.Name, tf) +} diff --git a/upup/pkg/fi/cloudup/gcetasks/firewall_rule.go b/upup/pkg/fi/cloudup/gcetasks/firewall_rule.go new file mode 100644 index 0000000000..df616bcaa6 --- /dev/null +++ b/upup/pkg/fi/cloudup/gcetasks/firewall_rule.go @@ -0,0 +1,181 @@ +package gcetasks + +import ( + "fmt" + + "google.golang.org/api/compute/v1" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/gce" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "strings" +) + +type FirewallRule struct { + Name *string + Network *Network + SourceTags []string + SourceRanges []string + TargetTags []string + Allowed []string +} + +func (d *FirewallRule) String() string { + return utils.JsonString(d) +} + +func (d *FirewallRule) CompareWithID() *string { + return d.Name +} + +func (e *FirewallRule) Find(c *fi.Context) (*FirewallRule, error) { + cloud := c.Cloud.(*gce.GCECloud) + + r, err := cloud.Compute.Firewalls.Get(cloud.Project, *e.Name).Do() + if err != nil { + if gce.IsNotFound(err) { + 
return nil, nil + } + return nil, fmt.Errorf("error listing FirewallRules: %v", err) + } + + actual := &FirewallRule{} + actual.Name = &r.Name + actual.Network = &Network{Name: fi.String(lastComponent(r.Network))} + actual.TargetTags = r.TargetTags + actual.SourceRanges = r.SourceRanges + actual.SourceTags = r.SourceTags + for _, a := range r.Allowed { + actual.Allowed = append(actual.Allowed, serializeFirewallAllowed(a)) + } + + return actual, nil +} + +func (e *FirewallRule) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (s *FirewallRule) CheckChanges(a, e, changes *FirewallRule) error { + return nil +} + +func parseFirewallAllowed(rule string) (*compute.FirewallAllowed, error) { + o := &compute.FirewallAllowed{} + + tokens := strings.Split(rule, ":") + if len(tokens) < 1 || len(tokens) > 2 { + return nil, fmt.Errorf("expected protocol[:portspec] in firewall rule %q", rule) + } + + o.IPProtocol = tokens[0] + if len(tokens) == 1 { + return o, nil + } + + o.Ports = []string{tokens[1]} + return o, nil +} + +func serializeFirewallAllowed(r *compute.FirewallAllowed) string { + if len(r.Ports) == 0 { + return r.IPProtocol + } + + var tokens []string + for _, ports := range r.Ports { + tokens = append(tokens, r.IPProtocol+":"+ports) + } + + return strings.Join(tokens, ",") +} + +func (e *FirewallRule) mapToGCE(project string) (*compute.Firewall, error) { + var allowed []*compute.FirewallAllowed + if e.Allowed != nil { + for _, a := range e.Allowed { + p, err := parseFirewallAllowed(a) + if err != nil { + return nil, err + } + allowed = append(allowed, p) + } + } + firewall := &compute.Firewall{ + Name: *e.Name, + Network: e.Network.URL(project), + SourceTags: e.SourceTags, + SourceRanges: e.SourceRanges, + TargetTags: e.TargetTags, + Allowed: allowed, + } + return firewall, nil +} + +func (_ *FirewallRule) RenderGCE(t *gce.GCEAPITarget, a, e, changes *FirewallRule) error { + firewall, err := e.mapToGCE(t.Cloud.Project) + if err != nil { + return err + } + + if a == nil { + _, err := t.Cloud.Compute.Firewalls.Insert(t.Cloud.Project, firewall).Do() + if err != nil { + return fmt.Errorf("error creating FirewallRule: %v", err) + } + } else { + _, err := t.Cloud.Compute.Firewalls.Update(t.Cloud.Project, *e.Name, firewall).Do() + if err != nil { + return fmt.Errorf("error updating FirewallRule: %v", err) + } + } + + return nil +} + +type terraformAllow struct { + Protocol string `json:"protocol,omitempty"` + Ports []string `json:"ports,omitempty"` +} + +type terraformFirewall struct { + Name string `json:"name"` + Network *terraform.Literal `json:"network"` + + Allowed []*terraformAllow `json:"allow,omitempty"` + + SourceTags []string `json:"source_tags,omitempty"` + + SourceRanges []string `json:"source_ranges,omitempty"` + TargetTags []string `json:"target_tags,omitempty"` +} + +func (_ *FirewallRule) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *FirewallRule) error { + g, err := e.mapToGCE(t.Project) + if err != nil { + return err + } + + var allowed []*terraformAllow + if g.Allowed != nil { + for _, ga := range g.Allowed { + a := &terraformAllow{ + Protocol: ga.IPProtocol, + Ports: ga.Ports, + } + + allowed = append(allowed, a) + } + } + tf := &terraformFirewall{ + Name: g.Name, + SourceTags: g.SourceTags, + SourceRanges: g.SourceRanges, + TargetTags: g.TargetTags, + Allowed: allowed, + } + + // TODO: This doesn't seem right, but it looks like a TF problem + tf.Network = e.Network.TerraformName() + + return t.RenderResource("google_compute_firewall", *e.Name, tf) +} diff --git
a/upup/pkg/fi/cloudup/gcetasks/instance.go b/upup/pkg/fi/cloudup/gcetasks/instance.go new file mode 100644 index 0000000000..f6e5ed8674 --- /dev/null +++ b/upup/pkg/fi/cloudup/gcetasks/instance.go @@ -0,0 +1,425 @@ +package gcetasks + +import ( + "fmt" + + "github.com/golang/glog" + "google.golang.org/api/compute/v1" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/gce" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "strings" +) + +var scopeAliases map[string]string + +type Instance struct { + Name *string + Network *Network + Tags []string + Preemptible *bool + Image *string + Disks map[string]*PersistentDisk + + CanIPForward *bool + IPAddress *IPAddress + Subnet *Subnet + + Scopes []string + + Metadata map[string]fi.Resource + Zone *string + MachineType *string +} + +func (d *Instance) String() string { + return utils.JsonString(d) +} + +func (d *Instance) CompareWithID() *string { + return d.Name +} + +func (e *Instance) Find(c *fi.Context) (*Instance, error) { + cloud := c.Cloud.(*gce.GCECloud) + + r, err := cloud.Compute.Instances.Get(cloud.Project, *e.Zone, *e.Name).Do() + if err != nil { + if gce.IsNotFound(err) { + return nil, nil + } + return nil, fmt.Errorf("error listing Instances: %v", err) + } + + actual := &Instance{} + actual.Name = &r.Name + for _, tag := range r.Tags.Items { + actual.Tags = append(actual.Tags, tag) + } + actual.Zone = fi.String(lastComponent(r.Zone)) + actual.MachineType = fi.String(lastComponent(r.MachineType)) + actual.CanIPForward = &r.CanIpForward + actual.Image = &r.Disks[0].Source + + if r.Scheduling != nil { + actual.Preemptible = &r.Scheduling.Preemptible + } + if len(r.NetworkInterfaces) != 0 { + ni := r.NetworkInterfaces[0] + actual.Network = &Network{Name: fi.String(lastComponent(ni.Network))} + if len(ni.AccessConfigs) != 0 { + ac := ni.AccessConfigs[0] + if ac.NatIP != "" { + addr, err := cloud.Compute.Addresses.List(cloud.Project, cloud.Region).Filter("address eq " + ac.NatIP).Do() + if err != nil { + return nil, fmt.Errorf("error querying for address %q: %v", ac.NatIP, err) + } else if len(addr.Items) != 0 { + actual.IPAddress = &IPAddress{Name: &addr.Items[0].Name} + } else { + return nil, fmt.Errorf("address not found %q: %v", ac.NatIP, err) + } + } + } + } + + for _, serviceAccount := range r.ServiceAccounts { + for _, scope := range serviceAccount.Scopes { + actual.Scopes = append(actual.Scopes, scopeToShortForm(scope)) + } + } + + actual.Disks = make(map[string]*PersistentDisk) + for i, disk := range r.Disks { + if i == 0 { + source := disk.Source + + // TODO: Parse source URL instead of assuming same project/zone? 
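+ // A possible sketch (hypothetical, not wired in): the gce.ParseGoogleCloudURL helper from this patch could resolve the disk's actual location instead of assuming it, e.g.: + // if u, err := gce.ParseGoogleCloudURL(source); err == nil { + // d, err := cloud.Compute.Disks.Get(u.Project, u.Zone, u.Name).Do() + // ... + // }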
+ name := lastComponent(source) + d, err := cloud.Compute.Disks.Get(cloud.Project, *e.Zone, name).Do() + if err != nil { + if gce.IsNotFound(err) { + return nil, fmt.Errorf("disk not found %q: %v", source, err) + } + return nil, fmt.Errorf("error querying for disk %q: %v", source, err) + } else { + imageURL, err := gce.ParseGoogleCloudURL(d.SourceImage) + if err != nil { + return nil, fmt.Errorf("unable to parse image URL: %q", d.SourceImage) + } + actual.Image = fi.String(imageURL.Project + "/" + imageURL.Name) + } + } else { + url, err := gce.ParseGoogleCloudURL(disk.Source) + if err != nil { + return nil, fmt.Errorf("unable to parse disk source URL: %q", disk.Source) + } + + actual.Disks[disk.DeviceName] = &PersistentDisk{Name: &url.Name} + } + } + + return actual, nil +} + +func (e *Instance) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (s *Instance) CheckChanges(a, e, changes *Instance) error { + return nil +} + +func expandScopeAlias(s string) string { + switch s { + case "storage-ro": + s = "https://www.googleapis.com/auth/devstorage.read_only" + case "storage-rw": + s = "https://www.googleapis.com/auth/devstorage.read_write" + case "compute-ro": + s = "https://www.googleapis.com/auth/compute.read_only" + case "compute-rw": + s = "https://www.googleapis.com/auth/compute" + case "monitoring": + s = "https://www.googleapis.com/auth/monitoring" + case "monitoring-write": + s = "https://www.googleapis.com/auth/monitoring.write" + case "logging-write": + s = "https://www.googleapis.com/auth/logging.write" + } + return s +} + +func init() { + scopeAliases = map[string]string{ + "storage-ro": "https://www.googleapis.com/auth/devstorage.read_only", + "storage-rw": "https://www.googleapis.com/auth/devstorage.read_write", + "compute-ro": "https://www.googleapis.com/auth/compute.read_only", + "compute-rw": "https://www.googleapis.com/auth/compute", + "monitoring": "https://www.googleapis.com/auth/monitoring", + "monitoring-write": "https://www.googleapis.com/auth/monitoring.write", + "logging-write": "https://www.googleapis.com/auth/logging.write", + } +} + +func scopeToLongForm(s string) string { + e, found := scopeAliases[s] + if found { + return e + } + return s +} + +func scopeToShortForm(s string) string { + for k, v := range scopeAliases { + if v == s { + return k + } + } + return s +} + +func (e *Instance) mapToGCE(project string, ipAddressResolver func(*IPAddress) (*string, error)) (*compute.Instance, error) { + zone := *e.Zone + + var scheduling *compute.Scheduling + if fi.BoolValue(e.Preemptible) { + scheduling = &compute.Scheduling{ + OnHostMaintenance: "TERMINATE", + Preemptible: true, + } + } else { + scheduling = &compute.Scheduling{ + AutomaticRestart: true, + // TODO: Migrate or terminate? 
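+ // For reference: MIGRATE live-migrates the VM across host maintenance events, while TERMINATE stops it; preemptible VMs only support TERMINATE, which the branch above uses.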
+ OnHostMaintenance: "MIGRATE", + Preemptible: false, + } + } + + var disks []*compute.AttachedDisk + disks = append(disks, &compute.AttachedDisk{ + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: BuildImageURL(project, *e.Image), + }, + Boot: true, + DeviceName: "persistent-disks-0", + Index: 0, + AutoDelete: true, + Mode: "READ_WRITE", + Type: "PERSISTENT", + }) + + for name, disk := range e.Disks { + disks = append(disks, &compute.AttachedDisk{ + Source: disk.URL(project), + AutoDelete: false, + Mode: "READ_WRITE", + DeviceName: name, + }) + } + + var tags *compute.Tags + if e.Tags != nil { + tags = &compute.Tags{ + Items: e.Tags, + } + } + + var networkInterfaces []*compute.NetworkInterface + if e.IPAddress != nil { + addr, err := ipAddressResolver(e.IPAddress) + if err != nil { + return nil, fmt.Errorf("unable to resolve IP for instance: %v", err) + } + if addr == nil { + return nil, fmt.Errorf("instance IP address has not yet been created") + } + networkInterface := &compute.NetworkInterface{ + AccessConfigs: []*compute.AccessConfig{{ + NatIP: *addr, + Type: "ONE_TO_ONE_NAT", + }}, + Network: e.Network.URL(project), + } + if e.Subnet != nil { + networkInterface.Subnetwork = *e.Subnet.Name + } + networkInterfaces = append(networkInterfaces, networkInterface) + } + + var serviceAccounts []*compute.ServiceAccount + if e.Scopes != nil { + var scopes []string + for _, s := range e.Scopes { + s = expandScopeAlias(s) + + scopes = append(scopes, s) + } + serviceAccounts = append(serviceAccounts, &compute.ServiceAccount{ + Email: "default", + Scopes: scopes, + }) + } + + var metadataItems []*compute.MetadataItems + for key, r := range e.Metadata { + v, err := fi.ResourceAsString(r) + if err != nil { + return nil, fmt.Errorf("error rendering Instance metadata %q: %v", key, err) + } + metadataItems = append(metadataItems, &compute.MetadataItems{ + Key: key, + Value: fi.String(v), + }) + } + + i := &compute.Instance{ + CanIpForward: *e.CanIPForward, + + Disks: disks, + + MachineType: BuildMachineTypeURL(project, zone, *e.MachineType), + + Metadata: &compute.Metadata{ + Items: metadataItems, + }, + + Name: *e.Name, + + NetworkInterfaces: networkInterfaces, + + Scheduling: scheduling, + + ServiceAccounts: serviceAccounts, + + Tags: tags, + } + + return i, nil +} + +func (_ *Instance) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Instance) error { + project := t.Cloud.Project + + ipAddressResolver := func(ip *IPAddress) (*string, error) { + return ip.FindAddress(t.Cloud) + } + + i, err := e.mapToGCE(project, ipAddressResolver) + if err != nil { + return err + } + + if a == nil { + _, err := t.Cloud.Compute.Instances.Insert(t.Cloud.Project, *e.Zone, i).Do() + if err != nil { + return fmt.Errorf("error creating Instance: %v", err) + } + } else { + // TODO: Make error again + glog.Errorf("Cannot apply changes to Instance: %v", changes) + // return fmt.Errorf("Cannot apply changes to Instance: %v", changes) + } + + return nil +} + +func BuildMachineTypeURL(project, zone, name string) string { + return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/machineTypes/%s", project, zone, name) +} + +func BuildImageURL(defaultProject, nameSpec string) string { + tokens := strings.Split(nameSpec, "/") + var project, name string + if len(tokens) == 2 { + project = tokens[0] + name = tokens[1] + } else if len(tokens) == 1 { + project = defaultProject + name = tokens[0] + } else { + glog.Exitf("Cannot parse image spec: %q", nameSpec) + } + + return 
fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/images/%s", project, name) +} + +func ShortenImageURL(imageURL string) (string, error) { + u, err := gce.ParseGoogleCloudURL(imageURL) + if err != nil { + return "", err + } + return u.Project + "/" + u.Name, nil +} + +func (_ *Instance) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Instance) error { + project := t.Project + + // This is a "little" hacky... + ipAddressResolver := func(ip *IPAddress) (*string, error) { + tf := "${google_compute_address." + *ip.Name + ".address}" + return &tf, nil + } + + i, err := e.mapToGCE(project, ipAddressResolver) + if err != nil { + return err + } + + tf := &terraformInstanceTemplate{ + Name: i.Name, + CanIPForward: i.CanIpForward, + MachineType: lastComponent(i.MachineType), + Zone: i.Zone, + Tags: i.Tags.Items, + } + + // TF requires zone + if tf.Zone == "" && e.Zone != nil { + tf.Zone = *e.Zone + } + + tf.AddServiceAccounts(i.ServiceAccounts) + + for _, d := range i.Disks { + tfd := &terraformAttachedDisk{ + AutoDelete: d.AutoDelete, + Scratch: d.Type == "SCRATCH", + DeviceName: d.DeviceName, + + // TODO: Does this need to be a TF link? + Disk: lastComponent(d.Source), + } + if d.InitializeParams != nil { + tfd.Disk = d.InitializeParams.DiskName + tfd.Image = d.InitializeParams.SourceImage + tfd.Type = d.InitializeParams.DiskType + tfd.Size = d.InitializeParams.DiskSizeGb + } + tf.Disks = append(tf.Disks, tfd) + } + + tf.AddNetworks(e.Network, e.Subnet, i.NetworkInterfaces) + + tf.AddMetadata(i.Metadata) + + // Using metadata_startup_script is now mandatory (?) + { + startupScript, found := tf.Metadata["startup-script"] + if found { + delete(tf.Metadata, "startup-script") + } + tf.MetadataStartupScript = startupScript + } + + if i.Scheduling != nil { + tf.Scheduling = &terraformScheduling{ + AutomaticRestart: i.Scheduling.AutomaticRestart, + OnHostMaintenance: i.Scheduling.OnHostMaintenance, + Preemptible: i.Scheduling.Preemptible, + } + } + + return t.RenderResource("google_compute_instance", i.Name, tf) +} diff --git a/upup/pkg/fi/cloudup/gcetasks/instance_template.go b/upup/pkg/fi/cloudup/gcetasks/instance_template.go new file mode 100644 index 0000000000..ed333ea1ef --- /dev/null +++ b/upup/pkg/fi/cloudup/gcetasks/instance_template.go @@ -0,0 +1,420 @@ +package gcetasks + +import ( + "fmt" + + "github.com/golang/glog" + "google.golang.org/api/compute/v1" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/gce" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "strings" +) + +type InstanceTemplate struct { + Name *string + Network *Network + Tags []string + Preemptible *bool + + BootDiskImage *string + BootDiskSizeGB *int64 + BootDiskType *string + + CanIPForward *bool + Subnet *Subnet + + Scopes []string + + Metadata map[string]fi.Resource + MachineType *string +} + +func (d *InstanceTemplate) CompareWithID() *string { + return d.Name +} + +func (d *InstanceTemplate) String() string { + return utils.JsonString(d) +} + +func (e *InstanceTemplate) Find(c *fi.Context) (*InstanceTemplate, error) { + cloud := c.Cloud.(*gce.GCECloud) + + r, err := cloud.Compute.InstanceTemplates.Get(cloud.Project, *e.Name).Do() + if err != nil { + if gce.IsNotFound(err) { + return nil, nil + } + return nil, fmt.Errorf("error listing InstanceTemplates: %v", err) + } + + actual := &InstanceTemplate{} + actual.Name = &r.Name + + p := r.Properties + + for _, tag := range p.Tags.Items { + actual.Tags = 
append(actual.Tags, tag) + } + actual.MachineType = fi.String(lastComponent(p.MachineType)) + actual.CanIPForward = &p.CanIpForward + + bootDiskImage, err := ShortenImageURL(p.Disks[0].InitializeParams.SourceImage) + if err != nil { + return nil, fmt.Errorf("error parsing source image URL: %v", err) + } + actual.BootDiskImage = fi.String(bootDiskImage) + actual.BootDiskType = &p.Disks[0].InitializeParams.DiskType + actual.BootDiskSizeGB = &p.Disks[0].InitializeParams.DiskSizeGb + + if p.Scheduling != nil { + actual.Preemptible = &p.Scheduling.Preemptible + } + if len(p.NetworkInterfaces) != 0 { + ni := p.NetworkInterfaces[0] + actual.Network = &Network{Name: fi.String(lastComponent(ni.Network))} + } + + for _, serviceAccount := range p.ServiceAccounts { + for _, scope := range serviceAccount.Scopes { + actual.Scopes = append(actual.Scopes, scopeToShortForm(scope)) + } + } + + //for i, disk := range p.Disks { + // if i == 0 { + // source := disk.Source + // + // // TODO: Parse source URL instead of assuming same project/zone? + // name := lastComponent(source) + // d, err := cloud.Compute.Disks.Get(cloud.Project, *e.Zone, name).Do() + // if err != nil { + // if gce.IsNotFound(err) { + // return nil, fmt.Errorf("disk not found %q: %v", source, err) + // } + // return nil, fmt.Errorf("error querying for disk %q: %v", source, err) + // } else { + // imageURL, err := gce.ParseGoogleCloudURL(d.SourceImage) + // if err != nil { + // return nil, fmt.Errorf("unable to parse image URL: %q", d.SourceImage) + // } + // actual.Image = fi.String(imageURL.Project + "/" + imageURL.Name) + // } + // } + //} + + if p.Metadata != nil { + actual.Metadata = make(map[string]fi.Resource) + for _, meta := range p.Metadata.Items { + actual.Metadata[meta.Key] = fi.NewStringResource(*meta.Value) + } + } + + return actual, nil +} + +func (e *InstanceTemplate) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (s *InstanceTemplate) CheckChanges(a, e, changes *InstanceTemplate) error { + return nil +} + +func (e *InstanceTemplate) mapToGCE(project string) (*compute.InstanceTemplate, error) { + // TODO: This is similar to Instance... + var scheduling *compute.Scheduling + + if fi.BoolValue(e.Preemptible) { + scheduling = &compute.Scheduling{ + AutomaticRestart: false, + OnHostMaintenance: "TERMINATE", + Preemptible: true, + } + } else { + scheduling = &compute.Scheduling{ + AutomaticRestart: true, + // TODO: Migrate or terminate? 
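+ // As in instance.go: MIGRATE keeps non-preemptible nodes running through host maintenance.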
+ OnHostMaintenance: "MIGRATE", + Preemptible: false, + } + } + + glog.Infof("We should be using NVME for GCE") + + var disks []*compute.AttachedDisk + disks = append(disks, &compute.AttachedDisk{ + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: BuildImageURL(project, *e.BootDiskImage), + DiskSizeGb: *e.BootDiskSizeGB, + DiskType: *e.BootDiskType, + }, + Boot: true, + DeviceName: "persistent-disks-0", + Index: 0, + AutoDelete: true, + Mode: "READ_WRITE", + Type: "PERSISTENT", + }) + + var tags *compute.Tags + if e.Tags != nil { + tags = &compute.Tags{ + Items: e.Tags, + } + } + + var networkInterfaces []*compute.NetworkInterface + ni := &compute.NetworkInterface{ + AccessConfigs: []*compute.AccessConfig{{ + //NatIP: *e.IPAddress.Address, + Type: "ONE_TO_ONE_NAT", + }}, + Network: e.Network.URL(project), + } + if e.Subnet != nil { + ni.Subnetwork = *e.Subnet.Name + } + networkInterfaces = append(networkInterfaces, ni) + + var serviceAccounts []*compute.ServiceAccount + if e.Scopes != nil { + var scopes []string + for _, s := range e.Scopes { + s = expandScopeAlias(s) + + scopes = append(scopes, s) + } + serviceAccounts = append(serviceAccounts, &compute.ServiceAccount{ + Email: "default", + Scopes: scopes, + }) + } + + var metadataItems []*compute.MetadataItems + for key, r := range e.Metadata { + v, err := fi.ResourceAsString(r) + if err != nil { + return nil, fmt.Errorf("error rendering InstanceTemplate metadata %q: %v", key, err) + } + metadataItems = append(metadataItems, &compute.MetadataItems{ + Key: key, + Value: fi.String(v), + }) + } + + i := &compute.InstanceTemplate{ + Name: *e.Name, + Properties: &compute.InstanceProperties{ + CanIpForward: *e.CanIPForward, + + Disks: disks, + + MachineType: *e.MachineType, + + Metadata: &compute.Metadata{ + Items: metadataItems, + }, + + NetworkInterfaces: networkInterfaces, + + Scheduling: scheduling, + + ServiceAccounts: serviceAccounts, + + Tags: tags, + }, + } + + return i, nil +} + +func (_ *InstanceTemplate) RenderGCE(t *gce.GCEAPITarget, a, e, changes *InstanceTemplate) error { + project := t.Cloud.Project + + i, err := e.mapToGCE(project) + if err != nil { + return err + } + + if a == nil { + _, err := t.Cloud.Compute.InstanceTemplates.Insert(t.Cloud.Project, i).Do() + if err != nil { + return fmt.Errorf("error creating InstanceTemplate: %v", err) + } + } else { + // TODO: Make error again + glog.Errorf("Cannot apply changes to InstanceTemplate: %v", changes) + //return fmt.Errorf("Cannot apply changes to InstanceTemplate: %v", changes) + } + + return nil +} + +type terraformInstanceTemplate struct { + Name string `json:"name"` + CanIPForward bool `json:"can_ip_forward"` + MachineType string `json:"machine_type,omitempty"` + ServiceAccount *terraformServiceAccount `json:"service_account,omitempty"` + Scheduling *terraformScheduling `json:"scheduling,omitempty"` + Disks []*terraformAttachedDisk `json:"disk,omitempty"` + NetworkInterfaces []*terraformNetworkInterface `json:"network_interface,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + MetadataStartupScript string `json:"metadata_startup_script,omitempty"` + Tags []string `json:"tags,omitempty"` + + // Only for instances: + Zone string `json:"zone,omitempty"` +} + +type terraformServiceAccount struct { + Scopes []string `json:"scopes"` +} + +type terraformScheduling struct { + AutomaticRestart bool `json:"automatic_restart"` + OnHostMaintenance string `json:"on_host_maintenance,omitempty"` + Preemptible bool `json:"preemptible"` +} + 
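+// For reference, a sketch of what the scheduling struct above renders to in the generated Terraform JSON (values illustrative): +// "scheduling": {"automatic_restart": true, "on_host_maintenance": "MIGRATE", "preemptible": false} +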
+type terraformAttachedDisk struct { + // These values are common + AutoDelete bool `json:"auto_delete,omitempty"` + DeviceName string `json:"device_name,omitempty"` + + // DANGER - common but different meaning: + // for an instance template this is scratch vs persistent + // for an instance this is 'pd-standard', 'pd-ssd', 'local-ssd' etc. + Type string `json:"type,omitempty"` + + // These values are only for instance templates: + Boot bool `json:"boot,omitempty"` + DiskName string `json:"disk_name,omitempty"` + SourceImage string `json:"source_image,omitempty"` + Source string `json:"source,omitempty"` + Interface string `json:"interface,omitempty"` + Mode string `json:"mode,omitempty"` + DiskType string `json:"disk_type,omitempty"` + DiskSizeGB int64 `json:"disk_size_gb,omitempty"` + + // These values are only for instances: + Disk string `json:"disk,omitempty"` + Image string `json:"image,omitempty"` + Scratch bool `json:"scratch,omitempty"` + Size int64 `json:"size,omitempty"` +} + +type terraformNetworkInterface struct { + Network *terraform.Literal `json:"network,omitempty"` + Subnetwork *terraform.Literal `json:"subnetwork,omitempty"` + AccessConfig []*terraformAccessConfig `json:"access_config"` +} + +type terraformAccessConfig struct { + NatIP *terraform.Literal `json:"nat_ip,omitempty"` +} + +func (t *terraformInstanceTemplate) AddNetworks(network *Network, subnet *Subnet, networkInterfaces []*compute.NetworkInterface) { + for _, g := range networkInterfaces { + tf := &terraformNetworkInterface{} + if network != nil { + tf.Network = network.TerraformName() + } + if subnet != nil { + tf.Subnetwork = subnet.TerraformName() + } + for _, gac := range g.AccessConfigs { + tac := &terraformAccessConfig{} + natIP := gac.NatIP + if strings.HasPrefix(natIP, "${") { + tac.NatIP = terraform.LiteralExpression(natIP) + } else if natIP != "" { + tac.NatIP = terraform.LiteralFromStringValue(natIP) + } + + tf.AccessConfig = append(tf.AccessConfig, tac) + } + + t.NetworkInterfaces = append(t.NetworkInterfaces, tf) + } +} + +func (t *terraformInstanceTemplate) AddMetadata(metadata *compute.Metadata) { + if metadata != nil { + if t.Metadata == nil { + t.Metadata = make(map[string]string) + } + for _, g := range metadata.Items { + value := *g.Value + tfValue := strings.Replace(value, "${", "$${", -1) + t.Metadata[g.Key] = tfValue + } + } +} + +func (t *terraformInstanceTemplate) AddServiceAccounts(serviceAccounts []*compute.ServiceAccount) { + for _, g := range serviceAccounts { + for _, scope := range g.Scopes { + if t.ServiceAccount == nil { + t.ServiceAccount = &terraformServiceAccount{} + } + t.ServiceAccount.Scopes = append(t.ServiceAccount.Scopes, scope) + } + } +} + +func (_ *InstanceTemplate) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *InstanceTemplate) error { + project := t.Project + + i, err := e.mapToGCE(project) + if err != nil { + return err + } + + tf := &terraformInstanceTemplate{ + Name: i.Name, + CanIPForward: i.Properties.CanIpForward, + //Description: i.Properties.Description, + MachineType: i.Properties.MachineType, + Tags: i.Properties.Tags.Items, + } + + tf.AddServiceAccounts(i.Properties.ServiceAccounts) + + for _, d := range i.Properties.Disks { + tfd := &terraformAttachedDisk{ + AutoDelete: d.AutoDelete, + Boot: d.Boot, + DeviceName: d.DeviceName, + DiskName: d.InitializeParams.DiskName, + SourceImage: d.InitializeParams.SourceImage, + Source: d.Source, + Interface: d.Interface, + Mode: d.Mode, + DiskType: d.InitializeParams.DiskType, + DiskSizeGB:
d.InitializeParams.DiskSizeGb, + Type: d.Type, + } + tf.Disks = append(tf.Disks, tfd) + } + + tf.AddNetworks(e.Network, e.Subnet, i.Properties.NetworkInterfaces) + + tf.AddMetadata(i.Properties.Metadata) + + if i.Properties.Scheduling != nil { + tf.Scheduling = &terraformScheduling{ + AutomaticRestart: i.Properties.Scheduling.AutomaticRestart, + OnHostMaintenance: i.Properties.Scheduling.OnHostMaintenance, + Preemptible: i.Properties.Scheduling.Preemptible, + } + } + + return t.RenderResource("google_compute_instance_template", i.Name, tf) +} + +func (i *InstanceTemplate) TerraformLink() *terraform.Literal { + return terraform.LiteralSelfLink("google_compute_instance_template", *i.Name) +} diff --git a/upup/pkg/fi/cloudup/gcetasks/ip_address.go b/upup/pkg/fi/cloudup/gcetasks/ip_address.go new file mode 100644 index 0000000000..51dd7c0780 --- /dev/null +++ b/upup/pkg/fi/cloudup/gcetasks/ip_address.go @@ -0,0 +1,105 @@ +package gcetasks + +import ( + "fmt" + + "github.com/golang/glog" + "google.golang.org/api/compute/v1" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/gce" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform" + "k8s.io/kube-deploy/upup/pkg/fi/utils" +) + +type IPAddress struct { + Name *string + Address *string + + actual *IPAddress +} + +func (d *IPAddress) String() string { + return utils.JsonString(d) +} + +func (d *IPAddress) CompareWithID() *string { + return d.Name +} + +func (e *IPAddress) Find(c *fi.Context) (*IPAddress, error) { + return e.find(c.Cloud.(*gce.GCECloud)) +} + +func (e *IPAddress) find(cloud *gce.GCECloud) (*IPAddress, error) { + r, err := cloud.Compute.Addresses.Get(cloud.Project, cloud.Region, *e.Name).Do() + if err != nil { + if gce.IsNotFound(err) { + return nil, nil + } + + return nil, fmt.Errorf("error listing IPAddresss: %v", err) + } + + actual := &IPAddress{} + actual.Address = &r.Address + actual.Name = &r.Name + + return actual, nil +} + +func (e *IPAddress) FindAddress(cloud fi.Cloud) (*string, error) { + actual, err := e.find(cloud.(*gce.GCECloud)) + if err != nil { + // TODO: Race here if the address isn't immediately created? 
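+ // (GCE mutations are asynchronous: Insert returns an Operation, so a Get issued immediately after creation can miss the address; a nil result means "not yet created".)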
+ return nil, fmt.Errorf("error querying for IPAddress: %v", err) + } + return actual.Address, nil +} + +func (e *IPAddress) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (s *IPAddress) CheckChanges(a, e, changes *IPAddress) error { + if a != nil { + if changes.Name != nil { + return fi.CannotChangeField("Name") + } + if changes.Address != nil { + return fi.CannotChangeField("Address") + } + } + return nil +} + +func (_ *IPAddress) RenderGCE(t *gce.GCEAPITarget, a, e, changes *IPAddress) error { + addr := &compute.Address{ + Name: *e.Name, + Address: fi.StringValue(e.Address), + Region: t.Cloud.Region, + } + + if a == nil { + glog.Infof("GCE creating address: %q", addr.Name) + + _, err := t.Cloud.Compute.Addresses.Insert(t.Cloud.Project, t.Cloud.Region, addr).Do() + if err != nil { + return fmt.Errorf("error creating IPAddress: %v", err) + } + } else { + return fmt.Errorf("Cannot apply changes to IPAddress: %v", changes) + } + + return nil +} + +type terraformAddress struct { + Name *string `json:"name"` +} + +func (_ *IPAddress) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *IPAddress) error { + tf := &terraformAddress{ + Name: e.Name, + } + return t.RenderResource("google_compute_address", *e.Name, tf) +} diff --git a/upup/pkg/fi/cloudup/gcetasks/managed_instance_group.go b/upup/pkg/fi/cloudup/gcetasks/managed_instance_group.go new file mode 100644 index 0000000000..cbb6113ce5 --- /dev/null +++ b/upup/pkg/fi/cloudup/gcetasks/managed_instance_group.go @@ -0,0 +1,114 @@ +package gcetasks + +import ( + "fmt" + + "github.com/golang/glog" + "google.golang.org/api/compute/v1" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/gce" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "time" +) + +type ManagedInstanceGroup struct { + Name *string + Zone *string + BaseInstanceName *string + InstanceTemplate *InstanceTemplate + TargetSize *int64 +} + +func (d *ManagedInstanceGroup) String() string { + return utils.JsonString(d) +} + +func (d *ManagedInstanceGroup) CompareWithID() *string { + return d.Name +} + +func (e *ManagedInstanceGroup) Find(c *fi.Context) (*ManagedInstanceGroup, error) { + cloud := c.Cloud.(*gce.GCECloud) + + r, err := cloud.Compute.InstanceGroupManagers.Get(cloud.Project, *e.Zone, *e.Name).Do() + if err != nil { + if gce.IsNotFound(err) { + return nil, nil + } + return nil, fmt.Errorf("error listing ManagedInstanceGroups: %v", err) + } + + actual := &ManagedInstanceGroup{} + actual.Name = &r.Name + actual.Zone = fi.String(lastComponent(r.Zone)) + actual.BaseInstanceName = &r.BaseInstanceName + actual.TargetSize = &r.TargetSize + actual.InstanceTemplate = &InstanceTemplate{Name: fi.String(lastComponent(r.InstanceTemplate))} + + return actual, nil +} + +func (e *ManagedInstanceGroup) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (s *ManagedInstanceGroup) CheckChanges(a, e, changes *ManagedInstanceGroup) error { + return nil +} + +func BuildInstanceTemplateURL(project, name string) string { + return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/global/instanceTemplates/%s", project, name) +} + +func (_ *ManagedInstanceGroup) RenderGCE(t *gce.GCEAPITarget, a, e, changes *ManagedInstanceGroup) error { + project := t.Cloud.Project + + i := &compute.InstanceGroupManager{ + Name: *e.Name, + Zone: *e.Zone, + BaseInstanceName: *e.BaseInstanceName, + TargetSize: *e.TargetSize, + InstanceTemplate: 
BuildInstanceTemplateURL(project, *e.InstanceTemplate.Name), + } + + if a == nil { + for { + _, err := t.Cloud.Compute.InstanceGroupManagers.Insert(t.Cloud.Project, *e.Zone, i).Do() + if err != nil { + if gce.IsNotReady(err) { + glog.Infof("Found resourceNotReady error - sleeping before retry: %v", err) + time.Sleep(5 * time.Second) + continue + } + return fmt.Errorf("error creating ManagedInstanceGroup: %v", err) + } else { + break + } + } + } else { + return fmt.Errorf("Cannot apply changes to ManagedInstanceGroup: %v", changes) + } + + return nil +} + +type terraformInstanceGroupManager struct { + Name *string `json:"name"` + Zone *string `json:"zone"` + BaseInstanceName *string `json:"base_instance_name"` + InstanceTemplate *terraform.Literal `json:"instance_template"` + TargetSize *int64 `json:"target_size"` +} + +func (_ *ManagedInstanceGroup) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *ManagedInstanceGroup) error { + tf := &terraformInstanceGroupManager{ + Name: e.Name, + Zone: e.Zone, + BaseInstanceName: e.BaseInstanceName, + InstanceTemplate: e.InstanceTemplate.TerraformLink(), + TargetSize: e.TargetSize, + } + + return t.RenderResource("google_compute_instance_group_manager", *e.Name, tf) +} diff --git a/upup/pkg/fi/cloudup/gcetasks/network.go b/upup/pkg/fi/cloudup/gcetasks/network.go new file mode 100644 index 0000000000..64294c85f7 --- /dev/null +++ b/upup/pkg/fi/cloudup/gcetasks/network.go @@ -0,0 +1,115 @@ +package gcetasks + +import ( + "fmt" + + "github.com/golang/glog" + "google.golang.org/api/compute/v1" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/gce" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform" + "k8s.io/kube-deploy/upup/pkg/fi/utils" +) + +type Network struct { + Name *string + CIDR *string +} + +var _ fi.Task = &Network{} +var _ fi.CompareWithID = &Network{} + +func (d *Network) String() string { + return utils.JsonString(d) +} + +func (d *Network) CompareWithID() *string { + return d.Name +} + +func (e *Network) Find(c *fi.Context) (*Network, error) { + cloud := c.Cloud.(*gce.GCECloud) + + r, err := cloud.Compute.Networks.Get(cloud.Project, *e.Name).Do() + if err != nil { + if gce.IsNotFound(err) { + return nil, nil + } + return nil, fmt.Errorf("error listing Networks: %v", err) + } + + actual := &Network{} + actual.Name = &r.Name + actual.CIDR = &r.IPv4Range + + if r.SelfLink != e.URL(cloud.Project) { + glog.Warningf("SelfLink did not match URL: %q vs %q", r.SelfLink, e.URL(cloud.Project)) + } + + return actual, nil +} + +func (e *Network) URL(project string) string { + u := gce.GoogleCloudURL{ + Project: project, + Name: *e.Name, + Type: "networks", + Global: true, + } + return u.BuildURL() +} + +func (e *Network) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (s *Network) CheckChanges(a, e, changes *Network) error { + return nil +} + +func (_ *Network) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Network) error { + if a == nil { + glog.V(2).Infof("Creating Network with CIDR: %q", *e.CIDR) + + network := &compute.Network{ + IPv4Range: *e.CIDR, + + //// AutoCreateSubnetworks: When set to true, the network is created in + //// "auto subnet mode". When set to false, the network is in "custom + //// subnet mode". + //// + //// In "auto subnet mode", a newly created network is assigned the + //// default CIDR of 10.128.0.0/9 and it automatically creates one + //// subnetwork per region. 
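+ //// Note: auto subnet mode and the legacy IPv4Range set above are mutually + //// exclusive, so enabling this would mean dropping the CIDR.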
+ //AutoCreateSubnetworks bool `json:"autoCreateSubnetworks,omitempty"` + + Name: *e.Name, + } + _, err := t.Cloud.Compute.Networks.Insert(t.Cloud.Project, network).Do() + if err != nil { + return fmt.Errorf("error creating Network: %v", err) + } + } + + return nil +} + +type terraformNetwork struct { + Name *string `json:"name"` + CIDR *string `json:"ipv4_range"` + //AutoCreateSubnetworks bool `json:"auto_create_subnetworks"` +} + +func (_ *Network) RenderTerraform(t *terraform.TerraformTarget, a, e, changes *Network) error { + tf := &terraformNetwork{ + Name: e.Name, + CIDR: e.CIDR, + //AutoCreateSubnetworks: false, + } + + return t.RenderResource("google_compute_network", *e.Name, tf) +} + +func (i *Network) TerraformName() *terraform.Literal { + return terraform.LiteralProperty("google_compute_network", *i.Name, "name") +} diff --git a/upup/pkg/fi/cloudup/gcetasks/subnet.go b/upup/pkg/fi/cloudup/gcetasks/subnet.go new file mode 100644 index 0000000000..28762babf4 --- /dev/null +++ b/upup/pkg/fi/cloudup/gcetasks/subnet.go @@ -0,0 +1,94 @@ +package gcetasks + +import ( + "fmt" + + "github.com/golang/glog" + "google.golang.org/api/compute/v1" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/gce" + "k8s.io/kube-deploy/upup/pkg/fi/cloudup/terraform" + "k8s.io/kube-deploy/upup/pkg/fi/utils" +) + +type Subnet struct { + Name *string + Network *Network + Region *string + CIDR *string +} + +func (d *Subnet) String() string { + return utils.JsonString(d) +} + +func (d *Subnet) CompareWithID() *string { + return d.Name +} + +func (e *Subnet) Find(c *fi.Context) (*Subnet, error) { + cloud := c.Cloud.(*gce.GCECloud) + + s, err := cloud.Compute.Subnetworks.Get(cloud.Project, cloud.Region, *e.Name).Do() + if err != nil { + if gce.IsNotFound(err) { + return nil, nil + } + return nil, fmt.Errorf("error listing Subnets: %v", err) + } + + actual := &Subnet{} + actual.Name = &s.Name + actual.Network = &Network{Name: &s.Network} + actual.Region = &s.Region + actual.CIDR = &s.IpCidrRange + + return actual, nil +} + +func (e *Subnet) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (s *Subnet) CheckChanges(a, e, changes *Subnet) error { + return nil +} + +func (_ *Subnet) RenderGCE(t *gce.GCEAPITarget, a, e, changes *Subnet) error { + if a == nil { + glog.V(2).Infof("Creating Subnet with CIDR: %q", *e.CIDR) + + subnet := &compute.Subnetwork{ + IpCidrRange: *e.CIDR, + Name: *e.Name, + Network: *e.Network.Name, + } + _, err := t.Cloud.Compute.Subnetworks.Insert(t.Cloud.Project, t.Cloud.Region, subnet).Do() + if err != nil { + return fmt.Errorf("error creating Subnet: %v", err) + } + } + + return nil +} + +type terraformSubnet struct { + Name *string `json:"name"` + Network *terraform.Literal `json:"network"` + Region *string `json:"region"` + CIDR *string `json:"ip_cidr_range"` +} + +func (_ *Subnet) RenderSubnet(t *terraform.TerraformTarget, a, e, changes *Subnet) error { + tf := &terraformSubnet{ + Name: e.Name, + Network: e.Network.TerraformName(), + Region: e.Region, + CIDR: e.CIDR, + } + return t.RenderResource("google_compute_subnetwork", *e.Name, tf) +} + +func (i *Subnet) TerraformName() *terraform.Literal { + return terraform.LiteralProperty("google_compute_subnetwork", *i.Name, "name") +} diff --git a/upup/pkg/fi/cloudup/loader.go b/upup/pkg/fi/cloudup/loader.go new file mode 100644 index 0000000000..1686a3bad5 --- /dev/null +++ b/upup/pkg/fi/cloudup/loader.go @@ -0,0 +1,536 @@ +package cloudup + +import ( + "bytes" + "encoding/base64" + "fmt" 
+ "github.com/golang/glog" + "io" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/fitasks" + "k8s.io/kube-deploy/upup/pkg/fi/loader" + "k8s.io/kube-deploy/upup/pkg/fi/nodeup" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "os" + "path" + "reflect" + "strings" + "text/template" +) + +type deferredType int + +const ( + KEY_NAME = "name" + KEY_TYPE = "_type" +) + +const ( + deferredUnit deferredType = iota + deferredResource +) + +type Loader struct { + StateDir string + OptionsLoader *loader.OptionsLoader + NodeModelDir string + + Tags map[string]struct{} + CAStore fi.CAStore + SecretStore fi.SecretStore + + typeMap map[string]reflect.Type + + templates []*template.Template + config interface{} + + resources map[string]fi.Resource + deferred []*deferredBinding + + tasks map[string]fi.Task + + unmarshaller utils.Unmarshaller +} + +type templateResource struct { + key string + loader *Loader + template string + args []string +} + +var _ fi.Resource = &templateResource{} +var _ fi.TemplateResource = &templateResource{} + +func (a *templateResource) Open() (io.ReadSeeker, error) { + var err error + result, err := a.loader.executeTemplate(a.key, a.template, a.args) + if err != nil { + return nil, fmt.Errorf("error executing resource template %q: %v", a.key, err) + } + reader := bytes.NewReader([]byte(result)) + return reader, nil +} + +func (a *templateResource) Curry(args []string) fi.TemplateResource { + curried := &templateResource{} + *curried = *a + curried.args = append(curried.args, args...) + return curried +} + +type deferredBinding struct { + name string + dest utils.Settable + src string + deferredType deferredType +} + +func (l *Loader) Init() { + l.tasks = make(map[string]fi.Task) + l.typeMap = make(map[string]reflect.Type) + l.unmarshaller.SpecialCases = l.unmarshallerSpecialCases + l.resources = make(map[string]fi.Resource) +} + +func (l *Loader) AddTypes(types map[string]interface{}) { + for key, proto := range types { + _, exists := l.typeMap[key] + if exists { + glog.Fatalf("duplicate type key: %q", key) + } + + t := reflect.TypeOf(proto) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + l.typeMap[key] = t + } +} + +func (l *Loader) executeTemplate(key string, d string, args []string) (string, error) { + t := template.New(key) + + funcMap := make(template.FuncMap) + funcMap["Base64Encode"] = func(s string) string { + return base64.StdEncoding.EncodeToString([]byte(s)) + } + funcMap["CA"] = func() fi.CAStore { + return l.CAStore + } + funcMap["Secrets"] = func() fi.SecretStore { + return l.SecretStore + } + funcMap["Args"] = func() []string { + return args + } + funcMap["BuildNodeConfig"] = func(target string, configResourceName string, args []string) (string, error) { + return l.buildNodeConfig(target, configResourceName, args) + } + t.Funcs(funcMap) + + context := l.config + + _, err := t.Parse(d) + if err != nil { + return "", fmt.Errorf("error parsing template %q: %v", key, err) + } + + t.Option("missingkey=zero") + + var buffer bytes.Buffer + err = t.ExecuteTemplate(&buffer, key, context) + if err != nil { + return "", fmt.Errorf("error executing template %q: %v", key, err) + } + + return buffer.String(), nil +} + +func ignoreHandler(i *loader.TreeWalkItem) error { + return nil +} + +func (l *Loader) Build(baseDir string) (map[string]fi.Task, error) { + // First pass: load options + tw := &loader.TreeWalker{ + DefaultHandler: ignoreHandler, + Contexts: map[string]loader.Handler{ + "resources": ignoreHandler, + "pki": ignoreHandler, + }, + Extensions: 
map[string]loader.Handler{ + ".options": l.OptionsLoader.HandleOptions, + }, + Tags: l.Tags, + } + err := tw.Walk(baseDir) + if err != nil { + return nil, err + } + + l.config, err = l.OptionsLoader.Build() + if err != nil { + return nil, err + } + glog.Infof("options: %s", utils.JsonString(l.config)) + + // Second pass: load everything else + tw = &loader.TreeWalker{ + DefaultHandler: l.objectHandler, + Contexts: map[string]loader.Handler{ + "resources": l.resourceHandler, + "pki": l.pkiHandler, + }, + Extensions: map[string]loader.Handler{ + ".options": ignoreHandler, + }, + Tags: l.Tags, + } + + err = tw.Walk(baseDir) + if err != nil { + return nil, err + } + + err = l.processDeferrals() + if err != nil { + return nil, err + } + return l.tasks, nil +} + +func (l *Loader) processDeferrals() error { + if len(l.deferred) != 0 { + unitMap := make(map[string]fi.Task) + + for k, o := range l.tasks { + if unit, ok := o.(fi.Task); ok { + unitMap[k] = unit + } + } + + for _, d := range l.deferred { + src := d.src + + switch d.deferredType { + case deferredUnit: + unit, found := unitMap[src] + if !found { + glog.Infof("Known targets:") + for k := range unitMap { + glog.Infof(" %s", k) + } + return fmt.Errorf("cannot resolve link at %q to %q", d.name, d.src) + } + + d.dest.Set(reflect.ValueOf(unit)) + + case deferredResource: + // Resources can contain template 'arguments', separated by spaces + // + tokens := strings.Split(src, " ") + match := tokens[0] + args := tokens[1:] + + match = strings.TrimPrefix(match, "resources/") + found := l.resources[match] + + if found == nil { + glog.Infof("Known resources:") + for k := range l.resources { + glog.Infof(" %s", k) + } + return fmt.Errorf("cannot resolve resource link %q (at %q)", d.src, d.name) + } + + err := l.populateResource(d.name, d.dest, found, args) + if err != nil { + return fmt.Errorf("error setting resource value: %v", err) + } + + default: + panic("unhandled deferred type") + } + } + } + + return nil +} + +func (l *Loader) resourceHandler(i *loader.TreeWalkItem) error { + contents, err := i.ReadBytes() + if err != nil { + return err + } + + var a fi.Resource + key := i.RelativePath + if strings.HasSuffix(key, ".template") { + key = strings.TrimSuffix(key, ".template") + glog.V(2).Infof("loading (templated) resource %q", key) + + a = &templateResource{ + template: string(contents), + loader: l, + key: key, + } + } else { + glog.V(2).Infof("loading resource %q", key) + a = fi.NewBytesResource(contents) + + } + + l.resources[key] = a + return nil +} + +func (l *Loader) pkiHandler(i *loader.TreeWalkItem) error { + contents, err := i.ReadString() + if err != nil { + return err + } + + key := i.RelativePath + + contents, err = l.executeTemplate(key, contents, nil) + if err != nil { + return err + } + + task, err := fitasks.NewPKIKeyPairTask(key, contents, "") + if err != nil { + return err + } + l.tasks["pki/"+i.RelativePath] = task + return nil +} + +func (l *Loader) objectHandler(i *loader.TreeWalkItem) error { + contents, err := i.ReadString() + if err != nil { + return err + } + + data, err := l.executeTemplate(i.RelativePath, contents, nil) + if err != nil { + return err + } + + objects, err := l.loadYamlObjects(i.RelativePath, data) + if err != nil { + return err + } + + for k, v := range objects { + _, found := l.tasks[k] + if found { + return fmt.Errorf("found duplicate object: %q", k) + } + l.tasks[k] = v.(fi.Task) + } + return nil +} + +func (l *Loader) loadYamlObjects(key string, data string) (map[string]interface{}, error) { + var o 
map[string]interface{} + err := utils.YamlUnmarshal([]byte(data), &o) + if err != nil { + // TODO: It would be nice if yaml returned us the line number here + glog.Infof("error parsing yaml. yaml follows:") + for i, line := range strings.Split(string(data), "\n") { + fmt.Fprintf(os.Stderr, "%3d: %s\n", i, line) + } + return nil, fmt.Errorf("error parsing yaml %q: %v", key, err) + } + + return l.loadObjectMap(key, o) +} + +func (l *Loader) loadObjectMap(key string, data map[string]interface{}) (map[string]interface{}, error) { + loaded := make(map[string]interface{}) + + for k, v := range data { + typeId := "" + name := "" + + // If the name & type are not specified in the values, + // we infer them from the key (first component -> typeid, last component -> name) + if vMap, ok := v.(map[string]interface{}); ok { + if s, ok := vMap[KEY_TYPE]; ok { + typeId = s.(string) + } + if s, ok := vMap[KEY_NAME]; ok { + name = s.(string) + } + } + + inferredName := false + + if name == "" { + lastSlash := strings.LastIndex(k, "/") + name = k[lastSlash+1:] + inferredName = true + } + + if typeId == "" { + firstSlash := strings.Index(k, "/") + if firstSlash != -1 { + typeId = k[:firstSlash] + } + + if typeId == "" { + return nil, fmt.Errorf("cannot determine type for %q", k) + } + } + + t, found := l.typeMap[typeId] + if !found { + return nil, fmt.Errorf("unknown type %q (in %q)", typeId, key) + } + + o := reflect.New(t) + err := l.unmarshaller.UnmarshalStruct(key+":"+k, o, v) + if err != nil { + return nil, err + } + //glog.Infof("Built %s:%s => %v", key, k, o.Interface()) + + if inferredName { + nameField := o.Elem().FieldByName("Name") + if nameField.IsValid() { + err := l.unmarshaller.UnmarshalSettable(k+":Name", utils.Settable{Value: nameField}, name) + if err != nil { + return nil, err + } + } + } + loaded[k] = o.Interface() + } + return loaded, nil +} + +func (l *Loader) unmarshallerSpecialCases(name string, dest utils.Settable, src interface{}, destTypeName string) (bool, error) { + switch destTypeName { + case "Resource": + { + d := &deferredBinding{ + name: name, + dest: dest, + deferredType: deferredResource, + } + switch src := src.(type) { + case string: + d.src = src + default: + return false, fmt.Errorf("unhandled conversion for %q: %T -> %s", name, src, destTypeName) + } + l.deferred = append(l.deferred, d) + return true, nil + } + + default: + if _, ok := dest.Value.Interface().(fi.Task); ok { + d := &deferredBinding{ + name: name, + dest: dest, + deferredType: deferredUnit, + } + switch src := src.(type) { + case string: + d.src = src + default: + return false, fmt.Errorf("unhandled conversion for %q: %T -> %s", name, src, destTypeName) + } + l.deferred = append(l.deferred, d) + return true, nil + } + + } + + return false, nil +} + +func (l *Loader) populateResource(name string, dest utils.Settable, src interface{}, args []string) error { + if src == nil { + return nil + } + + destTypeName := utils.BuildTypeName(dest.Type()) + + switch destTypeName { + case "Resource": + { + switch src := src.(type) { + case []byte: + if len(args) != 0 { + return fmt.Errorf("cannot have arguments with static resources") + } + dest.Set(reflect.ValueOf(fi.NewBytesResource(src))) + + default: + if resource, ok := src.(fi.Resource); ok { + if len(args) != 0 { + templateResource, ok := resource.(fi.TemplateResource) + if !ok { + return fmt.Errorf("cannot have arguments with resources of type %T", resource) + } + resource = templateResource.Curry(args) + } + dest.Set(reflect.ValueOf(resource)) + } else { + 
return fmt.Errorf("unhandled conversion for %q: %T -> %s", name, src, destTypeName) + } + } + return nil + } + + default: + return fmt.Errorf("unhandled destination type for %q: %s", name, destTypeName) + } + +} + +func (l *Loader) buildNodeConfig(target string, configResourceName string, args []string) (string, error) { + assetDir := path.Join(l.StateDir, "node/assets") + + resourceKey := strings.TrimSuffix(configResourceName, ".template") + resourceKey = strings.TrimPrefix(resourceKey, "resources/") + configResource := l.resources[resourceKey] + if configResource == nil { + return "", fmt.Errorf("cannot find resource %q", configResourceName) + } + + if tr, ok := configResource.(fi.TemplateResource); ok { + configResource = tr.Curry(args) + } else if len(args) != 0 { + return "", fmt.Errorf("args passed when building node config, but config was not a template %q", configResourceName) + } + + confData, err := fi.ResourceAsBytes(configResource) + if err != nil { + return "", fmt.Errorf("error reading resource %q: %v", configResourceName, err) + } + + config := &nodeup.NodeConfig{} + err = utils.YamlUnmarshal(confData, config) + if err != nil { + return "", fmt.Errorf("error parsing configuration %q: %v", configResourceName, err) + } + + cmd := &nodeup.NodeUpCommand{ + Config: config, + ConfigLocation: "", + ModelDir: l.NodeModelDir, + Target: target, + AssetDir: assetDir, + } + + var buff bytes.Buffer + err = cmd.Run(&buff) + if err != nil { + return "", fmt.Errorf("error building node configuration: %v", err) + } + + return buff.String(), nil +} diff --git a/upup/pkg/fi/cloudup/terraform/literal.go b/upup/pkg/fi/cloudup/terraform/literal.go new file mode 100644 index 0000000000..f5d0486931 --- /dev/null +++ b/upup/pkg/fi/cloudup/terraform/literal.go @@ -0,0 +1,30 @@ +package terraform + +import "encoding/json" + +type Literal struct { + value string +} + +var _ json.Marshaler = &Literal{} + +func (l *Literal) MarshalJSON() ([]byte, error) { + return json.Marshal(&l.value) +} + +func LiteralExpression(s string) *Literal { + return &Literal{value: s} +} + +func LiteralSelfLink(resourceType, resourceName string) *Literal { + return LiteralProperty(resourceType, resourceName, "self_link") +} + +func LiteralProperty(resourceType, resourceName, prop string) *Literal { + expr := "${" + resourceType + "." + resourceName + "." 
+ prop + "}" + return LiteralExpression(expr) +} + +func LiteralFromStringValue(s string) *Literal { + return &Literal{value: s} +} diff --git a/upup/pkg/fi/cloudup/terraform/target.go b/upup/pkg/fi/cloudup/terraform/target.go new file mode 100644 index 0000000000..6795d618c5 --- /dev/null +++ b/upup/pkg/fi/cloudup/terraform/target.go @@ -0,0 +1,83 @@ +package terraform + +import ( + "encoding/json" + "fmt" + "io" + "k8s.io/kube-deploy/upup/pkg/fi" +) + +type TerraformTarget struct { + Region string + Project string + resources []*terraformResource + + out io.Writer +} + +func NewTerraformTarget(region, project string, out io.Writer) *TerraformTarget { + return &TerraformTarget{ + Region: region, + Project: project, + out: out, + } +} + +var _ fi.Target = &TerraformTarget{} + +type terraformResource struct { + ResourceType string + ResourceName string + Item interface{} +} + +func (t *TerraformTarget) RenderResource(resourceType string, resourceName string, e interface{}) error { + res := &terraformResource{ + ResourceType: resourceType, + ResourceName: resourceName, + Item: e, + } + + t.resources = append(t.resources, res) + + return nil +} + +func (t *TerraformTarget) Finish(taskMap map[string]fi.Task) error { + resourcesByType := make(map[string]map[string]interface{}) + + for _, res := range t.resources { + resources := resourcesByType[res.ResourceType] + if resources == nil { + resources = make(map[string]interface{}) + resourcesByType[res.ResourceType] = resources + } + + if resources[res.ResourceName] != nil { + return fmt.Errorf("duplicate resource found: %s.%s", res.ResourceType, res.ResourceName) + } + + resources[res.ResourceName] = res.Item + } + + providersByName := make(map[string]map[string]interface{}) + providerGoogle := make(map[string]interface{}) + providerGoogle["project"] = t.Project + providerGoogle["region"] = t.Region + providersByName["google"] = providerGoogle + + data := make(map[string]interface{}) + data["resource"] = resourcesByType + data["provider"] = providersByName + + jsonBytes, err := json.MarshalIndent(data, "", " ") + if err != nil { + return fmt.Errorf("error marshalling terraform data to json: %v", err) + } + + _, err = t.out.Write(jsonBytes) + if err != nil { + return fmt.Errorf("error writing terraform data to output: %v", err) + } + return nil +} diff --git a/upup/pkg/fi/compare_with_id.go b/upup/pkg/fi/compare_with_id.go new file mode 100644 index 0000000000..097c5db60e --- /dev/null +++ b/upup/pkg/fi/compare_with_id.go @@ -0,0 +1,10 @@ +package fi + +// CompareWithID indicates that the value should be compared by the returned ID value (instead of a deep comparison) +// Most Tasks implement this, because typically when a Task references another task, it only is concerned with +// being linked to that task, not the values of the task. +// For example, when an instance is linked to a disk, it cares that the disk is attached to that instance, +// not the size or speed of the disk. 
+type CompareWithID interface { + CompareWithID() *string +} diff --git a/upup/pkg/fi/context.go b/upup/pkg/fi/context.go new file mode 100644 index 0000000000..0d01804a97 --- /dev/null +++ b/upup/pkg/fi/context.go @@ -0,0 +1,130 @@ +package fi + +import ( + "fmt" + "github.com/golang/glog" + "io/ioutil" + "os" + "reflect" + "strings" +) + +type Context struct { + Tmpdir string + + Target Target + Cloud Cloud + CAStore CAStore + + CheckExisting bool +} + +func NewContext(target Target, cloud Cloud, castore CAStore, checkExisting bool) (*Context, error) { + c := &Context{ + Cloud: cloud, + Target: target, + CAStore: castore, + CheckExisting: checkExisting, + } + + t, err := ioutil.TempDir("", "deploy") + if err != nil { + return nil, fmt.Errorf("error creating temporary directory: %v", err) + } + c.Tmpdir = t + + return c, nil +} + +func (c *Context) RunTasks(taskMap map[string]Task) error { + taskOrder := TopologicalSort(taskMap) + + for _, stage := range taskOrder { + for _, k := range stage { + task := taskMap[k] + glog.Infof("Executing %v\n", task) + err := task.Run(c) + if err != nil { + return fmt.Errorf("error running tasks (%s): %v", task, err) + } + } + } + + return nil +} + +func (c *Context) Close() { + glog.V(2).Infof("deleting temp dir: %q", c.Tmpdir) + if c.Tmpdir != "" { + err := os.RemoveAll(c.Tmpdir) + if err != nil { + glog.Warningf("unable to delete temporary directory %q: %v", c.Tmpdir, err) + } + } +} + +//func (c *Context) MergeOptions(options Options) error { +// return c.Options.Merge(options) +//} + +func (c *Context) NewTempDir(prefix string) (string, error) { + t, err := ioutil.TempDir(c.Tmpdir, prefix) + if err != nil { + return "", fmt.Errorf("error creating temporary directory: %v", err) + } + return t, nil +} + +func (c *Context) Render(a, e, changes Task) error { + if _, ok := c.Target.(*DryRunTarget); ok { + return c.Target.(*DryRunTarget).Render(a, e, changes) + } + + v := reflect.ValueOf(e) + vType := v.Type() + + targetType := reflect.ValueOf(c.Target).Type() + + var renderer *reflect.Method + for i := 0; i < vType.NumMethod(); i++ { + method := vType.Method(i) + if !strings.HasPrefix(method.Name, "Render") { + continue + } + match := true + for j := 0; j < method.Type.NumIn(); j++ { + arg := method.Type.In(j) + if arg.ConvertibleTo(vType) { + continue + } + if arg.ConvertibleTo(targetType) { + continue + } + match = false + break + } + if match { + if renderer != nil { + return fmt.Errorf("Found multiple Render methods that could be invoked on %T", e) + } + renderer = &method + } + + } + if renderer == nil { + return fmt.Errorf("Could not find Render method on type %T (target %T)", e, c.Target) + } + var args []reflect.Value + args = append(args, reflect.ValueOf(c.Target)) + args = append(args, reflect.ValueOf(a)) + args = append(args, reflect.ValueOf(e)) + args = append(args, reflect.ValueOf(changes)) + glog.V(4).Infof("Calling method %s on %T", renderer.Name, e) + m := v.MethodByName(renderer.Name) + rv := m.Call(args) + var rvErr error + if !rv[0].IsNil() { + rvErr = rv[0].Interface().(error) + } + return rvErr +} diff --git a/upup/pkg/fi/default_methods.go b/upup/pkg/fi/default_methods.go new file mode 100644 index 0000000000..86211760ca --- /dev/null +++ b/upup/pkg/fi/default_methods.go @@ -0,0 +1,67 @@ +package fi + +import ( + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "reflect" +) + +// DefaultDeltaRunMethod implements the standard change-based run procedure: +// find the existing item; compare properties; call render with (actual, expected,
changes) +func DefaultDeltaRunMethod(e Task, c *Context) error { + var a Task + var err error + + if c.CheckExisting { + a, err = invokeFind(e, c) + if err != nil { + return err + } + } + + if a == nil { + // This is kind of subtle. We want an interface pointer to a struct of the correct type... + a = reflect.New(reflect.TypeOf(e)).Elem().Interface().(Task) + } + + changes := reflect.New(reflect.TypeOf(e).Elem()).Interface().(Task) + changed := BuildChanges(a, e, changes) + + if !changed { + return nil + } + + err = invokeCheckChanges(a, e, changes) + if err != nil { + return err + } + + return c.Render(a, e, changes) +} + +// invokeCheckChanges calls the checkChanges method by reflection +func invokeCheckChanges(a, e, changes Task) error { + rv, err := utils.InvokeMethod(e, "CheckChanges", a, e, changes) + if err != nil { + return err + } + if !rv[0].IsNil() { + err = rv[0].Interface().(error) + } + return err +} + +// invokeFind calls the find method by reflection +func invokeFind(e Task, c *Context) (Task, error) { + rv, err := utils.InvokeMethod(e, "Find", c) + if err != nil { + return nil, err + } + var task Task + if !rv[0].IsNil() { + task = rv[0].Interface().(Task) + } + if !rv[1].IsNil() { + err = rv[1].Interface().(error) + } + return task, err +} diff --git a/upup/pkg/fi/dryrun_target.go b/upup/pkg/fi/dryrun_target.go new file mode 100644 index 0000000000..0484d8749e --- /dev/null +++ b/upup/pkg/fi/dryrun_target.go @@ -0,0 +1,180 @@ +package fi + +import ( + "fmt" + + "bytes" + "github.com/golang/glog" + "io" + "reflect" +) + +// DryRunTarget is a special Target that does not execute anything, but instead tracks all changes. +// By running against a DryRunTarget, a list of changes that would be made can be easily collected, +// without any special support from the Tasks. +type DryRunTarget struct { + changes []*render + + // The destination to which the final report will be printed on Finish() + out io.Writer +} + +type render struct { + a Task + aIsNil bool + e Task + changes Task +} + +var _ Target = &DryRunTarget{} + +func NewDryRunTarget(out io.Writer) *DryRunTarget { + t := &DryRunTarget{} + t.out = out + return t +} + +func (t *DryRunTarget) Render(a, e, changes Task) error { + valA := reflect.ValueOf(a) + aIsNil := valA.IsNil() + + t.changes = append(t.changes, &render{ + a: a, + aIsNil: aIsNil, + e: e, + changes: changes, + }) + return nil +} + +func IdForTask(taskMap map[string]Task, t Task) string { + for k, v := range taskMap { + if v == t { + return k + } + } + glog.Fatalf("unknown task: %v", t) + return "?" 
+} + +func (t *DryRunTarget) PrintReport(taskMap map[string]Task, out io.Writer) error { + b := &bytes.Buffer{} + + if len(t.changes) != 0 { + fmt.Fprintf(b, "Created resources:\n") + for _, r := range t.changes { + if !r.aIsNil { + continue + } + + fmt.Fprintf(b, " %T\t%s\n", r.changes, IdForTask(taskMap, r.e)) + } + + fmt.Fprintf(b, "Changed resources:\n") + for _, r := range t.changes { + if r.aIsNil { + continue + } + var changeList []string + + valC := reflect.ValueOf(r.changes) + valA := reflect.ValueOf(r.a) + valE := reflect.ValueOf(r.e) + if valC.Kind() == reflect.Ptr && !valC.IsNil() { + valC = valC.Elem() + } + if valA.Kind() == reflect.Ptr && !valA.IsNil() { + valA = valA.Elem() + } + if valE.Kind() == reflect.Ptr && !valE.IsNil() { + valE = valE.Elem() + } + if valC.Kind() == reflect.Struct { + for i := 0; i < valC.NumField(); i++ { + fieldValC := valC.Field(i) + + if (fieldValC.Kind() == reflect.Ptr || fieldValC.Kind() == reflect.Slice || fieldValC.Kind() == reflect.Map) && fieldValC.IsNil() { + // No change + continue + } + + fieldValE := valE.Field(i) + + description := "" + ignored := false + if fieldValE.CanInterface() { + fieldValA := valA.Field(i) + + switch fieldValE.Interface().(type) { + //case SimpleUnit: + // ignored = true + default: + description = fmt.Sprintf(" %v -> %v", asString(fieldValA), asString(fieldValE)) + } + } + if ignored { + continue + } + changeList = append(changeList, valC.Type().Field(i).Name+description) + } + } else { + return fmt.Errorf("unhandled change type: %v", valC.Type()) + } + + if len(changeList) == 0 { + continue + } + + fmt.Fprintf(b, " %T\t%s\n", r.changes, IdForTask(taskMap, r.e)) + for _, f := range changeList { + fmt.Fprintf(b, " %s\n", f) + } + fmt.Fprintf(b, "\n") + } + } + + _, err := out.Write(b.Bytes()) + return err +} + +// asString returns a human-readable string representation of the passed value +func asString(v reflect.Value) string { + if v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface { + if v.IsNil() { + return "<nil>" + } + } + if v.CanInterface() { + iv := v.Interface() + _, isResource := iv.(Resource) + if isResource { + return "<resource>" + } + _, isHasID := iv.(CompareWithID) + if isHasID { + id := iv.(CompareWithID).CompareWithID() + if id == nil { + return "id:<nil>" + } else { + return "id:" + *id + } + } + switch typed := iv.(type) { + case *string: + return *typed + case *bool: + return fmt.Sprintf("%v", *typed) + default: + return fmt.Sprintf("%T (%v)", iv, iv) + } + + } else { + return fmt.Sprintf("Unhandled: %T", v.Type()) + + } +} + +// Finish is called at the end of a run, and prints a list of changes to the configured Writer +func (t *DryRunTarget) Finish(taskMap map[string]Task) error { + return t.PrintReport(taskMap, t.out) +} diff --git a/upup/pkg/fi/errors.go b/upup/pkg/fi/errors.go new file mode 100644 index 0000000000..557761dced --- /dev/null +++ b/upup/pkg/fi/errors.go @@ -0,0 +1,13 @@ +package fi + +import ( + "fmt" +) + +func RequiredField(key string) error { + return fmt.Errorf("Field is required: %s", key) +} + +func CannotChangeField(key string) error { + return fmt.Errorf("Field cannot be changed: %s", key) +} diff --git a/upup/pkg/fi/files.go b/upup/pkg/fi/files.go new file mode 100644 index 0000000000..058920378b --- /dev/null +++ b/upup/pkg/fi/files.go @@ -0,0 +1,135 @@ +package fi + +import ( + "fmt" + "github.com/golang/glog" + "io" + "os" + "path" + "strconv" +) + +func WriteFile(destPath string, contents Resource, fileMode os.FileMode, dirMode os.FileMode) error { + err :=
os.MkdirAll(path.Dir(destPath), dirMode) + if err != nil { + return fmt.Errorf("error creating directories for destination file %q: %v", destPath, err) + } + + err = writeFileContents(destPath, contents, fileMode) + if err != nil { + return err + } + + _, err = EnsureFileMode(destPath, fileMode) + if err != nil { + return err + } + + return nil +} + +func writeFileContents(destPath string, src Resource, fileMode os.FileMode) error { + glog.Infof("Writing file %q", destPath) + + out, err := os.OpenFile(destPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileMode) + if err != nil { + return fmt.Errorf("error opening destination file %q: %v", destPath, err) + } + defer out.Close() + + in, err := src.Open() + if err != nil { + return fmt.Errorf("error opening source resource for file %q: %v", destPath, err) + } + defer SafeClose(in) + + _, err = io.Copy(out, in) + if err != nil { + return fmt.Errorf("error writing file %q: %v", destPath, err) + } + return nil +} + +func EnsureFileMode(destPath string, fileMode os.FileMode) (bool, error) { + changed := false + stat, err := os.Stat(destPath) + if err != nil { + return changed, fmt.Errorf("error getting file mode for %q: %v", destPath, err) + } + if stat.Mode() == fileMode { + return changed, nil + } + glog.Infof("Changing file mode for %q to %s", destPath, fileMode) + + err = os.Chmod(destPath, fileMode) + if err != nil { + return changed, fmt.Errorf("error setting file mode for %q: %v", destPath, err) + } + changed = true + return changed, nil +} + +func fileHasHash(f string, expected string) (bool, error) { + hashAlgorithm, err := determineHashAlgorithm(expected) + if err != nil { + return false, err + } + + actual, err := HashFile(f, hashAlgorithm) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + + if actual == expected { + glog.V(2).Infof("Hash matched for %q: %v", f, expected) + return true, nil + } else { + glog.V(2).Infof("Hash did not match for %q: actual=%v vs expected=%v", f, actual, expected) + return false, nil + } +} + +func HashFile(f string, hashAlgorithm HashAlgorithm) (string, error) { + glog.V(2).Infof("hashing file %q", f) + + fileAsset := NewFileResource(f) + hash, err := HashForResource(fileAsset, hashAlgorithm) + if err != nil { + return "", err + } + + return hash, nil +} + +func ParseFileMode(s string, defaultMode os.FileMode) (os.FileMode, error) { + fileMode := defaultMode + if s != "" { + v, err := strconv.ParseUint(s, 8, 32) + if err != nil { + return fileMode, fmt.Errorf("cannot parse file mode %q", s) + } + fileMode = os.FileMode(v) + } + return fileMode, nil +} + +func FileModeToString(mode os.FileMode) string { + return "0" + strconv.FormatUint(uint64(mode), 8) +} + +func SafeClose(r io.Reader) { + if r == nil { + return + } + closer, ok := r.(io.Closer) + if !ok { + return + } + err := closer.Close() + if err != nil { + glog.Warningf("unexpected error closing stream: %v", err) + } +} diff --git a/upup/pkg/fi/fitasks/pki.go b/upup/pkg/fi/fitasks/pki.go new file mode 100644 index 0000000000..20f942f381 --- /dev/null +++ b/upup/pkg/fi/fitasks/pki.go @@ -0,0 +1,116 @@ +package fitasks + +import ( + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "github.com/golang/glog" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "net" + "strings" +) + +const ( + CertificateType_Client string = "client" + CertificateType_Server string = "server" +) + +type PKIKeyPairTask struct { + Name string + Subject *pkix.Name `json:"subject"` + Type string `json:"type"` +
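// AlternateNames lists the requested subject alternative names; in Run below,
// entries that parse as IP addresses become IP SANs, and all other entries
// become DNS SANs.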
AlternateNames []string `json:"alternateNames"` +} + +func (t *PKIKeyPairTask) String() string { + return fmt.Sprintf("PKI: %s", t.Name) +} + +func NewPKIKeyPairTask(name string, contents string, meta string) (fi.Task, error) { + t := &PKIKeyPairTask{Name: name} + + if contents != "" { + err := utils.YamlUnmarshal([]byte(contents), t) + if err != nil { + return nil, fmt.Errorf("error parsing data for PKIKeyPairTask %q: %v", name, err) + } + } + + if meta != "" { + return nil, fmt.Errorf("meta is not supported for PKIKeyPairTask") + } + + return t, nil +} + +func (t *PKIKeyPairTask) Run(c *fi.Context) error { + castore := c.CAStore + cert, err := castore.FindCert(t.Name) + if err != nil { + return err + } + if cert != nil { + key, err := castore.FindPrivateKey(t.Name) + if err != nil { + return err + } + if key == nil { + return fmt.Errorf("found cert in store, but did not find keypair: %q", t.Name) + } + } + + if cert == nil { + glog.V(2).Infof("Creating PKI keypair %q", t.Name) + + template := &x509.Certificate{ + Subject: *t.Subject, + BasicConstraintsValid: true, + IsCA: false, + } + + if len(t.Subject.ToRDNSequence()) == 0 { + return fmt.Errorf("Subject name was empty for SSL keypair %q", t.Name) + } + + switch t.Type { + case CertificateType_Client: + template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} + template.KeyUsage = x509.KeyUsageDigitalSignature + break + + case CertificateType_Server: + template.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} + template.KeyUsage = x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment + break + + default: + return fmt.Errorf("unknown certificate type: %q", t.Type) + } + + for _, san := range t.AlternateNames { + san = strings.TrimSpace(san) + if san == "" { + continue + } + if ip := net.ParseIP(san); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, san) + } + } + + privateKey, err := castore.CreatePrivateKey(t.Name) + if err != nil { + return err + } + cert, err = castore.IssueCert(t.Name, privateKey, template) + if err != nil { + return err + } + } + + // TODO: Check correct subject / flags + + return nil +} diff --git a/upup/pkg/fi/fs_castore.go b/upup/pkg/fi/fs_castore.go new file mode 100644 index 0000000000..202d385711 --- /dev/null +++ b/upup/pkg/fi/fs_castore.go @@ -0,0 +1,297 @@ +package fi + +import ( + "bytes" + "crypto" + crypto_rand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "fmt" + "github.com/golang/glog" + "io/ioutil" + "os" + "path" +) + +type FilesystemCAStore struct { + basedir string + caCertificate *Certificate + caPrivateKey *PrivateKey +} + +var _ CAStore = &FilesystemCAStore{} + +func NewFilesystemCAStore(basedir string) (CAStore, error) { + c := &FilesystemCAStore{ + basedir: basedir, + } + err := os.MkdirAll(path.Join(basedir, "private"), 0700) + if err != nil { + return nil, fmt.Errorf("error creating directory: %v", err) + } + err = os.MkdirAll(path.Join(basedir, "issued"), 0700) + if err != nil { + return nil, fmt.Errorf("error creating directory: %v", err) + } + caCertificate, err := c.loadCertificate(path.Join(basedir, "ca.crt")) + if err != nil { + return nil, err + } + + if caCertificate != nil { + privateKeyPath := path.Join(basedir, "private", "ca.key") + caPrivateKey, err := c.loadPrivateKey(privateKeyPath) + if err != nil { + return nil, err + } + if caPrivateKey == nil { + glog.Warningf("CA private key was not found %q", privateKeyPath) + //return nil, 
fmt.Errorf("error loading CA private key - key not found") + } + c.caCertificate = caCertificate + c.caPrivateKey = caPrivateKey + } else { + err := c.generateCACertificate() + if err != nil { + return nil, err + } + } + return c, nil +} + +func (c *FilesystemCAStore) generateCACertificate() error { + subject := &pkix.Name{ + CommonName: "kubernetes", + } + template := &x509.Certificate{ + Subject: *subject, + KeyUsage: x509.KeyUsageCertSign | x509.KeyUsageCRLSign, + ExtKeyUsage: []x509.ExtKeyUsage{}, + BasicConstraintsValid: true, + IsCA: true, + } + + caPrivateKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048) + if err != nil { + return fmt.Errorf("error generating RSA private key: %v", err) + } + + caCertificate, err := SignNewCertificate(&PrivateKey{Key: caPrivateKey}, template, nil, nil) + if err != nil { + return err + } + + keyPath := path.Join(c.basedir, "private", "ca.key") + err = c.storePrivateKey(caPrivateKey, keyPath) + if err != nil { + return err + } + + certPath := path.Join(c.basedir, "ca.crt") + err = c.storeCertificate(caCertificate, certPath) + if err != nil { + return err + } + + // Make double-sure it round-trips + caCertificate, err = c.loadCertificate(certPath) + if err != nil { + return err + } + + c.caPrivateKey = &PrivateKey{Key: caPrivateKey} + c.caCertificate = caCertificate + return nil +} + +func (c *FilesystemCAStore) getSubjectKey(subject *pkix.Name) string { + seq := subject.ToRDNSequence() + var s bytes.Buffer + for _, rdnSet := range seq { + for _, rdn := range rdnSet { + if s.Len() != 0 { + s.WriteString(",") + } + key := "" + t := rdn.Type + if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 { + switch t[3] { + case 3: + key = "cn" + case 5: + key = "serial" + case 6: + key = "c" + case 7: + key = "l" + case 10: + key = "o" + case 11: + key = "ou" + } + } + if key == "" { + key = t.String() + } + s.WriteString(fmt.Sprintf("%v=%v", key, rdn.Value)) + } + } + return s.String() +} + +func (c *FilesystemCAStore) buildCertificatePath(id string) string { + return path.Join(c.basedir, "issued", id+".crt") +} + +func (c *FilesystemCAStore) buildPrivateKeyPath(id string) string { + return path.Join(c.basedir, "private", id+".key") +} + +func (c *FilesystemCAStore) loadCertificate(p string) (*Certificate, error) { + data, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + } + cert, err := LoadPEMCertificate(data) + if err != nil { + return nil, err + } + if cert == nil { + return nil, nil + } + return cert, nil +} + +func (c *FilesystemCAStore) Cert(id string) (*Certificate, error) { + cert, err := c.FindCert(id) + if err == nil && cert == nil { + return nil, fmt.Errorf("cannot find cert %q", id) + } + return cert, err + +} + +func (c *FilesystemCAStore) FindCert(id string) (*Certificate, error) { + var cert *Certificate + if id == "ca" { + cert = c.caCertificate + } else { + var err error + p := c.buildCertificatePath(id) + cert, err = c.loadCertificate(p) + if err != nil { + return nil, err + } + } + return cert, nil +} + +func (c *FilesystemCAStore) IssueCert(id string, privateKey *PrivateKey, template *x509.Certificate) (*Certificate, error) { + p := c.buildCertificatePath(id) + + if c.caPrivateKey == nil { + return nil, fmt.Errorf("ca.key was not found; cannot issue certificates") + } + cert, err := SignNewCertificate(privateKey, template, c.caCertificate.Certificate, c.caPrivateKey) + if err != nil { + return nil, err + } + + err = c.storeCertificate(cert, p) + if err != nil { + return nil, err + } + + // 
Make double-sure it round-trips + return c.loadCertificate(p) +} + +func (c *FilesystemCAStore) loadPrivateKey(p string) (*PrivateKey, error) { + data, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + } + k, err := parsePEMPrivateKey(data) + if err != nil { + return nil, fmt.Errorf("error parsing private key from %q: %v", p, err) + } + if k == nil { + return nil, nil + } + return &PrivateKey{Key: k}, nil +} + +func (c *FilesystemCAStore) FindPrivateKey(id string) (*PrivateKey, error) { + var key *PrivateKey + if id == "ca" { + key = c.caPrivateKey + } else { + var err error + p := c.buildPrivateKeyPath(id) + key, err = c.loadPrivateKey(p) + if err != nil { + return nil, err + } + } + return key, nil +} + +func (c *FilesystemCAStore) PrivateKey(id string) (*PrivateKey, error) { + key, err := c.FindPrivateKey(id) + if err == nil && key == nil { + return nil, fmt.Errorf("cannot find SSL key %q", id) + } + return key, err + +} + +func (c *FilesystemCAStore) CreatePrivateKey(id string) (*PrivateKey, error) { + p := c.buildPrivateKeyPath(id) + + privateKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048) + if err != nil { + return nil, fmt.Errorf("error generating RSA private key: %v", err) + } + + err = c.storePrivateKey(privateKey, p) + if err != nil { + return nil, err + } + + return &PrivateKey{Key: privateKey}, nil +} + +func (c *FilesystemCAStore) storePrivateKey(privateKey crypto.PrivateKey, p string) error { + var data bytes.Buffer + err := WritePrivateKey(privateKey, &data) + if err != nil { + return err + } + + return c.writeFile(data.Bytes(), p) +} + +func (c *FilesystemCAStore) storeCertificate(cert *Certificate, p string) error { + var data bytes.Buffer + err := cert.WriteCertificate(&data) + if err != nil { + return err + } + + return c.writeFile(data.Bytes(), p) +} + +func (c *FilesystemCAStore) writeFile(data []byte, p string) error { + // TODO: concurrency? + err := ioutil.WriteFile(p, data, 0600) + if err != nil { + // TODO: Delete file on disk? Write a temp file and move it atomically? 
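// (An illustrative sketch of the atomic option, not part of this patch: write
// to a temp file in the same directory, then rename it into place, since
// rename is atomic within a filesystem:
//
//	tmp, err := ioutil.TempFile(path.Dir(p), "tmp")
//	if err == nil {
//		_, err = tmp.Write(data)
//	}
//	if err == nil {
//		err = tmp.Close()
//	}
//	if err == nil {
//		err = os.Rename(tmp.Name(), p)
//	}
//
// ioutil.TempFile already creates the file with mode 0600, matching the mode
// used above.)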
+ return fmt.Errorf("error writing certificate/key data to path %q: %v", p, err) + } + return nil +} diff --git a/upup/pkg/fi/fs_secretstore.go b/upup/pkg/fi/fs_secretstore.go new file mode 100644 index 0000000000..a09a2f4fde --- /dev/null +++ b/upup/pkg/fi/fs_secretstore.go @@ -0,0 +1,102 @@ +package fi + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path" +) + +type FilesystemSecretStore struct { + basedir string +} + +var _ SecretStore = &FilesystemSecretStore{} + +func NewFilesystemSecretStore(basedir string) (SecretStore, error) { + c := &FilesystemSecretStore{ + basedir: basedir, + } + err := os.MkdirAll(path.Join(basedir), 0700) + if err != nil { + return nil, fmt.Errorf("error creating directory: %v", err) + } + return c, nil +} + +func (c *FilesystemSecretStore) buildSecretPath(id string) string { + return path.Join(c.basedir, id) +} + +func (c *FilesystemSecretStore) FindSecret(id string) (*Secret, error) { + p := c.buildSecretPath(id) + s, err := c.loadSecret(p) + if err != nil { + return nil, err + } + return s, nil +} + +func (c *FilesystemSecretStore) Secret(id string) (*Secret, error) { + s, err := c.FindSecret(id) + if err != nil { + return nil, err + } + if s == nil { + // For now, we auto-create the secret + return c.CreateSecret(id) + // return nil, fmt.Errorf("Secret not found: %q", id) + } + return s, nil +} + +func (c *FilesystemSecretStore) CreateSecret(id string) (*Secret, error) { + p := c.buildSecretPath(id) + + s, err := CreateSecret() + if err != nil { + return nil, err + } + + err = c.storeSecret(s, p) + if err != nil { + return nil, err + } + + // Make double-sure it round-trips + return c.loadSecret(p) +} + +func (c *FilesystemSecretStore) loadSecret(p string) (*Secret, error) { + data, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + } + s := &Secret{} + err = json.Unmarshal(data, s) + if err != nil { + return nil, fmt.Errorf("error parsing secret from %q: %v", p, err) + } + return s, nil +} + +func (c *FilesystemSecretStore) storeSecret(s *Secret, p string) error { + data, err := json.Marshal(s) + if err != nil { + return fmt.Errorf("error serializing secret: %v", err) + } + return c.writeFile(data, p) +} + +func (c *FilesystemSecretStore) writeFile(data []byte, p string) error { + // TODO: concurrency? + err := ioutil.WriteFile(p, data, 0600) + if err != nil { + // TODO: Delete file on disk? Write a temp file and move it atomically? 
+ return fmt.Errorf("error writing secret data to path %q: %v", p, err) + } + return nil +} diff --git a/upup/pkg/fi/hash.go b/upup/pkg/fi/hash.go new file mode 100644 index 0000000000..0596f9af36 --- /dev/null +++ b/upup/pkg/fi/hash.go @@ -0,0 +1,46 @@ +package fi + +import ( + "crypto/md5" + "crypto/sha1" + "crypto/sha256" + "fmt" + "github.com/golang/glog" + "hash" +) + +type HashAlgorithm string + +const ( + HashAlgorithmSHA256 = "sha256" + HashAlgorithmSHA1 = "sha1" + HashAlgorithmMD5 = "md5" +) + +func NewHasher(hashAlgorithm HashAlgorithm) hash.Hash { + switch hashAlgorithm { + case HashAlgorithmMD5: + return md5.New() + + case HashAlgorithmSHA1: + return sha1.New() + + case HashAlgorithmSHA256: + return sha256.New() + } + + glog.Exitf("Unknown hash algorithm: %v", hashAlgorithm) + return nil +} + +func determineHashAlgorithm(hash string) (HashAlgorithm, error) { + if len(hash) == 32 { + return HashAlgorithmMD5, nil + } else if len(hash) == 40 { + return HashAlgorithmSHA1, nil + } else if len(hash) == 64 { + return HashAlgorithmSHA256, nil + } else { + return "", fmt.Errorf("Unrecognized hash format: %q", hash) + } +} diff --git a/upup/pkg/fi/http.go b/upup/pkg/fi/http.go new file mode 100644 index 0000000000..9757121156 --- /dev/null +++ b/upup/pkg/fi/http.go @@ -0,0 +1,72 @@ +package fi + +import ( + "fmt" + "github.com/golang/glog" + "io" + "net/http" + "os" + "path" +) + +func DownloadURL(url string, dest string, hash string) (string, error) { + if hash != "" { + match, err := fileHasHash(dest, hash) + if err != nil { + return "", err + } + if match { + return hash, nil + } + } + + dirMode := os.FileMode(0755) + err := downloadURLAlways(url, dest, dirMode) + if err != nil { + return "", err + } + + if hash != "" { + match, err := fileHasHash(dest, hash) + if err != nil { + return "", err + } + if !match { + return "", fmt.Errorf("downloaded from %q but hash did not match expected %q", url, hash) + } + } else { + hash, err = HashFile(dest, HashAlgorithmSHA256) + if err != nil { + return "", err + } + } + + return hash, nil +} + +func downloadURLAlways(url string, destPath string, dirMode os.FileMode) error { + err := os.MkdirAll(path.Dir(destPath), dirMode) + if err != nil { + return fmt.Errorf("error creating directories for destination file %q: %v", destPath, err) + } + + output, err := os.Create(destPath) + if err != nil { + return fmt.Errorf("error creating file for download %q: %v", destPath, err) + } + defer output.Close() + + glog.Infof("Downloading %q", url) + + response, err := http.Get(url) + if err != nil { + return fmt.Errorf("error doing HTTP fetch of %q: %v", url, err) + } + defer response.Body.Close() + + _, err = io.Copy(output, response.Body) + if err != nil { + return fmt.Errorf("error downloading HTTP content from %q: %v", url, err) + } + return nil +} diff --git a/upup/pkg/fi/loader/options_loader.go b/upup/pkg/fi/loader/options_loader.go new file mode 100644 index 0000000000..43c0d90c50 --- /dev/null +++ b/upup/pkg/fi/loader/options_loader.go @@ -0,0 +1,114 @@ +package loader + +import ( + "bytes" + "encoding/json" + "fmt" + "github.com/golang/glog" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "os" + "reflect" + "strings" + "text/template" +) + +const maxIterations = 10 + +type OptionsLoader struct { + config interface{} + templates []*template.Template +} + +func NewOptionsLoader(config interface{}) *OptionsLoader { + l := &OptionsLoader{} + l.config = config + return l +} + +func (l *OptionsLoader) AddTemplate(t *template.Template) { + l.templates =
append(l.templates, t) +} +func copyStruct(dest, src interface{}) { + vDest := reflect.ValueOf(dest).Elem() + vSrc := reflect.ValueOf(src).Elem() + + for i := 0; i < vSrc.NumField(); i++ { + fv := vSrc.Field(i) + vDest.Field(i).Set(fv) + } +} + +func (l *OptionsLoader) iterate(inConfig interface{}) (interface{}, error) { + t := reflect.TypeOf(inConfig).Elem() + + options := reflect.New(t).Interface() + copyStruct(options, inConfig) + for _, t := range l.templates { + glog.V(2).Infof("executing template %s", t.Name()) + + var buffer bytes.Buffer + err := t.ExecuteTemplate(&buffer, t.Name(), inConfig) + if err != nil { + return nil, fmt.Errorf("error executing template %q: %v", t.Name(), err) + } + + yamlBytes := buffer.Bytes() + + jsonBytes, err := utils.YamlToJson(yamlBytes) + if err != nil { + // TODO: It would be nice if yaml returned us the line number here + glog.Infof("error parsing yaml. yaml follows:") + for i, line := range strings.Split(string(yamlBytes), "\n") { + fmt.Fprintf(os.Stderr, "%3d: %s\n", i, line) + } + return nil, fmt.Errorf("error parsing yaml %q: %v", t.Name(), err) + } + + err = json.Unmarshal(jsonBytes, options) + if err != nil { + return nil, fmt.Errorf("error parsing yaml (converted to JSON) %q: %v", t.Name(), err) + } + } + + return options, nil +} + +func (l *OptionsLoader) Build() (interface{}, error) { + options := l.config + iteration := 0 + for { + nextOptions, err := l.iterate(options) + if err != nil { + return nil, err + } + + if reflect.DeepEqual(options, nextOptions) { + return options, nil + } + + iteration++ + if iteration > maxIterations { + return nil, fmt.Errorf("options did not converge after %d iterations", maxIterations) + } + + options = nextOptions + } +} + +func (l *OptionsLoader) HandleOptions(i *TreeWalkItem) error { + contents, err := i.ReadString() + if err != nil { + return err + } + + t := template.New(i.RelativePath) + _, err = t.Parse(contents) + if err != nil { + return fmt.Errorf("error parsing options template %q: %v", i.Path, err) + } + + t.Option("missingkey=zero") + + l.AddTemplate(t) + return nil +} diff --git a/upup/pkg/fi/loader/tree_walker.go b/upup/pkg/fi/loader/tree_walker.go new file mode 100644 index 0000000000..4b42116ca3 --- /dev/null +++ b/upup/pkg/fi/loader/tree_walker.go @@ -0,0 +1,151 @@ +package loader + +import ( + "fmt" + "github.com/golang/glog" + "io/ioutil" + "os" + "path" + "strings" +) + +type TreeWalker struct { + Contexts map[string]Handler + Extensions map[string]Handler + DefaultHandler Handler + Tags map[string]struct{} +} + +type TreeWalkItem struct { + Context string + Name string + Path string + RelativePath string + Meta string +} + +func (i *TreeWalkItem) ReadString() (string, error) { + b, err := i.ReadBytes() + if err != nil { + return "", err + } + return string(b), nil +} + +func (i *TreeWalkItem) ReadBytes() ([]byte, error) { + b, err := ioutil.ReadFile(i.Path) + if err != nil { + return nil, fmt.Errorf("error reading file %q: %v", i.Path, err) + } + return b, nil +} + +type Handler func(item *TreeWalkItem) error + +func IsTag(name string) bool { + return len(name) != 0 && name[0] == '_' +} + +func (t *TreeWalker) Walk(basedir string) error { + i := &TreeWalkItem{ + Context: "", + Path: basedir, + RelativePath: "", + } + + return t.walkDirectory(i) +} + +func (t *TreeWalker) walkDirectory(parent *TreeWalkItem) error { + files, err := ioutil.ReadDir(parent.Path) + if err != nil { + return fmt.Errorf("error reading directory %q: %v", parent.Path, err) + } + + for _, f := range files { + var 
err error + + fileName := f.Name() + + i := &TreeWalkItem{ + Context: parent.Context, + Path: path.Join(parent.Path, fileName), + RelativePath: path.Join(parent.RelativePath, fileName), + Name: fileName, + } + + glog.V(4).Infof("visit %q", i.Path) + + if f.IsDir() { + if IsTag(fileName) { + // Only descend into the tag directory if we have the tag + _, found := t.Tags[fileName] + if !found { + glog.V(2).Infof("Skipping directory as tag not present: %q", i.Path) + continue + } else { + glog.V(2).Infof("Descending into directory, as tag is present: %q", i.Path) + err = t.walkDirectory(i) + } + } else if _, found := t.Contexts[fileName]; found { + // Entering a new context (mode of operation) + if parent.Context != "" { + return fmt.Errorf("found context %q inside context %q at %q", fileName, parent.Context, i.Path) + } + i.Context = fileName + i.RelativePath = "" + err = t.walkDirectory(i) + } else { + // Simple directory for organization / structure + err = t.walkDirectory(i) + } + if err != nil { + return err + } + continue + } + + if strings.HasSuffix(fileName, ".meta") { + // We'll read it when we see the actual file + // But check the actual file is there + primaryPath := strings.TrimSuffix(i.Path, ".meta") + if _, err := os.Stat(primaryPath); os.IsNotExist(err) { + return fmt.Errorf("found .meta file without corresponding file: %q", i.Path) + } + + continue + } + + { + metaPath := i.Path + ".meta" + metaBytes, err := ioutil.ReadFile(metaPath) + if err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("error reading file %q: %v", metaPath, err) + } + metaBytes = nil + } + if metaBytes != nil { + i.Meta = string(metaBytes) + } + } + + var handler Handler + if i.Context != "" { + handler = t.Contexts[i.Context] + } else { + extension := path.Ext(fileName) + handler = t.Extensions[extension] + if handler == nil { + handler = t.DefaultHandler + } + } + + err = handler(i) + if err != nil { + return fmt.Errorf("error handling file %q: %v", i.Path, err) + } + } + + return nil +} diff --git a/upup/pkg/fi/nodeup/README.md b/upup/pkg/fi/nodeup/README.md new file mode 100644 index 0000000000..d5dde1a777 --- /dev/null +++ b/upup/pkg/fi/nodeup/README.md @@ -0,0 +1,73 @@ +NodeUp Tasks +============ + +Within a model, we recognize a few well-known task names: + +* files +* packages +* services +* options + +When a directory is found with one of these well-known names, the items in the subtree define tasks of the corresponding +types. + +(TODO: Should we just prefer extensions everywhere?) + +Directories which start with an underscore are tags: we only descend into those directories if the relevant tag is present. + +All other directory names can be used for organization. + +Alongside each task file, a file with the same name and a .meta extension will be recognized as well. It contains +additional JSON options to parameterize the task. This is useful for files or templates, which otherwise have +no place to put metadata. + +files +===== + +The contents of the filesystem tree will be created, mirroring what exists under the files directory. + +Directories will be created as needed. Created directories will be set to mode 0755. + +Files will be created 0644 (change with meta 'fileMode'). + +Owner & group will be root:root. + +Two special extensions are recognized: + +* .asset will be sourced from assets. Assets are binaries that are made available to the installer, e.g. from a .tar.gz distribution +* .template is a Go template + +packages +======== + +Any files found will be considered packages.
+ +The name of the file will be the package to be installed. + +services +======== + +Any files found will be considered services. + +The name of the file will be the service to be managed. + +By default, the service will be restarted and set to auto-start on boot. + + +## Order of operations + +Logically, all operations are collected before any are performed, according to the tags. + +Then operations are performed in the following order: + +options +packages +files +sysctls +services + +Ties are broken as follows + +* A task that required more tags is run after a task that required fewer tags +* Sorted by name +* Custom packages (install a deb) are run after OS provided packages diff --git a/upup/pkg/fi/nodeup/build_flags.go b/upup/pkg/fi/nodeup/build_flags.go new file mode 100644 index 0000000000..af04de1f42 --- /dev/null +++ b/upup/pkg/fi/nodeup/build_flags.go @@ -0,0 +1,58 @@ +package nodeup + +import ( + "fmt" + "github.com/golang/glog" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "reflect" + "sort" + "strings" +) + +func buildFlags(options interface{}) (string, error) { + var flags []string + + walker := func(path string, field *reflect.StructField, val reflect.Value) error { + if field == nil { + glog.V(4).Infof("skipping non-field: %s", path) + return nil + } + tag := field.Tag.Get("flag") + if tag == "" { + glog.V(4).Infof("skipping field with no flag tag: %s", path) + return nil + } + flagName := tag + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + var flag string + switch v := val.Interface().(type) { + case string, int, bool, float32, float64: + vString := fmt.Sprintf("%v", v) + if vString != "" { + flag = fmt.Sprintf("--%s=%s", flagName, vString) + } + + default: + return fmt.Errorf("BuildFlags of value type not handled: %T %s=%v", v, path, v) + } + if flag != "" { + flags = append(flags, flag) + } + return nil + } + err := utils.WalkRecursive(reflect.ValueOf(options), walker) + if err != nil { + return "", err + } + // Sort so that the order is stable across runs + sort.Strings(flags) + + return strings.Join(flags, " "), nil +} diff --git a/upup/pkg/fi/nodeup/cloudinit/cloud_init_target.go b/upup/pkg/fi/nodeup/cloudinit/cloud_init_target.go new file mode 100644 index 0000000000..357102489f --- /dev/null +++ b/upup/pkg/fi/nodeup/cloudinit/cloud_init_target.go @@ -0,0 +1,171 @@ +package cloudinit + +import ( + "encoding/base64" + "fmt" + "github.com/golang/glog" + "io" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "os" + "path" +) + +type CloudInitTarget struct { + Config *CloudConfig + out io.Writer +} + +type AddBehaviour int + +const ( + Always AddBehaviour = iota + Once +) + +func NewCloudInitTarget(out io.Writer) *CloudInitTarget { + t := &CloudInitTarget{ + Config: &CloudConfig{}, + out: out, + } + return t +} + +var _ fi.Target = &CloudInitTarget{} + +type CloudConfig struct { + PackageUpdate bool `json:"package_update"` + + Packages []string `json:"packages,omitempty"` + RunCommmands [][]string `json:"runcmd,omitempty"` + WriteFiles []*CloudConfigFile `json:"write_files,omitempty"` +} + +type CloudConfigFile struct { + Encoding string `json:"encoding,omitempty"` + Owner string `json:"owner,omitempty"` + Path string `json:"path,omitempty"` + Permissions string `json:"permissions,omitempty"` + Content string `json:"content,omitempty"` +} + +func (t *CloudInitTarget) AddMkdirpCommand(p string, dirMode os.FileMode) { + t.AddCommand(Once, "mkdir", "-p", "-m", fi.FileModeToString(dirMode), p) + +} 
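To illustrate the reflection walk in buildFlags above: each exported field with a `flag` tag renders as --name=value, nil pointers and empty strings are skipped, and the result is sorted for stability. A minimal sketch (the kubeletDemo struct is hypothetical, placed in the same nodeup package since buildFlags is unexported, and this assumes utils.WalkRecursive visits struct fields as the walker expects):

```go
// kubeletDemo is a hypothetical options struct in package nodeup.
type kubeletDemo struct {
	CloudProvider string `flag:"cloud-provider"`
	Verbosity     *int   `flag:"v"`
	PodCIDR       string `flag:"pod-cidr"` // empty value: flag omitted
	Internal      string // no flag tag: skipped by the walker
}

func demoFlags() (string, error) {
	v := 2
	// Expected: "--cloud-provider=gce --v=2" (sorted; nil/empty fields dropped)
	return buildFlags(&kubeletDemo{CloudProvider: "gce", Verbosity: &v})
}
```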
+func (t *CloudInitTarget) AddDownloadCommand(addBehaviour AddBehaviour, url string, dest string) { + // TODO: Create helper to download reliably and validate hash? + // ... but then why not just use cloudup :-) + t.AddCommand(addBehaviour, "curl", "-f", "--ipv4", "-Lo", dest, "--connect-timeout", "20", "--retry", "6", "--retry-delay", "10", url) +} + +func (t *CloudInitTarget) fetch(p *fi.Source, destPath string) { + // We could probably move this to fi.Source - it is likely to be the same for every provider + if p.URL != "" { + if p.Parent != nil { + glog.Fatalf("unexpected parent with SourceURL in FetchInstructions: %v", p) + } + t.AddDownloadCommand(Once, p.URL, destPath) + } else if p.ExtractFromArchive != "" { + if p.Parent == nil { + glog.Fatalf("unexpected ExtractFromArchive without parent in FetchInstructions: %v", p) + } + + // TODO: Remove duplicate commands? + archivePath := "/tmp/" + utils.SanitizeString(p.Parent.Key()) + t.fetch(p.Parent, archivePath) + + extractDir := "/tmp/extracted_" + utils.SanitizeString(p.Parent.Key()) + t.AddMkdirpCommand(extractDir, 0755) + t.AddCommand(Once, "tar", "zxf", archivePath, "-C", extractDir) + + // Always because this shouldn't happen and we want an indication that it happened + t.AddCommand(Always, "cp", path.Join(extractDir, p.ExtractFromArchive), destPath) + } else { + glog.Fatalf("unknown FetchInstructions: %v", p) + } +} + +func (t *CloudInitTarget) WriteFile(destPath string, contents fi.Resource, fileMode os.FileMode, dirMode os.FileMode) error { + var p *fi.Source + + if hs, ok := contents.(fi.HasSource); ok { + p = hs.GetSource() + } + + if p != nil { + t.AddMkdirpCommand(path.Dir(destPath), dirMode) + t.fetch(p, destPath) + } else { + // TODO: No way to specify parent dir permissions? + f := &CloudConfigFile{ + Encoding: "b64", + Owner: "root:root", + Permissions: fi.FileModeToString(fileMode), + Path: destPath, + } + + d, err := fi.ResourceAsBytes(contents) + if err != nil { + return err + } + + // Not a strict limit, just a sanity check + if len(d) > 256*1024 { + return fmt.Errorf("resource is very large (failed sanity-check): %v", contents) + } + + f.Content = base64.StdEncoding.EncodeToString(d) + + t.Config.WriteFiles = append(t.Config.WriteFiles, f) + } + return nil +} + +func stringSlicesEquals(l, r []string) bool { + if len(l) != len(r) { + return false + } + for i, v := range l { + if r[i] != v { + return false + } + } + return true +} + +func (t *CloudInitTarget) AddCommand(addBehaviour AddBehaviour, args ...string) { + switch addBehaviour { + case Always: + break + + case Once: + for _, c := range t.Config.RunCommmands { + if stringSlicesEquals(args, c) { + glog.V(2).Infof("skipping pre-existing command because AddBehaviour=Once: %q", args) + return + } + } + break + + default: + glog.Fatalf("unknown AddBehaviour: %v", addBehaviour) + } + + t.Config.RunCommmands = append(t.Config.RunCommmands, args) +} + +func (t *CloudInitTarget) Finish(taskMap map[string]fi.Task) error { + d, err := utils.YamlMarshal(t.Config) + if err != nil { + return fmt.Errorf("error serializing config to yaml: %v", err) + } + + conf := "#cloud-config\n" + string(d) + + _, err = t.out.Write([]byte(conf)) + if err != nil { + return fmt.Errorf("error writing cloud-init data to output: %v", err) + } + return nil +} diff --git a/upup/pkg/fi/nodeup/command.go b/upup/pkg/fi/nodeup/command.go new file mode 100644 index 0000000000..4e102e9cc3 --- /dev/null +++ b/upup/pkg/fi/nodeup/command.go @@ -0,0 +1,85 @@ +package nodeup + +import ( + "fmt" + 
"github.com/golang/glog" + "io" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/nodeup/cloudinit" + "k8s.io/kube-deploy/upup/pkg/fi/nodeup/local" + "k8s.io/kube-deploy/upup/pkg/fi/utils" +) + +type NodeUpCommand struct { + Config *NodeConfig + ConfigLocation string + ModelDir string + AssetDir string + Target string +} + +func (c *NodeUpCommand) Run(out io.Writer) error { + if c.ConfigLocation != "" { + confData, err := utils.ReadLocation(c.ConfigLocation) + if err != nil { + return fmt.Errorf("error loading configuration %q: %v", c.ConfigLocation, err) + } + err = utils.YamlUnmarshal(confData, c.Config) + if err != nil { + return fmt.Errorf("error parsing configuration %q: %v", c.ConfigLocation, err) + } + } + + if c.AssetDir == "" { + return fmt.Errorf("AssetDir is required") + } + assets := fi.NewAssetStore(c.AssetDir) + for _, asset := range c.Config.Assets { + err := assets.Add(asset) + if err != nil { + return fmt.Errorf("error adding asset %q: %v", asset, err) + } + } + + loader := NewLoader(c.Config, assets) + + taskMap, err := loader.Build(c.ModelDir) + if err != nil { + glog.Exitf("error building: %v", err) + } + + var cloud fi.Cloud + var caStore fi.CAStore + var target fi.Target + checkExisting := true + + switch c.Target { + case "direct": + target = &local.LocalTarget{} + case "dryrun": + target = fi.NewDryRunTarget(out) + case "cloudinit": + checkExisting = false + target = cloudinit.NewCloudInitTarget(out) + default: + return fmt.Errorf("unsupported target type %q", c.Target) + } + + context, err := fi.NewContext(target, cloud, caStore, checkExisting) + if err != nil { + glog.Exitf("error building context: %v", err) + } + defer context.Close() + + err = context.RunTasks(taskMap) + if err != nil { + glog.Exitf("error running tasks: %v", err) + } + + err = target.Finish(taskMap) + if err != nil { + glog.Exitf("error closing target: %v", err) + } + + return nil +} diff --git a/upup/pkg/fi/nodeup/config.go b/upup/pkg/fi/nodeup/config.go new file mode 100644 index 0000000000..78b6bb4892 --- /dev/null +++ b/upup/pkg/fi/nodeup/config.go @@ -0,0 +1,135 @@ +package nodeup + +import "k8s.io/kube-deploy/upup/pkg/fi" + +// TODO: Can we replace some of all of this with pkg/apis/componentconfig/types.go ? 
+type NodeConfig struct { + Kubelet KubeletConfig + KubeProxy KubeProxyConfig + KubeControllerManager KubeControllerManagerConfig + KubeScheduler KubeSchedulerConfig + Docker DockerConfig + APIServer APIServerConfig + CACertificate *fi.Certificate + + DNS DNSConfig + + KubeUser string + KubePassword string + + Tokens map[string]string + + Tags []string + Assets []string +} + +// A helper so that templates can get tokens which are not valid identifiers +func (n *NodeConfig) GetToken(key string) string { + return n.Tokens[key] +} + +type DNSConfig struct { + Replicas int + Domain string + ServerIP string +} + +type KubeletConfig struct { + CloudProvider string `flag:"cloud-provider"` + + NonMasqueradeCdir string `flag:"non-masquerade-cidr"` + APIServers string `flag:"api-servers"` + + CgroupRoot string `flag:"cgroup-root"` + SystemCgroups string `flag:"system-cgroups"` + RuntimeCgroups string `flag:"runtime-cgroups"` + KubeletCgroups string `flag:"kubelet-cgroups"` + + HairpinMode string `flag:"hairpin-mode"` + + EnableDebuggingHandlers *bool `flag:"enable-debugging-handlers"` + Config string `flag:"config"` + AllowPrivileged *bool `flag:"allow-privileged"` + Verbosity *int `flag:"v"` + ClusterDNS string `flag:"cluster-dns"` + ClusterDomain string `flag:"cluster-domain"` + ConfigureCBR0 *bool `flag:"configure-cbr0"` + BabysitDaemons *bool `flag:"babysit-daemons"` + + RegisterSchedulable *bool `flag:"register-schedulable"` + ReconcileCIDR *bool `flag:"reconcile-cidr"` + PodCIDR string `flag:"pod-cidr"` + + Certificate *fi.Certificate + Key *fi.PrivateKey + // Allow override of CA Certificate + CACertificate *fi.Certificate +} + +type KubeProxyConfig struct { + Master string `flag:"master"` + // TODO: Name verbosity or LogLevel + LogLevel int `flag:"v"` + + // TODO: Better type ? + CPURequest string // e.g. "20m" + + Image string +} + +type DockerConfig struct { + Bridge string `flag:"bridge"` + LogLevel string `flag:"log-level"` + IPTables bool `flag:"iptables"` + IPMasq bool `flag:"ip-masq"` + Storage string `flag:"s"` +} + +type APIServerConfig struct { + CloudProvider string `flag:"cloud-provider"` + + SecurePort int `flag:"secure-port"` + Address string `flag:"address"` + EtcdServers string `flag:"etcd-servers"` + EtcdServersOverrides string `flag:"etcd-servers-overrides"` + // TODO: []string and join with commas? 
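// (Illustrative: if this became []string, the flag value could be prepared
// with strings.Join(values, ","), since kube-apiserver parses
// --admission-control as a comma-separated list.)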
+ AdmissionControl string `flag:"admission-control"` + ServiceClusterIPRange string `flag:"service-cluster-ip-range"` + ClientCAFile string `flag:"client-ca-file"` + BasicAuthFile string `flag:"basic-auth-file"` + TLSCertFile string `flag:"tls-cert-file"` + TLSPrivateKeyFile string `flag:"tls-private-key-file"` + TokenAuthFile string `flag:"token-auth-file"` + // TODO: Name verbosity or LogLevel + LogLevel int `flag:"v"` + AllowPrivileged *bool `flag:"allow-privileged"` + + PathSrvKubernetes string + PathSrvSshproxy string + Image string + + Certificate *fi.Certificate + Key *fi.PrivateKey +} + +type KubeControllerManagerConfig struct { + CloudProvider string `flag:"cloud-provider"` + + Master string `flag:"master"` + ClusterName string `flag:"cluster-name"` + ClusterCIDR string `flag:"cluster-cidr"` + AllocateNodeCIDRs *bool `flag:"allocate-node-cidrs"` + // TODO: Name verbosity or LogLevel + LogLevel int `flag:"v"` + + ServiceAccountPrivateKeyFile string `flag:"service-account-private-key-file"` + RootCAFile string `flag:"root-ca-file"` + + Image string + + PathSrvKubernetes string +} + +type KubeSchedulerConfig struct { + Image string +} diff --git a/upup/pkg/fi/nodeup/loader.go b/upup/pkg/fi/nodeup/loader.go new file mode 100644 index 0000000000..3125fb848a --- /dev/null +++ b/upup/pkg/fi/nodeup/loader.go @@ -0,0 +1,202 @@ +package nodeup + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "github.com/golang/glog" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/loader" + "k8s.io/kube-deploy/upup/pkg/fi/nodeup/nodetasks" + "k8s.io/kube-deploy/upup/pkg/fi/utils" + "strings" + "text/template" +) + +type Loader struct { + templates []*template.Template + optionsLoader *loader.OptionsLoader + config *NodeConfig + + assets *fi.AssetStore + tasks map[string]fi.Task +} + +func NewLoader(config *NodeConfig, assets *fi.AssetStore) *Loader { + l := &Loader{} + l.assets = assets + l.tasks = make(map[string]fi.Task) + l.optionsLoader = loader.NewOptionsLoader(config) + l.config = config + + return l +} + +func (l *Loader) executeTemplate(key string, d string) (string, error) { + t := template.New(key) + + funcMap := make(template.FuncMap) + funcMap["BuildFlags"] = buildFlags + funcMap["Base64Encode"] = func(s string) string { + return base64.StdEncoding.EncodeToString([]byte(s)) + } + t.Funcs(funcMap) + + context := l.config + + _, err := t.Parse(d) + if err != nil { + return "", fmt.Errorf("error parsing template %q: %v", key, err) + } + + t.Option("missingkey=zero") + + var buffer bytes.Buffer + err = t.ExecuteTemplate(&buffer, key, context) + if err != nil { + return "", fmt.Errorf("error executing template %q: %v", key, err) + } + + return buffer.String(), nil +} + +func ignoreHandler(i *loader.TreeWalkItem) error { + return nil +} + +func (l *Loader) Build(baseDir string) (map[string]fi.Task, error) { + tags := make(map[string]struct{}) + for _, tag := range l.config.Tags { + tags[tag] = struct{}{} + } + + // First pass: load options + tw := &loader.TreeWalker{ + DefaultHandler: ignoreHandler, + Contexts: map[string]loader.Handler{ + "options": l.optionsLoader.HandleOptions, + "packages": ignoreHandler, + "services": ignoreHandler, + "files": ignoreHandler, + }, + Tags: tags, + } + + err := tw.Walk(baseDir) + if err != nil { + return nil, err + } + + config, err := l.optionsLoader.Build() + if err != nil { + return nil, err + } + l.config = config.(*NodeConfig) + glog.Infof("options: %s", utils.JsonString(l.config)) + + // Second pass: load everything 
else + tw = &loader.TreeWalker{ + DefaultHandler: l.handleFile, + Contexts: map[string]loader.Handler{ + "options": ignoreHandler, + "packages": l.newTaskHandler("package/", nodetasks.NewPackage), + "services": l.newTaskHandler("service/", nodetasks.NewService), + "files": l.handleFile, + }, + Tags: tags, + } + + err = tw.Walk(baseDir) + if err != nil { + return nil, err + } + + // If there is a package task, we need an update packages task + for _, t := range l.tasks { + if _, ok := t.(*nodetasks.Package); ok { + l.tasks["UpdatePackages"] = &nodetasks.UpdatePackages{} + } + } + + return l.tasks, nil +} + +type TaskBuilder func(name string, contents string, meta string) (fi.Task, error) + +func (r *Loader) newTaskHandler(prefix string, builder TaskBuilder) loader.Handler { + return func(i *loader.TreeWalkItem) error { + contents, err := i.ReadString() + if err != nil { + return err + } + task, err := builder(i.Name, contents, i.Meta) + if err != nil { + return fmt.Errorf("error building %s for %q: %v", i.Name, i.Path, err) + } + key := prefix + i.RelativePath + + if task != nil { + r.tasks[key] = task + } + return nil + } +} + +func (r *Loader) handleFile(i *loader.TreeWalkItem) error { + var task fi.Task + var err error + if strings.HasSuffix(i.RelativePath, ".template") { + contents, err := i.ReadString() + if err != nil { + return err + } + + // TODO: Use template resource here to defer execution? + destPath := "/" + strings.TrimSuffix(i.RelativePath, ".template") + name := strings.TrimSuffix(i.Name, ".template") + expanded, err := r.executeTemplate(name, contents) + if err != nil { + return fmt.Errorf("error executing template %q: %v", i.RelativePath, err) + } + + task, err = nodetasks.NewFileTask(name, fi.NewStringResource(expanded), destPath, i.Meta) + } else if strings.HasSuffix(i.RelativePath, ".asset") { + contents, err := i.ReadBytes() + if err != nil { + return err + } + + destPath := "/" + strings.TrimSuffix(i.RelativePath, ".asset") + name := strings.TrimSuffix(i.Name, ".asset") + + def := &nodetasks.AssetDefinition{} + err = json.Unmarshal(contents, def) + if err != nil { + return fmt.Errorf("error parsing json for asset %q: %v", name, err) + } + + asset, err := r.assets.Find(name, def.AssetPath) + if err != nil { + return fmt.Errorf("error trying to locate asset %q: %v", name, err) + } + if asset == nil { + return fmt.Errorf("unable to locate asset %q", name) + } + + task, err = nodetasks.NewFileTask(i.Name, asset, destPath, i.Meta) + } else { + task, err = nodetasks.NewFileTask(i.Name, fi.NewFileResource(i.Path), "/"+i.RelativePath, i.Meta) + } + + if err != nil { + return fmt.Errorf("error building task %q: %v", i.RelativePath, err) + } + glog.V(2).Infof("path %q -> task %v", i.Path, task) + + if task != nil { + key := "file/" + i.RelativePath + r.tasks[key] = task + } + return nil +} diff --git a/upup/pkg/fi/nodeup/local/local_target.go b/upup/pkg/fi/nodeup/local/local_target.go new file mode 100644 index 0000000000..1eeccb2afa --- /dev/null +++ b/upup/pkg/fi/nodeup/local/local_target.go @@ -0,0 +1,12 @@ +package local + +import "k8s.io/kube-deploy/upup/pkg/fi" + +type LocalTarget struct { +} + +var _ fi.Target = &LocalTarget{} + +func (t *LocalTarget) Finish(taskMap map[string]fi.Task) error { + return nil +} diff --git a/upup/pkg/fi/nodeup/nodetasks/asset.go b/upup/pkg/fi/nodeup/nodetasks/asset.go new file mode 100644 index 0000000000..006f11659b --- /dev/null +++ b/upup/pkg/fi/nodeup/nodetasks/asset.go @@ -0,0 +1,6 @@ +package nodetasks + +type AssetDefinition struct { 
+ AssetPath string `json:"assetPath"` + Mode string `json:"mode"` +} diff --git a/upup/pkg/fi/nodeup/nodetasks/file.go b/upup/pkg/fi/nodeup/nodetasks/file.go new file mode 100644 index 0000000000..062f1efcb3 --- /dev/null +++ b/upup/pkg/fi/nodeup/nodetasks/file.go @@ -0,0 +1,149 @@ +package nodetasks + +import ( + "encoding/json" + "fmt" + "github.com/golang/glog" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/nodeup/cloudinit" + "k8s.io/kube-deploy/upup/pkg/fi/nodeup/local" + "os" + "os/exec" + "strings" +) + +type File struct { + Path string + Contents fi.Resource + + Mode *string `json:"mode"` + IfNotExists bool `json:"ifNotExists"` + + OnChangeExecute []string `json:"onChangeExecute,omitempty"` +} + +var _ fi.Task = &File{} + +func NewFileTask(name string, src fi.Resource, destPath string, meta string) (*File, error) { + f := &File{ + //Name: name, + Contents: src, + Path: destPath, + } + + if meta != "" { + err := json.Unmarshal([]byte(meta), f) + if err != nil { + return nil, fmt.Errorf("error parsing meta for file %q: %v", name, err) + } + } + + return f, nil +} + +func (f *File) String() string { + return fmt.Sprintf("File: %q", f.Path) +} + +func findFile(p string) (*File, error) { + stat, err := os.Stat(p) + if err != nil { + if os.IsNotExist(err) { + return nil, nil + } + } + + actual := &File{} + actual.Path = p + actual.Mode = fi.String(fi.FileModeToString(stat.Mode())) + actual.Contents = fi.NewFileResource(p) + + return actual, nil +} + +func (e *File) Find(c *fi.Context) (*File, error) { + actual, err := findFile(e.Path) + if err != nil { + return nil, err + } + if actual == nil { + return nil, nil + } + + // To avoid spurious changes + actual.IfNotExists = e.IfNotExists + + return actual, nil +} + +func (e *File) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (s *File) CheckChanges(a, e, changes *File) error { + return nil +} + +func (_ *File) RenderLocal(t *local.LocalTarget, a, e, changes *File) error { + dirMode := os.FileMode(0755) + fileMode, err := fi.ParseFileMode(fi.StringValue(e.Mode), 0644) + if err != nil { + return fmt.Errorf("invalid file mode for %q: %q", e.Path, e.Mode) + } + + if a != nil { + if e.IfNotExists { + glog.V(2).Infof("file exists and IfNotExists set; skipping %q", e.Path) + return nil + } + } + + changed := false + if changes.Contents != nil { + err = fi.WriteFile(e.Path, e.Contents, fileMode, dirMode) + if err != nil { + return fmt.Errorf("error copying file %q: %v", e.Path, err) + } + changed = true + } else if changes.Mode != nil { + modeChanged, err := fi.EnsureFileMode(e.Path, fileMode) + if err != nil { + return fmt.Errorf("error changing file mode %q: %v", e.Path, err) + } + changed = changed || modeChanged + } + + if changed && e.OnChangeExecute != nil { + args := e.OnChangeExecute + human := strings.Join(args, " ") + + glog.Infof("Changed; will execute OnChangeExecute command: %q", human) + + cmd := exec.Command(args[0], args[1:]...) 
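// Note: this relies on OnChangeExecute having at least one element; a
// non-nil but empty slice would panic on args[0].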
+ output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("error executing command %q: %v\nOutput: %s", human, err, output) + } + } + + return nil +} + +func (_ *File) RenderCloudInit(t *cloudinit.CloudInitTarget, a, e, changes *File) error { + dirMode := os.FileMode(0755) + fileMode, err := fi.ParseFileMode(fi.StringValue(e.Mode), 0644) + if err != nil { + return fmt.Errorf("invalid file mode for %q: %q", e.Path, e.Mode) + } + + err = t.WriteFile(e.Path, e.Contents, fileMode, dirMode) + if err != nil { + return err + } + + if e.OnChangeExecute != nil { + t.AddCommand(cloudinit.Always, e.OnChangeExecute...) + } + + return nil +} diff --git a/upup/pkg/fi/nodeup/nodetasks/package.go b/upup/pkg/fi/nodeup/nodetasks/package.go new file mode 100644 index 0000000000..7b214dd3f5 --- /dev/null +++ b/upup/pkg/fi/nodeup/nodetasks/package.go @@ -0,0 +1,170 @@ +package nodetasks + +import ( + "encoding/json" + "fmt" + "github.com/golang/glog" + "k8s.io/kube-deploy/upup/pkg/fi" + "k8s.io/kube-deploy/upup/pkg/fi/nodeup/cloudinit" + "k8s.io/kube-deploy/upup/pkg/fi/nodeup/local" + "os" + "os/exec" + "path" + "strings" +) + +type Package struct { + Name string + + Version *string `json:"version"` + Source *string `json:"source"` + Hash *string `json:"hash"` + PreventStart *bool `json:"preventStart"` +} + +const ( + localPackageDir = "/var/cache/nodeup/packages/" +) + +func (p *Package) GetDependencies(tasks map[string]fi.Task) []string { + var deps []string + for k, v := range tasks { + if _, ok := v.(*UpdatePackages); ok { + deps = append(deps, k) + } + } + return deps +} + +func (p *Package) String() string { + return fmt.Sprintf("Package: %s", p.Name) +} + +func NewPackage(name string, contents string, meta string) (fi.Task, error) { + p := &Package{Name: name} + if contents != "" { + err := json.Unmarshal([]byte(contents), p) + if err != nil { + return nil, fmt.Errorf("error parsing json for package %q: %v", name, err) + } + } + return p, nil +} + +func (e *Package) Find(c *fi.Context) (*Package, error) { + args := []string{"dpkg-query", "-f", "${db:Status-Abbrev}${Version}\\n", "-W", e.Name} + glog.V(2).Infof("Listing installed packages: %q", args) + cmd := exec.Command(args[0], args[1:]...) 
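// For reference: ${db:Status-Abbrev} expands to a three-character field such
// as "ii " (desired action, status, error flag), so each output line normally
// looks like "ii 1.2.3-1"; the trailing space in the abbreviation is the
// separator that the strings.Split below depends on.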
+ output, err := cmd.CombinedOutput() + if err != nil { + if strings.Contains(string(output), "no packages found") { + return nil, nil + } + return nil, fmt.Errorf("error listing installed packages: %v: %s", err, string(output)) + } + + installed := false + installedVersion := "" + for _, line := range strings.Split(string(output), "\n") { + if line == "" { + continue + } + + tokens := strings.Split(line, " ") + if len(tokens) != 2 { + return nil, fmt.Errorf("error parsing dpkg-query line %q", line) + } + state := tokens[0] + version := tokens[1] + + switch state { + case "ii": + installed = true + installedVersion = version + case "rc": + // removed + installed = false + case "un": + // unknown + installed = false + default: + return nil, fmt.Errorf("unknown package state %q in line %q", state, line) + } + } + + if !installed { + return nil, nil + } + + return &Package{ + Name: e.Name, + Version: fi.String(installedVersion), + }, nil +} + +func (e *Package) Run(c *fi.Context) error { + return fi.DefaultDeltaRunMethod(e, c) +} + +func (s *Package) CheckChanges(a, e, changes *Package) error { + return nil +} + +func (_ *Package) RenderLocal(t *local.LocalTarget, a, e, changes *Package) error { + if changes.Version != nil { + glog.Infof("Installing package %q", e.Name) + + if e.Source != nil { + // Install a deb + local := path.Join(localPackageDir, e.Name) + err := os.MkdirAll(localPackageDir, 0755) + if err != nil { + return fmt.Errorf("error creating directories %q: %v", path.Dir(local), err) + } + + _, err = fi.DownloadURL(fi.StringValue(e.Source), local, fi.StringValue(e.Hash)) + if err != nil { + return err + } + + args := []string{"dpkg", "-i", local} + glog.Infof("running command %s", args) + cmd := exec.Command(args[0], args[1:]...) + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("error installing package %q: %v: %s", e.Name, err, string(output)) + } + } else { + args := []string{"apt-get", "install", "--yes", e.Name} + glog.Infof("running command %s", args) + cmd := exec.Command(args[0], args[1:]...) 
+			output, err := cmd.CombinedOutput()
+			if err != nil {
+				return fmt.Errorf("error installing package %q: %v: %s", e.Name, err, string(output))
+			}
+		}
+	}
+
+	return nil
+}
+
+func (_ *Package) RenderCloudInit(t *cloudinit.CloudInitTarget, a, e, changes *Package) error {
+	if e.Source != nil {
+
+		localFile := path.Join(localPackageDir, e.Name)
+		t.AddMkdirpCommand(localPackageDir, 0755)
+
+		url := *e.Source
+		t.AddDownloadCommand(cloudinit.Always, url, localFile)
+
+		t.AddCommand(cloudinit.Always, "dpkg", "-i", localFile)
+	} else {
+		packageSpec := e.Name
+		if e.Version != nil {
+			packageSpec += " " + *e.Version
+		}
+		t.Config.Packages = append(t.Config.Packages, packageSpec)
+	}
+
+	return nil
+}
diff --git a/upup/pkg/fi/nodeup/nodetasks/service.go b/upup/pkg/fi/nodeup/nodetasks/service.go
new file mode 100644
index 0000000000..7b82754457
--- /dev/null
+++ b/upup/pkg/fi/nodeup/nodetasks/service.go
@@ -0,0 +1,285 @@
+package nodetasks
+
+import (
+	"encoding/json"
+	"fmt"
+	"github.com/golang/glog"
+	"io/ioutil"
+	"k8s.io/kube-deploy/upup/pkg/fi"
+	"k8s.io/kube-deploy/upup/pkg/fi/nodeup/cloudinit"
+	"k8s.io/kube-deploy/upup/pkg/fi/nodeup/local"
+	"k8s.io/kube-deploy/upup/pkg/fi/utils"
+	"os"
+	"os/exec"
+	"path"
+	"reflect"
+	"strings"
+	"time"
+)
+
+const (
+	systemdSystemPath = "/lib/systemd/system" // TODO: Different on redhat
+)
+
+type Service struct {
+	Name       string
+	Definition *string
+	Running    *bool
+
+	ManageState  *bool `json:"manageState"`
+	SmartRestart *bool `json:"smartRestart"`
+}
+
+func (p *Service) GetDependencies(tasks map[string]fi.Task) []string {
+	var deps []string
+	for k, v := range tasks {
+		// We assume that services depend on basically everything
+		typeName := utils.BuildTypeName(reflect.TypeOf(v))
+		switch typeName {
+		case "*CopyAssetTask", "*File", "*Package", "*Sysctl", "*UpdatePackages":
+			deps = append(deps, k)
+		case "*Service":
+			// ignore
+		default:
+			glog.Fatalf("Unhandled type name: %q", typeName)
+		}
+	}
+	return deps
+}
+
+func (s *Service) String() string {
+	return fmt.Sprintf("Service: %s", s.Name)
+}
+
+func NewService(name string, contents string, meta string) (fi.Task, error) {
+	s := &Service{Name: name}
+	s.Definition = fi.String(contents)
+
+	if meta != "" {
+		err := json.Unmarshal([]byte(meta), s)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing json for service %q: %v", name, err)
+		}
+	}
+
+	// Default some values to true: Running, SmartRestart, ManageState
+	if s.Running == nil {
+		s.Running = fi.Bool(true)
+	}
+	if s.SmartRestart == nil {
+		s.SmartRestart = fi.Bool(true)
+	}
+	if s.ManageState == nil {
+		s.ManageState = fi.Bool(true)
+	}
+
+	return s, nil
+}
+
+func getSystemdStatus(name string) (map[string]string, error) {
+	glog.V(2).Infof("querying state of service %q", name)
+	cmd := exec.Command("systemctl", "show", "--all", name)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return nil, fmt.Errorf("error doing systemd show %s: %v\nOutput: %s", name, err, output)
+	}
+	properties := make(map[string]string)
+	for _, line := range strings.Split(string(output), "\n") {
+		if line == "" {
+			continue
+		}
+		tokens := strings.SplitN(line, "=", 2)
+		if len(tokens) != 2 {
+			glog.Warningf("Ignoring line in systemd show output: %q", line)
+			continue
+		}
+		properties[tokens[0]] = tokens[1]
+	}
+	return properties, nil
+}
+
+func (e *Service) Find(c *fi.Context) (*Service, error) {
+	servicePath := path.Join(systemdSystemPath, e.Name)
+
+	d, err := ioutil.ReadFile(servicePath)
+	if err != nil {
+		if !os.IsNotExist(err) {
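+			// Only a missing unit file is expected; any other read error is fatal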
+			return nil, fmt.Errorf("error reading systemd file %q: %v", servicePath, err)
+		}
+
+		// Not found
+		return &Service{
+			Name:       e.Name,
+			Definition: nil,
+			Running:    fi.Bool(false),
+		}, nil
+	}
+
+	actual := &Service{
+		Name:       e.Name,
+		Definition: fi.String(string(d)),
+
+		// Avoid spurious changes
+		ManageState:  e.ManageState,
+		SmartRestart: e.SmartRestart,
+	}
+
+	properties, err := getSystemdStatus(e.Name)
+	if err != nil {
+		return nil, err
+	}
+	activeState := properties["ActiveState"]
+	switch activeState {
+	case "active":
+		actual.Running = fi.Bool(true)
+
+	case "failed", "inactive":
+		actual.Running = fi.Bool(false)
+	default:
+		glog.Warningf("Unknown ActiveState=%q; will treat as not running", activeState)
+		actual.Running = fi.Bool(false)
+	}
+
+	return actual, nil
+}
+
+// Parse the systemd unit file to extract obvious dependencies
+func getSystemdDependencies(serviceName string, definition string) ([]string, error) {
+	var dependencies []string
+	for _, line := range strings.Split(definition, "\n") {
+		line = strings.TrimSpace(line)
+		tokens := strings.SplitN(line, "=", 2)
+		if len(tokens) != 2 {
+			continue
+		}
+		k := strings.TrimSpace(tokens[0])
+		v := strings.TrimSpace(tokens[1])
+		switch k {
+		case "EnvironmentFile":
+			dependencies = append(dependencies, v)
+		case "ExecStart":
+			// ExecStart=/usr/local/bin/kubelet "$DAEMON_ARGS"
+			// We extract the first argument (only)
+			tokens := strings.SplitN(v, " ", 2)
+			dependencies = append(dependencies, tokens[0])
+			glog.V(2).Infof("extracted dependency from %q: %q", line, tokens[0])
+		}
+	}
+	return dependencies, nil
+}
+
+func (e *Service) Run(c *fi.Context) error {
+	return fi.DefaultDeltaRunMethod(e, c)
+}
+
+func (s *Service) CheckChanges(a, e, changes *Service) error {
+	return nil
+}
+
+func (_ *Service) RenderLocal(t *local.LocalTarget, a, e, changes *Service) error {
+	serviceName := e.Name
+
+	action := ""
+
+	if changes.Running != nil && fi.BoolValue(e.ManageState) {
+		if fi.BoolValue(e.Running) {
+			action = "restart"
+		} else {
+			action = "stop"
+		}
+	}
+
+	if changes.Definition != nil {
+		servicePath := path.Join(systemdSystemPath, serviceName)
+		err := fi.WriteFile(servicePath, fi.NewStringResource(*e.Definition), 0644, 0755)
+		if err != nil {
+			return fmt.Errorf("error writing systemd service file: %v", err)
+		}
+
+		glog.Infof("Reloading systemd configuration")
+		cmd := exec.Command("systemctl", "daemon-reload")
+		output, err := cmd.CombinedOutput()
+		if err != nil {
+			return fmt.Errorf("error doing systemd daemon-reload: %v\nOutput: %s", err, output)
+		}
+	}
+
+	// "SmartRestart" - look at the obvious dependencies in the systemd service, restart if start time older
+	if fi.BoolValue(e.ManageState) && fi.BoolValue(e.SmartRestart) {
+		definition := fi.StringValue(e.Definition)
+		if definition == "" && a != nil {
+			definition = fi.StringValue(a.Definition)
+		}
+
+		if action == "" && fi.BoolValue(e.Running) && definition != "" {
+			dependencies, err := getSystemdDependencies(serviceName, definition)
+			if err != nil {
+				return err
+			}
+
+			var newest time.Time
+			for _, dependency := range dependencies {
+				stat, err := os.Stat(dependency)
+				if err != nil {
+					glog.Infof("Ignoring error checking service dependency %q: %v", dependency, err)
+					continue
+				}
+				modTime := stat.ModTime()
+				if newest.IsZero() || newest.Before(modTime) {
+					newest = modTime
+				}
+			}
+
+			if !newest.IsZero() {
+				properties, err := getSystemdStatus(e.Name)
+				if err != nil {
+					return err
+				}
+
+				startedAt := properties["ExecMainStartTimestamp"]
+				if startedAt == "" {
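+					// ExecMainStartTimestamp is systemd's record of when the
+					// service's main process started; without it we cannot
+					// compare against the dependency modification times.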
+					glog.Warningf("service was running, but did not have ExecMainStartTimestamp: %q", serviceName)
+				} else {
+					startedAtTime, err := time.Parse("Mon 2006-01-02 15:04:05 MST", startedAt)
+					if err != nil {
+						return fmt.Errorf("unable to parse service ExecMainStartTimestamp: %q", startedAt)
+					}
+					if startedAtTime.Before(newest) {
+						glog.V(2).Infof("will restart service %q because dependency changed after service start", serviceName)
+						action = "restart"
+					} else {
+						glog.V(2).Infof("will not restart service %q - started after dependencies", serviceName)
+					}
+				}
+			}
+		}
+	}
+
+	if action != "" && fi.BoolValue(e.ManageState) {
+		glog.Infof("Running systemctl %s %q", action, serviceName)
+		cmd := exec.Command("systemctl", action, serviceName)
+		output, err := cmd.CombinedOutput()
+		if err != nil {
+			return fmt.Errorf("error doing systemd %s %s: %v\nOutput: %s", action, serviceName, err, output)
+		}
+	}
+
+	return nil
+}
+
+func (_ *Service) RenderCloudInit(t *cloudinit.CloudInitTarget, a, e, changes *Service) error {
+	serviceName := e.Name
+
+	servicePath := path.Join(systemdSystemPath, serviceName)
+	err := t.WriteFile(servicePath, fi.NewStringResource(*e.Definition), 0644, 0755)
+	if err != nil {
+		return err
+	}
+
+	if fi.BoolValue(e.ManageState) {
+		t.AddCommand(cloudinit.Once, "systemctl", "daemon-reload")
+		t.AddCommand(cloudinit.Once, "systemctl", "start", "--no-block", serviceName)
+	}
+
+	return nil
+}
diff --git a/upup/pkg/fi/nodeup/nodetasks/update_packages.go b/upup/pkg/fi/nodeup/nodetasks/update_packages.go
new file mode 100644
index 0000000000..228bd17215
--- /dev/null
+++ b/upup/pkg/fi/nodeup/nodetasks/update_packages.go
@@ -0,0 +1,55 @@
+package nodetasks
+
+import (
+	"fmt"
+	"github.com/golang/glog"
+	"k8s.io/kube-deploy/upup/pkg/fi"
+	"k8s.io/kube-deploy/upup/pkg/fi/nodeup/cloudinit"
+	"k8s.io/kube-deploy/upup/pkg/fi/nodeup/local"
+	"os"
+	"os/exec"
+)
+
+type UpdatePackages struct {
+}
+
+func (p *UpdatePackages) GetDependencies(tasks map[string]fi.Task) []string {
+	return []string{}
+}
+
+func (p *UpdatePackages) String() string {
+	return "UpdatePackages"
+}
+
+func (e *UpdatePackages) Find(c *fi.Context) (*UpdatePackages, error) {
+	return nil, nil
+}
+
+func (e *UpdatePackages) Run(c *fi.Context) error {
+	return fi.DefaultDeltaRunMethod(e, c)
+}
+
+func (s *UpdatePackages) CheckChanges(a, e, changes *UpdatePackages) error {
+	return nil
+}
+
+func (_ *UpdatePackages) RenderLocal(t *local.LocalTarget, a, e, changes *UpdatePackages) error {
+	if os.Getenv("SKIP_PACKAGE_UPDATE") != "" {
+		glog.Infof("SKIP_PACKAGE_UPDATE was set; skipping package update")
+		return nil
+	}
+	args := []string{"apt-get", "update"}
+	glog.Infof("running command %s", args)
+	cmd := exec.Command(args[0], args[1:]...)
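+	// Refreshes the apt package index so that subsequent Package installs see
+	// current versions; assumes an apt-based distro, like the Package task.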
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return fmt.Errorf("error updating packages: %v: %s", err, string(output))
+	}
+
+	return nil
+}
+
+func (_ *UpdatePackages) RenderCloudInit(t *cloudinit.CloudInitTarget, a, e, changes *UpdatePackages) error {
+	t.Config.PackageUpdate = true
+	return nil
+}
diff --git a/upup/pkg/fi/options.go b/upup/pkg/fi/options.go
new file mode 100644
index 0000000000..f377d5831d
--- /dev/null
+++ b/upup/pkg/fi/options.go
@@ -0,0 +1,117 @@
+package fi
+
+//
+//import (
+//	"fmt"
+//	"github.com/golang/glog"
+//	"sort"
+//	"strings"
+//)
+//
+//type Options map[string]interface{}
+//
+//func NewOptions() Options {
+//	m := make(map[string]interface{})
+//	return Options(m)
+//}
+//
+//func (o Options) Merge(r Options) error {
+//	return merge(o, r)
+//}
+//
+//// TODO: What do we do about this...?
+//func (o Options) Token(key string) string {
+//	return "secret-" + key
+//}
+//
+//func merge(l, r map[string]interface{}) error {
+//	for k, v := range r {
+//		if v == nil {
+//			delete(l, k)
+//			continue
+//		}
+//
+//		switch v := v.(type) {
+//		case string, int, bool:
+//			l[k] = v
+//
+//		case map[string]interface{}:
+//			existing, found := l[k]
+//			if !found {
+//				l[k] = v
+//			} else {
+//				switch existing := existing.(type) {
+//				case map[string]interface{}:
+//					err := merge(existing, v)
+//					if err != nil {
+//						return err
+//					}
+//
+//				default:
+//					return fmt.Errorf("cannot merge object into target of type %T", v)
+//
+//				}
+//			}
+//
+//		default:
+//			return fmt.Errorf("merging of option type not handled: %T", v)
+//		}
+//	}
+//	return nil
+//}
+//
+//func (o Options) BuildFlags(path string) string {
+//	if path != "" {
+//		options := o.Navigate(path)
+//		return options.BuildFlags("")
+//	}
+//
+//	var flags []string
+//	for k, v := range o {
+//		var flag string
+//		switch v := v.(type) {
+//		case string, int, bool, float32, float64:
+//			flag = fmt.Sprintf("--%s=%v", k, v)
+//
+//		default:
+//			// TODO: Better error handling (with templates)
+//			glog.Exitf("BuildFlags of value type not handled: %T %s=%v", v, k, v)
+//			return ""
+//		}
+//		if flag != "" {
+//			flags = append(flags, flag)
+//		}
+//	}
+//	sort.Strings(flags)
+//
+//	return strings.Join(flags, " ")
+//}
+//
+//func (o Options) Navigate(path string) Options {
+//	if path == "" {
+//		return o
+//	}
+//
+//	tokens := strings.SplitN(path, ".", 2)
+//
+//	child, found := o[tokens[0]]
+//	if !found {
+//		return NewOptions()
+//	}
+//
+//	var childOptions Options
+//	switch child := child.(type) {
+//
+//	case map[string]interface{}:
+//		childOptions = Options(child)
+//
+//	default:
+//		glog.Warningf("Navigate of child type not handled: %T", child)
+//		childOptions = NewOptions()
+//	}
+//
+//	if len(tokens) == 1 {
+//		return childOptions
+//	}
+//	return childOptions.Navigate(tokens[1])
+//}
diff --git a/upup/pkg/fi/resources.go b/upup/pkg/fi/resources.go
new file mode 100644
index 0000000000..554b2c43cf
--- /dev/null
+++ b/upup/pkg/fi/resources.go
@@ -0,0 +1,195 @@
+package fi
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"hash"
+	"io"
+	"os"
+)
+
+type Resource interface {
+	Open() (io.ReadSeeker, error)
+}
+
+type TemplateResource interface {
+	Resource
+	Curry(args []string) TemplateResource
+}
+
+func HashForResource(r Resource, hashAlgorithm HashAlgorithm) (string, error) {
+	hasher := NewHasher(hashAlgorithm)
+	_, err := CopyResource(hasher, r)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return "", err
+		}
+		return "", fmt.Errorf("error while hashing resource: %v", err)
+	}
+	return hex.EncodeToString(hasher.Sum(nil)), nil
+}
+
+func HashesForResource(r Resource, hashAlgorithms []HashAlgorithm) (map[HashAlgorithm]string, error) {
+	hashers := make(map[HashAlgorithm]hash.Hash)
+	var writers []io.Writer
+	for _, hashAlgorithm := range hashAlgorithms {
+		if hashers[hashAlgorithm] != nil {
+			continue
+		}
+		hasher := NewHasher(hashAlgorithm)
+		hashers[hashAlgorithm] = hasher
+		writers = append(writers, hasher)
+	}
+
+	w := io.MultiWriter(writers...)
+
+	_, err := CopyResource(w, r)
+	if err != nil {
+		return nil, fmt.Errorf("error while hashing resource: %v", err)
+	}
+
+	hashes := make(map[HashAlgorithm]string)
+	for k, hasher := range hashers {
+		hashes[k] = hex.EncodeToString(hasher.Sum(nil))
+	}
+
+	return hashes, nil
+}
+
+func ResourcesMatch(a, b Resource) (bool, error) {
+	aReader, err := a.Open()
+	if err != nil {
+		return false, err
+	}
+	defer SafeClose(aReader)
+
+	bReader, err := b.Open()
+	if err != nil {
+		return false, err
+	}
+	defer SafeClose(bReader)
+
+	const size = 8192
+	aData := make([]byte, size)
+	bData := make([]byte, size)
+
+	for {
+		aN, aErr := io.ReadFull(aReader, aData)
+		if aErr != nil && aErr != io.EOF && aErr != io.ErrUnexpectedEOF {
+			return false, aErr
+		}
+
+		bN, bErr := io.ReadFull(bReader, bData)
+		if bErr != nil && bErr != io.EOF && bErr != io.ErrUnexpectedEOF {
+			return false, bErr
+		}
+
+		if aErr == nil && bErr == nil {
+			if aN != size || bN != size {
+				panic("violation of io.ReadFull contract")
+			}
+			if !bytes.Equal(aData, bData) {
+				return false, nil
+			}
+			continue
+		}
+
+		if aN != bN {
+			return false, nil
+		}
+
+		return bytes.Equal(aData[0:aN], bData[0:bN]), nil
+	}
+}
+
+func CopyResource(dest io.Writer, r Resource) (int64, error) {
+	in, err := r.Open()
+	if err != nil {
+		if os.IsNotExist(err) {
+			return 0, err
+		}
+		return 0, fmt.Errorf("error opening resource: %v", err)
+	}
+	defer SafeClose(in)
+
+	n, err := io.Copy(dest, in)
+	if err != nil {
+		return n, fmt.Errorf("error copying resource: %v", err)
+	}
+	return n, nil
+}
+
+func ResourceAsString(r Resource) (string, error) {
+	buf := new(bytes.Buffer)
+	_, err := CopyResource(buf, r)
+	if err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+func ResourceAsBytes(r Resource) ([]byte, error) {
+	buf := new(bytes.Buffer)
+	_, err := CopyResource(buf, r)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+type StringResource struct {
+	s string
+}
+
+var _ Resource = &StringResource{}
+
+func NewStringResource(s string) *StringResource {
+	return &StringResource{s: s}
+}
+
+func (s *StringResource) Open() (io.ReadSeeker, error) {
+	r := bytes.NewReader([]byte(s.s))
+	return r, nil
+}
+
+func (s *StringResource) WriteTo(out io.Writer) error {
+	_, err := out.Write([]byte(s.s))
+	return err
+}
+
+type BytesResource struct {
+	data []byte
+}
+
+var _ Resource = &BytesResource{}
+
+func NewBytesResource(data []byte) *BytesResource {
+	return &BytesResource{data: data}
+}
+
+func (r *BytesResource) Open() (io.ReadSeeker, error) {
+	reader := bytes.NewReader(r.data)
+	return reader, nil
+}
+
+type FileResource struct {
+	Path string
+}
+
+var _ Resource = &FileResource{}
+
+func NewFileResource(path string) *FileResource {
+	return &FileResource{Path: path}
+}
+
+func (r *FileResource) Open() (io.ReadSeeker, error) {
+	in, err := os.Open(r.Path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, err
+		}
+		return nil, fmt.Errorf("error opening file %q: %v", r.Path, err)
+	}
+	return in, nil
+}
diff --git a/upup/pkg/fi/secrets.go b/upup/pkg/fi/secrets.go
new file mode 100644
index 0000000000..13311f0f53
--- /dev/null
+++ b/upup/pkg/fi/secrets.go
@@ -0,0 +1,43 @@
+package fi
+
+import (
+	crypto_rand "crypto/rand"
+	"encoding/base64"
+	"fmt"
+	"strings"
+)
+
+type SecretStore interface {
+	Secret(id string) (*Secret, error)
+	FindSecret(id string) (*Secret, error)
+}
+
+type Secret struct {
+	Data []byte
+}
+
+func (s *Secret) AsString() (string, error) {
+	// Nicer behaviour because this is called from templates
+	if s == nil {
+		return "", fmt.Errorf("AsString called on nil Secret")
+	}
+
+	return string(s.Data), nil
+}
+
+func CreateSecret() (*Secret, error) {
+	data := make([]byte, 128)
+	_, err := crypto_rand.Read(data)
+	if err != nil {
+		return nil, fmt.Errorf("error reading crypto_rand: %v", err)
+	}
+
+	s := base64.StdEncoding.EncodeToString(data)
+	r := strings.NewReplacer("+", "", "=", "", "/", "")
+	s = r.Replace(s)
+	s = s[:32]
+
+	return &Secret{
+		Data: []byte(s),
+	}, nil
+}
diff --git a/upup/pkg/fi/target.go b/upup/pkg/fi/target.go
new file mode 100644
index 0000000000..872d7eb0e3
--- /dev/null
+++ b/upup/pkg/fi/target.go
@@ -0,0 +1,6 @@
+package fi
+
+type Target interface {
+	// Lifecycle methods, called by the driver
+	Finish(taskMap map[string]Task) error
+}
diff --git a/upup/pkg/fi/task.go b/upup/pkg/fi/task.go
new file mode 100644
index 0000000000..01448cda7b
--- /dev/null
+++ b/upup/pkg/fi/task.go
@@ -0,0 +1,5 @@
+package fi
+
+type Task interface {
+	Run(*Context) error
+}
diff --git a/upup/pkg/fi/topological_sort.go b/upup/pkg/fi/topological_sort.go
new file mode 100644
index 0000000000..42570666e0
--- /dev/null
+++ b/upup/pkg/fi/topological_sort.go
@@ -0,0 +1,162 @@
+package fi
+
+import (
+	"crypto/x509/pkix"
+	"fmt"
+	"github.com/golang/glog"
+	"k8s.io/kube-deploy/upup/pkg/fi/utils"
+	"reflect"
+	"sort"
+)
+
+type HasDependencies interface {
+	GetDependencies(tasks map[string]Task) []string
+}
+
+func TopologicalSort(tasks map[string]Task) [][]string {
+	taskToId := make(map[interface{}]string)
+	for k, t := range tasks {
+		taskToId[t] = k
+	}
+
+	edges := make(map[string][]string)
+
+	for k, t := range tasks {
+		task := t.(Task)
+		var dependencyKeys []string
+
+		if hd, ok := task.(HasDependencies); ok {
+			dependencyKeys = hd.GetDependencies(tasks)
+		} else {
+			dependencyKeys = reflectForDependencies(task, taskToId)
+		}
+
+		edges[k] = dependencyKeys
+		glog.Infof("%s : %v", k, dependencyKeys)
+	}
+
+	ordered := toposort(edges)
+	glog.Infof("toposorted as %v", ordered)
+
+	return ordered
+}
+
+func reflectForDependencies(task Task, taskToId map[interface{}]string) []string {
+	v := reflect.ValueOf(task).Elem()
+	dependencies := getDependencies(v)
+
+	var dependencyKeys []string
+	for _, dep := range dependencies {
+		dependencyKey, found := taskToId[dep]
+		if !found {
+			glog.Fatalf("dependency not found: %v", dep)
+		}
+		dependencyKeys = append(dependencyKeys, dependencyKey)
+	}
+
+	return dependencyKeys
+}
+
+// Perform a topological sort
+// Note that we group them into stages, where each stage has no dependencies on other members of that stage
+// This could support parallelism but also pushes nodes with fewer dependencies earlier
+// e.g. edges {a: [], b: [a], c: [a]} sort into stages [[a], [b, c]],
+// while {a: [], b: [a], c: [b]} sort into [[a], [b], [c]]
+//
+// This is not a particularly efficient implementation, but is simple,
+// and likely good enough for the sizes we will be dealing with
+func toposort(edges map[string][]string) [][]string {
+	var stages [][]string
+
+	for {
+		if len(edges) == 0 {
+			break
+		}
+
+		var stage []string
+		for k, in := range edges {
+			if len(in) != 0 {
+				continue
+			}
+			stage = append(stage, k)
+		}
+
+		// For consistency
+		sort.Strings(stage)
+
+		if len(stage) == 0 {
+			glog.Fatalf("graph is circular")
+		}
+
+		stages = append(stages, stage)
+
+		stageSet := make(map[string]bool)
+		for _, k := range stage {
+			delete(edges, k)
+			stageSet[k] = true
+		}
+
+		for k, in := range edges {
+			var after []string
+			for _, v := range in {
+				if !stageSet[v] {
+					after = append(after, v)
+				}
+			}
+			edges[k] = after
+		}
+	}
+
+	return stages
+}
+
+func getDependencies(v reflect.Value) []Task {
+	var dependencies []Task
+
+	err := utils.WalkRecursive(v, func(path string, f *reflect.StructField, v reflect.Value) error {
+		for v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface {
+			if v.IsNil() {
+				return nil
+			}
+			v = v.Elem()
+		}
+
+		if utils.IsPrimitiveValue(v) {
+			return nil
+		}
+
+		switch v.Kind() {
+		case reflect.String:
+			return nil
+
+		case reflect.Slice, reflect.Map:
+			// The recursive walk will descend into the slice/map; we can ignore here
+			return nil
+
+		case reflect.Interface, reflect.Struct:
+			// TODO: Can we / should we use a type-switch statement
+			intf := v.Addr().Interface()
+			if dep, ok := intf.(Task); ok {
+				dependencies = append(dependencies, dep)
+			} else if _, ok := intf.(Resource); ok {
+				// Ignore: not a dependency (?)
+			} else if _, ok := intf.(*pkix.Name); ok {
+				// Ignore: not a dependency
+			} else {
+				return fmt.Errorf("unhandled type for %q: %T", path, v.Interface())
+			}
+
+		default:
+			glog.Infof("Unhandled kind for %q: %T", path, v.Interface())
+			return fmt.Errorf("unhandled kind for %q: %v", path, v.Kind())
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		glog.Fatalf("unexpected error finding dependencies %v", err)
+	}
+
+	return dependencies
+}
diff --git a/upup/pkg/fi/utils/json.go b/upup/pkg/fi/utils/json.go
new file mode 100644
index 0000000000..e55ae7ad86
--- /dev/null
+++ b/upup/pkg/fi/utils/json.go
@@ -0,0 +1,14 @@
+package utils
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+func JsonString(v interface{}) string {
+	data, err := json.Marshal(v)
+	if err != nil {
+		return fmt.Sprintf("error marshalling: %v", err)
+	}
+	return string(data)
+}
diff --git a/upup/pkg/fi/utils/marshal.go b/upup/pkg/fi/utils/marshal.go
new file mode 100644
index 0000000000..5eaba25c7c
--- /dev/null
+++ b/upup/pkg/fi/utils/marshal.go
@@ -0,0 +1,220 @@
+package utils
+
+import (
+	"fmt"
+	"github.com/golang/glog"
+	"reflect"
+	"strings"
+)
+
+// This file is (probably) the most complex code we have
+// It populates an object's fields, from the values stored in a map
+// We typically get this map by unmarshalling yaml or json
+// So: why not just use go's built in unmarshalling?
+// The reason is that we want richer functionality for when we link objects to each other
+// By doing our own marshalling, we can then link objects just by specifying a string identifier.
+// Then, while we're here, we add nicer functionality like case-insensitivity and nicer handling for resources.
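+//
+// For example (illustrative, not tied to any particular task): given the map
+//   {"version": "1.2.3", "preventstart": true}
+// we can populate fields `Version *string` and `PreventStart *bool`, matching
+// keys to field names case-insensitively; a bare string can also populate a
+// []string field as a single-element slice.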
+
+// Unmarshaller implements our specialized marshalling from a map to an object
type Unmarshaller struct {
return nil, fmt.Errorf("unknown metadata type: %q in %q", u.Host, location) + } + + case "http", "https": + httpURL = location + + default: + return nil, fmt.Errorf("unrecognized scheme for location %q") + } + + req, err := http.NewRequest("GET", httpURL, nil) + if err != nil { + return nil, err + } + for k, v := range httpHeaders { + req.Header.Add(k, v) + } + response, err := http.DefaultClient.Do(req) + if response != nil { + defer response.Body.Close() + } + if err != nil { + return nil, fmt.Errorf("error fetching %q: %v", httpURL, err) + } + body, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, fmt.Errorf("error reading response for %q: %v", httpURL, err) + } + return body, nil +} diff --git a/upup/pkg/fi/utils/yaml.go b/upup/pkg/fi/utils/yaml.go new file mode 100644 index 0000000000..538af13b3e --- /dev/null +++ b/upup/pkg/fi/utils/yaml.go @@ -0,0 +1,20 @@ +package utils + +import ( + //"gopkg.in/yaml.v2" + "github.com/ghodss/yaml" +) + +// See http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/ + +func YamlToJson(yamlBytes []byte) ([]byte, error) { + return yaml.YAMLToJSON(yamlBytes) +} + +func YamlUnmarshal(yamlBytes []byte, dest interface{}) error { + return yaml.Unmarshal(yamlBytes, dest) +} + +func YamlMarshal(o interface{}) ([]byte, error) { + return yaml.Marshal(o) +} diff --git a/upup/pkg/fi/values.go b/upup/pkg/fi/values.go new file mode 100644 index 0000000000..dd293f92be --- /dev/null +++ b/upup/pkg/fi/values.go @@ -0,0 +1,58 @@ +package fi + +import ( + "fmt" + "reflect" +) + +func StringValue(s *string) string { + if s == nil { + return "" + } + return *s +} + +func String(s string) *string { + return &s +} + +func Bool(v bool) *bool { + return &v +} + +func BoolValue(v *bool) bool { + if v == nil { + return false + } + return *v +} + +func Int(v int) *int { + return &v +} + +func Int64(v int64) *int64 { + return &v +} + +func DebugPrint(o interface{}) string { + if o == nil { + return "" + } + v := reflect.ValueOf(o) + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return "" + } + v = v.Elem() + } + if !v.IsValid() { + return "" + } + o = v.Interface() + stringer, ok := o.(fmt.Stringer) + if ok { + return stringer.String() + } + return fmt.Sprint(o) +}