Merge pull request #2401 from vmware/vsphere-upstream

Add vSphere provider support
This commit is contained in:
Justin Santa Barbara 2017-04-25 00:09:37 -04:00 committed by GitHub
commit 77fbf9cbf9
44 changed files with 2332 additions and 63 deletions

View File

@ -158,6 +158,24 @@ version-dist: nodeup-dist kops-dist protokube-export utils-dist
cp .build/dist/linux/amd64/utils.tar.gz .build/upload/kops/${VERSION}/linux/amd64/utils.tar.gz
cp .build/dist/linux/amd64/utils.tar.gz.sha1 .build/upload/kops/${VERSION}/linux/amd64/utils.tar.gz.sha1
vsphere-version-dist: nodeup-dist protokube-export
rm -rf .build/upload
mkdir -p .build/upload/kops/${VERSION}/linux/amd64/
mkdir -p .build/upload/kops/${VERSION}/darwin/amd64/
mkdir -p .build/upload/kops/${VERSION}/images/
mkdir -p .build/upload/utils/${VERSION}/linux/amd64/
cp .build/dist/nodeup .build/upload/kops/${VERSION}/linux/amd64/nodeup
cp .build/dist/nodeup.sha1 .build/upload/kops/${VERSION}/linux/amd64/nodeup.sha1
cp .build/dist/images/protokube.tar.gz .build/upload/kops/${VERSION}/images/protokube.tar.gz
cp .build/dist/images/protokube.tar.gz.sha1 .build/upload/kops/${VERSION}/images/protokube.tar.gz.sha1
scp -r .build/dist/nodeup* ${TARGET}:${TARGET_PATH}/nodeup
scp -r .build/dist/images/protokube.tar.gz* ${TARGET}:${TARGET_PATH}/protokube/
make kops-dist
cp .build/dist/linux/amd64/kops .build/upload/kops/${VERSION}/linux/amd64/kops
cp .build/dist/linux/amd64/kops.sha1 .build/upload/kops/${VERSION}/linux/amd64/kops.sha1
cp .build/dist/darwin/amd64/kops .build/upload/kops/${VERSION}/darwin/amd64/kops
cp .build/dist/darwin/amd64/kops.sha1 .build/upload/kops/${VERSION}/darwin/amd64/kops.sha1
upload: kops version-dist
aws s3 sync --acl public-read .build/upload/ ${S3_BUCKET}
@ -195,7 +213,6 @@ push-gce-run: push
push-aws-run: push
ssh -t ${TARGET} sudo SKIP_PACKAGE_UPDATE=1 /tmp/nodeup --conf=/var/cache/kubernetes-install/kube_env.yaml --v=8
protokube-gocode:
go install k8s.io/kops/protokube/cmd/protokube

View File

@ -34,6 +34,7 @@ import (
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kops/pkg/apis/kops/validation"
"k8s.io/kops/pkg/client/simple/vfsclientset"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kops/upup/pkg/fi/utils"
@ -95,6 +96,16 @@ type CreateClusterOptions struct {
// Specify tenancy (default or dedicated) for masters and nodes
MasterTenancy string
NodeTenancy string
// vSphere options
VSphereServer string
VSphereDatacenter string
VSphereResourcePool string
VSphereCoreDNSServer string
// Note: We need open-vm-tools to be installed for vSphere Cloud Provider to work
// We need VSphereDatastore to support Kubernetes vSphere Cloud Provider (v1.5.3)
// We can remove this once we support higher versions.
VSphereDatastore string
}
func (o *CreateClusterOptions) InitDefaults() {
@ -148,7 +159,7 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
cmd.Flags().StringVar(&options.Target, "target", options.Target, "Target - direct, terraform, cloudformation")
cmd.Flags().StringVar(&options.Models, "model", options.Models, "Models to apply (separate multiple models with commas)")
cmd.Flags().StringVar(&options.Cloud, "cloud", options.Cloud, "Cloud provider to use - gce, aws")
cmd.Flags().StringVar(&options.Cloud, "cloud", options.Cloud, "Cloud provider to use - gce, aws, vsphere")
cmd.Flags().StringSliceVar(&options.Zones, "zones", options.Zones, "Zones in which to run the cluster")
cmd.Flags().StringSliceVar(&options.MasterZones, "master-zones", options.MasterZones, "Zones in which to run masters (must be an odd number)")
@ -204,6 +215,14 @@ func NewCmdCreateCluster(f *util.Factory, out io.Writer) *cobra.Command {
cmd.Flags().StringVar(&options.MasterTenancy, "master-tenancy", options.MasterTenancy, "The tenancy of the master group on AWS. Can either be default or dedicated.")
cmd.Flags().StringVar(&options.NodeTenancy, "node-tenancy", options.NodeTenancy, "The tenancy of the node group on AWS. Can be either default or dedicated.")
if featureflag.VSphereCloudProvider.Enabled() {
	// vSphere flags (only registered when the VSphereCloudProvider feature flag is on).
	cmd.Flags().StringVar(&options.VSphereServer, "vsphere-server", options.VSphereServer, "vsphere-server is required for vSphere. Set vCenter URL Ex: 10.192.10.30 or myvcenter.io (without https://)")
	cmd.Flags().StringVar(&options.VSphereDatacenter, "vsphere-datacenter", options.VSphereDatacenter, "vsphere-datacenter is required for vSphere. Set the name of the datacenter in which to deploy Kubernetes VMs.")
	// BUGFIX: the default value here was options.VSphereDatacenter (copy/paste error),
	// which silently seeded the resource-pool flag with the datacenter value.
	cmd.Flags().StringVar(&options.VSphereResourcePool, "vsphere-resource-pool", options.VSphereResourcePool, "vsphere-resource-pool is required for vSphere. Set a valid Cluster, Host or Resource Pool in which to deploy Kubernetes VMs.")
	cmd.Flags().StringVar(&options.VSphereCoreDNSServer, "vsphere-coredns-server", options.VSphereCoreDNSServer, "vsphere-coredns-server is required for vSphere.")
	cmd.Flags().StringVar(&options.VSphereDatastore, "vsphere-datastore", options.VSphereDatastore, "vsphere-datastore is required for vSphere. Set a valid datastore in which to store dynamic provision volumes.")
}
return cmd
}
@ -520,6 +539,41 @@ func RunCreateCluster(f *util.Factory, out io.Writer, c *CreateClusterOptions) e
if c.Cloud != "" {
cluster.Spec.CloudProvider = c.Cloud
if c.Cloud == "vsphere" {
if !featureflag.VSphereCloudProvider.Enabled() {
return fmt.Errorf("Feature flag VSphereCloudProvider is not set. Cloud vSphere will not be supported.")
}
if cluster.Spec.CloudConfig == nil {
cluster.Spec.CloudConfig = &api.CloudConfiguration{}
}
if c.VSphereServer == "" {
return fmt.Errorf("vsphere-server is required for vSphere. Set vCenter URL Ex: 10.192.10.30 or myvcenter.io (without https://)")
}
cluster.Spec.CloudConfig.VSphereServer = fi.String(c.VSphereServer)
if c.VSphereDatacenter == "" {
return fmt.Errorf("vsphere-datacenter is required for vSphere. Set the name of the datacenter in which to deploy Kubernetes VMs.")
}
cluster.Spec.CloudConfig.VSphereDatacenter = fi.String(c.VSphereDatacenter)
if c.VSphereResourcePool == "" {
return fmt.Errorf("vsphere-resource-pool is required for vSphere. Set a valid Cluster, Host or Resource Pool in which to deploy Kubernetes VMs.")
}
cluster.Spec.CloudConfig.VSphereResourcePool = fi.String(c.VSphereResourcePool)
if c.VSphereCoreDNSServer == "" {
return fmt.Errorf("A coredns server is required for vSphere.")
}
cluster.Spec.CloudConfig.VSphereCoreDNSServer = fi.String(c.VSphereCoreDNSServer)
if c.VSphereDatastore == "" {
return fmt.Errorf("vsphere-datastore is required for vSphere. Set a valid datastore in which to store dynamic provision volumes.")
}
cluster.Spec.CloudConfig.VSphereDatastore = fi.String(c.VSphereDatastore)
}
}
if c.Project != "" {

View File

@ -17,9 +17,12 @@ limitations under the License.
package main
import (
"bytes"
"flag"
"fmt"
"io"
"os"
"strings"
"github.com/golang/glog"
"github.com/spf13/pflag"
@ -30,6 +33,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
_ "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/aws/route53"
k8scoredns "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/coredns"
_ "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns"
)
@ -47,7 +51,7 @@ func main() {
glog.Flush()
dnsProviderId := "aws-route53"
flags.StringVar(&dnsProviderId, "dns", dnsProviderId, "DNS provider we should use (aws-route53, google-clouddns)")
flags.StringVar(&dnsProviderId, "dns", dnsProviderId, "DNS provider we should use (aws-route53, google-clouddns, coredns)")
var zones []string
flags.StringSliceVarP(&zones, "zone", "z", []string{}, "Configure permitted zones and their mappings")
@ -55,6 +59,9 @@ func main() {
watchIngress := true
flags.BoolVar(&watchIngress, "watch-ingress", watchIngress, "Configure hostnames found in ingress resources")
dnsServer := ""
flag.StringVar(&dnsServer, "dns-server", dnsServer, "DNS Server")
// Trick to avoid 'logging before flag.Parse' warning
flag.CommandLine.Parse([]string{})
@ -86,7 +93,15 @@ func main() {
// glog.Fatalf("error building extensions REST client: %v", err)
//}
dnsProvider, err := dnsprovider.GetDnsProvider(dnsProviderId, nil)
var file io.Reader
if dnsProviderId == k8scoredns.ProviderName {
var lines []string
lines = append(lines, "etcd-endpoints = "+dnsServer)
lines = append(lines, "zones = "+zones[0])
config := "[global]\n" + strings.Join(lines, "\n") + "\n"
file = bytes.NewReader([]byte(config))
}
dnsProvider, err := dnsprovider.GetDnsProvider(dnsProviderId, file)
if err != nil {
glog.Errorf("Error initializing DNS provider %q: %v", dnsProviderId, err)
os.Exit(1)
@ -96,7 +111,7 @@ func main() {
os.Exit(1)
}
dnsController, err := dns.NewDNSController(dnsProvider, zoneRules)
dnsController, err := dns.NewDNSController(dnsProvider, zoneRules, dnsProviderId)
if err != nil {
glog.Errorf("Error building DNS controller: %v", err)
os.Exit(1)

View File

@ -29,6 +29,7 @@ import (
"k8s.io/kops/dns-controller/pkg/util"
"k8s.io/kubernetes/federation/pkg/dnsprovider"
k8scoredns "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/coredns"
"k8s.io/kubernetes/federation/pkg/dnsprovider/rrstype"
)
@ -54,6 +55,9 @@ type DNSController struct {
// changeCount is a change-counter, which helps us avoid computation when nothing has changed
changeCount uint64
//DNS Provider ID, one of aws-route53, google-clouddns, and coredns
dnsProviderId string
}
// DNSController is a Context
@ -80,16 +84,17 @@ type DNSControllerScope struct {
var _ Scope = &DNSControllerScope{}
// NewDnsController creates a DnsController
func NewDNSController(dnsProvider dnsprovider.Interface, zoneRules *ZoneRules) (*DNSController, error) {
func NewDNSController(dnsProvider dnsprovider.Interface, zoneRules *ZoneRules, dnsProviderId string) (*DNSController, error) {
dnsCache, err := newDNSCache(dnsProvider)
if err != nil {
return nil, fmt.Errorf("error initializing DNS cache: %v", err)
}
c := &DNSController{
scopes: make(map[string]*DNSControllerScope),
zoneRules: zoneRules,
dnsCache: dnsCache,
scopes: make(map[string]*DNSControllerScope),
zoneRules: zoneRules,
dnsCache: dnsCache,
dnsProviderId: dnsProviderId,
}
return c, nil
@ -273,7 +278,7 @@ func (c *DNSController) runOnce() error {
dedup = append(dedup, s)
}
err := op.updateRecords(k, dedup, int64(ttl.Seconds()))
err := op.updateRecords(k, dedup, int64(ttl.Seconds()), c.dnsProviderId)
if err != nil {
glog.Infof("error updating records for %s: %v", k, err)
errors = append(errors, err)
@ -288,7 +293,7 @@ func (c *DNSController) runOnce() error {
newValues := newValueMap[k]
if newValues == nil {
err := op.deleteRecords(k)
err := op.deleteRecords(k, c.dnsProviderId)
if err != nil {
glog.Infof("error deleting records for %s: %v", k, err)
errors = append(errors, err)
@ -430,7 +435,7 @@ func (o *dnsOp) listRecords(zone dnsprovider.Zone) ([]dnsprovider.ResourceRecord
return rrs, nil
}
func (o *dnsOp) deleteRecords(k recordKey) error {
func (o *dnsOp) deleteRecords(k recordKey, dnsProviderId string) error {
glog.V(2).Infof("Deleting all records for %s", k)
fqdn := EnsureDotSuffix(k.FQDN)
@ -441,6 +446,32 @@ func (o *dnsOp) deleteRecords(k recordKey) error {
return fmt.Errorf("no suitable zone found for %q", fqdn)
}
// TODO: work-around before ResourceRecordSets.List() is implemented for CoreDNS
if dnsProviderId == k8scoredns.ProviderName {
rrsProvider, ok := zone.ResourceRecordSets()
if !ok {
return fmt.Errorf("zone does not support resource records %q", zone.Name())
}
dnsRecord, err := rrsProvider.Get(fqdn)
if err != nil {
return fmt.Errorf("Failed to get DNS record %s with error: %v", fqdn, err)
}
if dnsRecord != nil && string(dnsRecord.Type()) == string(k.RecordType) {
cs, err := o.getChangeset(zone)
if err != nil {
return err
}
glog.V(2).Infof("Deleting resource record %s %s", fqdn, k.RecordType)
cs.Remove(dnsRecord)
}
return nil
}
// when DNS provider is aws-route53 or google-clouddns
rrs, err := o.listRecords(zone)
if err != nil {
return fmt.Errorf("error querying resource records for zone %q: %v", zone.Name(), err)
@ -469,7 +500,7 @@ func (o *dnsOp) deleteRecords(k recordKey) error {
return nil
}
func (o *dnsOp) updateRecords(k recordKey, newRecords []string, ttl int64) error {
func (o *dnsOp) updateRecords(k recordKey, newRecords []string, ttl int64, dnsProviderId string) error {
fqdn := EnsureDotSuffix(k.FQDN)
zone := o.findZone(fqdn)
@ -483,29 +514,42 @@ func (o *dnsOp) updateRecords(k recordKey, newRecords []string, ttl int64) error
return fmt.Errorf("zone does not support resource records %q", zone.Name())
}
rrs, err := o.listRecords(zone)
if err != nil {
return fmt.Errorf("error querying resource records for zone %q: %v", zone.Name(), err)
}
var existing dnsprovider.ResourceRecordSet
for _, rr := range rrs {
rrName := EnsureDotSuffix(rr.Name())
if rrName != fqdn {
glog.V(8).Infof("Skipping record %q (name != %s)", rrName, fqdn)
continue
// TODO: work-around before ResourceRecordSets.List() is implemented for CoreDNS
if dnsProviderId == k8scoredns.ProviderName {
dnsRecord, err := rrsProvider.Get(fqdn)
if err != nil {
return fmt.Errorf("Failed to get DNS record %s with error: %v", fqdn, err)
}
if string(rr.Type()) != string(k.RecordType) {
glog.V(8).Infof("Skipping record %q (type %s != %s)", rrName, rr.Type(), k.RecordType)
continue
if dnsRecord != nil && string(dnsRecord.Type()) == string(k.RecordType) {
glog.V(8).Infof("Found matching record: %s %s", k.RecordType, fqdn)
existing = dnsRecord
}
} else {
// when DNS provider is aws-route53 or google-clouddns
rrs, err := o.listRecords(zone)
if err != nil {
return fmt.Errorf("error querying resource records for zone %q: %v", zone.Name(), err)
}
if existing != nil {
glog.Warningf("Found multiple matching records: %v and %v", existing, rr)
} else {
glog.V(8).Infof("Found matching record: %s %s", k.RecordType, rrName)
for _, rr := range rrs {
rrName := EnsureDotSuffix(rr.Name())
if rrName != fqdn {
glog.V(8).Infof("Skipping record %q (name != %s)", rrName, fqdn)
continue
}
if string(rr.Type()) != string(k.RecordType) {
glog.V(8).Infof("Skipping record %q (type %s != %s)", rrName, rr.Type(), k.RecordType)
continue
}
if existing != nil {
glog.Warningf("Found multiple matching records: %v and %v", existing, rr)
} else {
glog.V(8).Infof("Found matching record: %s %s", k.RecordType, rrName)
}
existing = rr
}
existing = rr
}
cs, err := o.getChangeset(zone)

View File

@ -0,0 +1,180 @@
# Development process and hacks for vSphere
This document contains a few details, guidelines and tips about the ongoing effort to support vSphere in kops.
## Contact
We are using [#sig-onprem channel](https://kubernetes.slack.com/messages/sig-onprem/) for discussing vSphere support for kops. Please feel free to join and talk to us.
## Process
Here is a [list of requirements and tasks](https://docs.google.com/document/d/10L7I98GuW7o7QuX_1QTouxC0t0aEO_68uHKNc7o4fXY/edit#heading=h.6wyer21z75n9 "Kops-vSphere specification") that we are working on. Once the basic infrastructure for vSphere is ready, we will move these tasks to issues.
## Setting up DNS
Since vSphere doesn't have built-in DNS service, we use CoreDNS to support the DNS requirement in vSphere provider. This requires the users to setup a CoreDNS server before creating a kubernetes cluster. Please follow the following instructions to setup.
For now we hardcoded DNS zone to skydns.local. So your cluster name should have suffix skydns.local, for example: "mycluster.skydns.local"
### Setup CoreDNS server
1. Login to vSphere Client.
2. Right-Click on ESX host on which you want to deploy the DNS server.
3. Select Deploy OVF template.
4. Copy and paste URL for [OVA](https://storage.googleapis.com/kops-vsphere/DNSStorage.ova) (uploaded 04/18/2017).
5. Follow next steps according to instructions mentioned in wizard.
6. Power on the imported VM.
7. SSH into the VM and execute ./start-dns.sh under /root. Username/Password: root/kubernetes
### Check DNS server is ready
On your local machine, execute the following command:
```bash
dig @[DNS server's IP] -p 53 NS skydns.local
```
Successful answer should look like the following:
```bash
; <<>> DiG 9.8.3-P1 <<>> @10.162.17.161 -p 53 NS skydns.local
; (1 server found)
;; global options: +cmd
;; Got answer:
;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 42011
;; flags: qr aa rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1
;; QUESTION SECTION:
;skydns.local. IN NS
;; ANSWER SECTION:
skydns.local. 160 IN NS ns1.ns.dns.skydns.local.
;; ADDITIONAL SECTION:
ns1.ns.dns.skydns.local. 160 IN A 192.168.0.1
;; Query time: 74 msec
;; SERVER: 10.162.17.161#53(10.162.17.161)
;; WHEN: Tue Mar 14 22:40:06 2017
;; MSG SIZE rcvd: 71
```
### Add DNS server information when create cluster
Add ```--dns=private --vsphere-coredns-server=http://[DNS server's IP]:2379``` into the ```kops create cluster``` command line.
### Use CoreDNS supported DNS Controller
Information about DNS Controller can be found [here](https://github.com/kubernetes/kops/blob/master/dns-controller/README.md).
Currently the DNS Controller is an add-on container and the image is from kope/dns-controller.
Before the vSphere support is officially merged into upstream, please use the following CoreDNS supported DNS controller.
```bash
export DNSCONTROLLER_IMAGE=cnastorage/dns-controller
```
(The above environment variable is already set in [kops_dir]/hack/vsphere/set_env)
## Setting up cluster state storage
Kops requires the state of clusters to be stored inside certain storage service. AWS S3 is the default option.
More about using AWS S3 for cluster state store can be found at "Cluster State storage" on this [page](https://github.com/kubernetes/kops/blob/master/docs/aws.md).
Users can also setup their own S3 server and use the following instructions to use user-defined S3-compatible applications for cluster state storage.
This is recommended if you don't have AWS account or you don't want to store the status of your clusters on public cloud storage.
Minio is a S3-compatible object storage application. We have included Minio components inside the same OVA template for CoreDNS service.
If you haven't setup CoreDNS according to section "Setup CoreDNS server" of this document, please follow the instructions in section "Setup CoreDNS server" Step 1 to Step 6.
Then SSH into the VM for CoreDNS/Minio service and execute:
```bash
/root/start-minio.sh [bucket_name]
```
Output of the script should look like:
```bash
Please set the following environment variables into hack/vsphere/set_env accordingly, before using kops create cluster:
KOPS_STATE_STORE=s3://[s3_bucket]
S3_ACCESS_KEY_ID=[s3_access_key]
S3_SECRET_ACCESS_KEY=[s3_secret_key]
S3_REGION=[s3_region]
```
Update [kops_dir]/hack/vsphere/set_env according to the output of the script and the IP address/service port of the Minio server:
```bash
export KOPS_STATE_STORE=s3://[s3_bucket]
export S3_ACCESS_KEY_ID=[s3_access_key]
export S3_SECRET_ACCESS_KEY=[s3_secret_key]
export S3_REGION=[s3_region]
export S3_ENDPOINT=http://[s3_server_ip]:9000
```
Users can also choose their own S3-compatible storage applications by setting the environment variables similarly.
## Kops with vSphere
vSphere cloud provider support in kops is a work in progress. To try out deploying kubernetes cluster on vSphere using kops, some extra steps are required.
### Pre-requisites
+ vSphere with at least one ESX, having sufficient free disk space on attached datastore. ESX VM's should have internet connectivity.
+ Setup DNS and S3 storage service following steps given in relevant Section above.
+ Upload VM template. Steps:
1. Login to vSphere Client.
2. Right-Click on ESX host on which you want to deploy the template.
3. Select Deploy OVF template.
4. Copy and paste URL for [OVA](https://storage.googleapis.com/kops-vsphere/kops_ubuntu_16_04.ova) (uploaded 04/18/2017).
5. Follow next steps according to instructions mentioned in wizard.
**NOTE: DO NOT POWER ON THE IMPORTED TEMPLATE VM.**
+ Update ```[kops_dir]/hack/vsphere/set_env``` setting up necessary environment variables.
+ ```source [kops_dir]/hack/vsphere/set_env```
### Installing
Currently vSphere support is not part of upstream kops releases. Please use the following instructions to use binaries/images with vSphere support.
#### Linux
Download kops binary from [here](https://storage.googleapis.com/kops-vsphere/kops-linux-amd64), then:
```bash
chmod +x kops-linux-amd64 # Add execution permissions
mv kops-linux-amd64 /usr/local/bin/kops # Move the kops to /usr/local/bin
```
#### Darwin
Download kops binary from [here](https://storage.googleapis.com/kops-vsphere/kops-darwin-amd64), then:
```bash
chmod +x kops-darwin-amd64 # Add execution permissions
mv kops-darwin-amd64 /usr/local/bin/kops # Move the kops to /usr/local/bin
```
### Building from source
Execute following command(s) to build all necessary components required to run kops for vSphere:
```bash
source [kops_dir]/hack/vsphere/set_env
make vsphere-version-dist
```
```make vsphere-version-dist``` will build and upload protokube image and nodeup binary at the target location specified by you in ```[kops_dir]/hack/vsphere/set_env```.
Please note that dns-controller has also been modified to support vSphere. You can continue to use ```export DNSCONTROLLER_IMAGE=cnastorage/dns-controller```. If you have made any local changes to dns-controller and would like to use your custom image you need to build the dns-controller image using ```DOCKER_REGISTRY=[your docker hub repo] make dns-controller-push``` and set ```DNSCONTROLLER_IMAGE``` accordingly. Please see the relevant Section above, on setting up DNS.
### Launching Cluster
Execute following command to launch cluster.
```bash
kops create cluster kubernetes.skydns.local --cloud=vsphere --zones=vmware-zone --dns-zone=skydns.local --networking=flannel
--vsphere-server=10.160.97.44 --vsphere-datacenter=VSAN-DC --vsphere-resource-pool=VSAN-Cluster --vsphere-datastore=vsanDatastore --dns private --vsphere-coredns-server=http://10.192.217.24:2379 --image="kops_ubuntu_16_04.ova"
```
If kops doesn't exist in default path, locate it inside .build/dist/linux/amd64/kops for linux machine or .build/dist/darwin/amd64/kops for mac under kops source directory.
**Notes**
1. ```clustername``` should end with **skydns.local**. Example: ```kubernetes.cluster.skydns.local```.
2. For ```zones``` any string will do, for now. It's only getting used for the construction of names of various entities. But it's a mandatory argument.
3. Make sure the following parameters have these values:
* ```--dns-zone=skydns.local```
* ```--networking=flannel```
* ```--dns=private```
### Cleaning up environment
Run following command to cleanup all set environment variables and regenerate all images and binaries without any of the vSphere specific steps.
```bash
source [kops_dir]/hack/vsphere/cleanup_env
make version-dist
```
### Deleting cluster
Cluster deletion hasn't been fully implemented yet. So you will have to delete vSphere VM's manually for now.
Configuration and spec data can be removed from S3 using the following command:
```bash
kops delete cluster yourcluster.skydns.local --yes
```

View File

@ -47,6 +47,7 @@ k8s.io/kops/pkg/model/components
k8s.io/kops/pkg/model/gcemodel
k8s.io/kops/pkg/model/iam
k8s.io/kops/pkg/model/resources
k8s.io/kops/pkg/model/vspheremodel
k8s.io/kops/pkg/resources
k8s.io/kops/pkg/systemd
k8s.io/kops/pkg/util/stringorslice
@ -67,6 +68,8 @@ k8s.io/kops/upup/pkg/fi/cloudup/dnstasks
k8s.io/kops/upup/pkg/fi/cloudup/gce
k8s.io/kops/upup/pkg/fi/cloudup/gcetasks
k8s.io/kops/upup/pkg/fi/cloudup/terraform
k8s.io/kops/upup/pkg/fi/cloudup/vsphere
k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks
k8s.io/kops/upup/pkg/fi/fitasks
k8s.io/kops/upup/pkg/fi/k8sapi
k8s.io/kops/upup/pkg/fi/loader

43
hack/vsphere/cleanup_env Executable file
View File

@ -0,0 +1,43 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Clears every environment variable set by hack/vsphere/set_env so that
# subsequent builds (e.g. `make version-dist`) run without any of the
# vSphere-specific overrides.
#
# This script must be *sourced*, not executed, for the exports to take
# effect in the calling shell:
#   source hack/vsphere/cleanup_env
export KOPS_FEATURE_FLAGS=
export DNSCONTROLLER_IMAGE=
export KOPS_STATE_STORE=
export S3_REGION=
export S3_ACCESS_KEY_ID=
export S3_SECRET_ACCESS_KEY=
export S3_ENDPOINT=
export VSPHERE_USERNAME=
export VSPHERE_PASSWORD=
export TARGET=
export TARGET_PATH=
export NODEUP_URL=
export PROTOKUBE_IMAGE=

# Echo the (now empty) values so the user can confirm the cleared state.
echo "KOPS_FEATURE_FLAGS=${KOPS_FEATURE_FLAGS}"
echo "DNSCONTROLLER_IMAGE=${DNSCONTROLLER_IMAGE}"
echo "KOPS_STATE_STORE=${KOPS_STATE_STORE}"
echo "S3_REGION=${S3_REGION}"
echo "S3_ACCESS_KEY_ID=${S3_ACCESS_KEY_ID}"
echo "S3_SECRET_ACCESS_KEY=${S3_SECRET_ACCESS_KEY}"
echo "S3_ENDPOINT=${S3_ENDPOINT}"
echo "VSPHERE_USERNAME=${VSPHERE_USERNAME}"
echo "VSPHERE_PASSWORD=${VSPHERE_PASSWORD}"
echo "NODEUP_URL=${NODEUP_URL}"
echo "PROTOKUBE_IMAGE=${PROTOKUBE_IMAGE}"
echo "TARGET=${TARGET}"
echo "TARGET_PATH=${TARGET_PATH}"

62
hack/vsphere/set_env Executable file
View File

@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Environment required to build/run kops with vSphere support.
# All values below are SAMPLE PLACEHOLDERS — edit them for your
# environment before use. This script must be *sourced*, not executed:
#   source hack/vsphere/set_env
# Use hack/vsphere/cleanup_env to undo these settings.

# Enabling vSphere cloud provider feature flag in kops.
export KOPS_FEATURE_FLAGS=+VSphereCloudProvider

# If set, this dns controller image will be used.
# Leave this value unmodified if you are not building a new dns-controller image.
export DNSCONTROLLER_IMAGE=cnastorage/dns-controller

# S3 bucket that kops should use.
export KOPS_STATE_STORE=s3://your-obj-store

# S3 state store credentials
# WARNING: these are secrets; do not commit real values to source control.
export S3_REGION=us-west-2
export S3_ACCESS_KEY_ID=something
export S3_SECRET_ACCESS_KEY=something
export S3_ENDPOINT=http://endpoint_ip:port

# vSphere credentials
# WARNING: sample password — replace with your own and keep it out of VCS.
export VSPHERE_USERNAME=administrator@vsphere.local
export VSPHERE_PASSWORD=Admin!23

# Set TARGET and TARGET_PATH to values where you want nodeup and protokube binaries to get copied.
# Urls corresponding to this location are set for NODEUP_URL and PROTOKUBE_IMAGE.
# Leave TARGET and TARGET_PATH empty if you want to use the pre-build nodeup and protokube.
export TARGET=jdoe@pa-dbc1131.eng.vmware.com
export TARGET_PATH=/dbc/pa-dbc1131/jdoe/misc/kops/

# Set urls to access nodeup binary and protokube image tar in NODEUP_URL and PROTOKUBE_IMAGE, respectively.
# Leave NODEUP_URL and PROTOKUBE_IMAGE unchanged if you want to use the pre-build nodeup and protokube.
#export NODEUP_URL=http://pa-dbc1131.eng.vmware.com/jdoe/misc/kops/nodeup/nodeup
#export PROTOKUBE_IMAGE=http://pa-dbc1131.eng.vmware.com/jdoe/misc/kops/protokube/protokube.tar.gz
export NODEUP_URL=https://storage.googleapis.com/kops-vsphere/nodeup
export PROTOKUBE_IMAGE=https://storage.googleapis.com/kops-vsphere/protokube.tar.gz

# Echo the effective values so the user can verify the configuration.
echo "KOPS_FEATURE_FLAGS=${KOPS_FEATURE_FLAGS}"
echo "DNSCONTROLLER_IMAGE=${DNSCONTROLLER_IMAGE}"
echo "KOPS_STATE_STORE=${KOPS_STATE_STORE}"
echo "S3_REGION=${S3_REGION}"
echo "S3_ACCESS_KEY_ID=${S3_ACCESS_KEY_ID}"
echo "S3_SECRET_ACCESS_KEY=${S3_SECRET_ACCESS_KEY}"
echo "S3_ENDPOINT=${S3_ENDPOINT}"
echo "VSPHERE_USERNAME=${VSPHERE_USERNAME}"
echo "VSPHERE_PASSWORD=${VSPHERE_PASSWORD}"
echo "NODEUP_URL=${NODEUP_URL}"
echo "PROTOKUBE_IMAGE=${PROTOKUBE_IMAGE}"
echo "TARGET=${TARGET}"
echo "TARGET_PATH=${TARGET_PATH}"

View File

@ -17,15 +17,24 @@ limitations under the License.
package model
import (
"bufio"
"fmt"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/nodeup/nodetasks"
"os"
"strings"
)
const CloudConfigFilePath = "/etc/kubernetes/cloud.config"
// Required for vSphere CloudProvider
const MinimumVersionForVMUUID = "1.5.3"
// VM UUID is set by cloud-init
const VM_UUID_FILE_PATH = "/etc/vmware/vm_uuid"
// CloudConfigBuilder creates the cloud configuration file
type CloudConfigBuilder struct {
*NodeupModelContext
@ -59,9 +68,41 @@ func (b *CloudConfigBuilder) Build(c *fi.ModelBuilderContext) error {
if cloudConfig.DisableSecurityGroupIngress != nil {
lines = append(lines, fmt.Sprintf("DisableSecurityGroupIngress = %t", *cloudConfig.DisableSecurityGroupIngress))
}
case "vsphere":
vm_uuid, err := getVMUUID(b.Cluster.Spec.KubernetesVersion)
if err != nil {
return err
}
// Note: Segregate configuration for different sections as below
// Global Config for vSphere CloudProvider
if cloudConfig.VSphereUsername != nil {
lines = append(lines, "user = "+*cloudConfig.VSphereUsername)
}
if cloudConfig.VSpherePassword != nil {
lines = append(lines, "password = "+*cloudConfig.VSpherePassword)
}
if cloudConfig.VSphereServer != nil {
lines = append(lines, "server = "+*cloudConfig.VSphereServer)
lines = append(lines, "port = 443")
lines = append(lines, fmt.Sprintf("insecure-flag = %t", true))
}
if cloudConfig.VSphereDatacenter != nil {
lines = append(lines, "datacenter = "+*cloudConfig.VSphereDatacenter)
}
if cloudConfig.VSphereDatastore != nil {
lines = append(lines, "datastore = "+*cloudConfig.VSphereDatastore)
}
if vm_uuid != "" {
lines = append(lines, "vm-uuid = "+strings.Trim(vm_uuid, "\n"))
}
// Disk Config for vSphere CloudProvider
// We need this to support Kubernetes vSphere CloudProvider < v1.5.3
lines = append(lines, "[disk]")
lines = append(lines, "scsicontrollertype = pvscsi")
}
config := "[global]\n" + strings.Join(lines, "\n") + "\n"
t := &nodetasks.File{
Path: CloudConfigFilePath,
Contents: fi.NewStringResource(config),
@ -71,3 +112,33 @@ func (b *CloudConfigBuilder) Build(c *fi.ModelBuilderContext) error {
return nil
}
// We need this for vSphere CloudProvider.
// getVMUUID reads the instance UUID of the VM from the file written by
// cloud-init (VM_UUID_FILE_PATH). It returns the raw line (including the
// trailing newline, which the caller trims) for Kubernetes versions >=
// MinimumVersionForVMUUID, and ("", nil) for older versions where the
// UUID is not required.
func getVMUUID(kubernetesVersion string) (string, error) {
	actualKubernetesVersion, err := util.ParseKubernetesVersion(kubernetesVersion)
	if err != nil {
		return "", err
	}
	minimumVersionForUUID, err := util.ParseKubernetesVersion(MinimumVersionForVMUUID)
	if err != nil {
		return "", err
	}
	// VM UUID is required only for Kubernetes version >= 1.5.3.
	if !actualKubernetesVersion.GTE(*minimumVersionForUUID) {
		// BUGFIX: previously `return "", err` — err is always nil here,
		// which read as if an error were being propagated. Be explicit.
		return "", nil
	}
	file, err := os.Open(VM_UUID_FILE_PATH)
	if err != nil {
		// BUGFIX: the defer used to run before this check, deferring
		// Close on a nil *os.File when Open failed.
		return "", err
	}
	defer file.Close()
	// ReadString returns an error (io.EOF) if the file has no trailing
	// newline; that behavior is preserved from the original.
	vm_uuid, err := bufio.NewReader(file).ReadString('\n')
	if err != nil {
		return "", err
	}
	return vm_uuid, nil
}

View File

@ -163,6 +163,10 @@ type ProtokubeFlags struct {
Cloud *string `json:"cloud,omitempty" flag:"cloud"`
ApplyTaints *bool `json:"applyTaints,omitempty" flag:"apply-taints"`
// ClusterId flag is required only for vSphere cloud type, to pass cluster id information to protokube. AWS and GCE workflows ignore this flag.
ClusterId *string `json:"cluster-id,omitempty" flag:"cluster-id"`
DNSServer *string `json:"dns-server,omitempty" flag:"dns-server"`
}
// ProtokubeFlags returns the flags object for protokube
@ -208,6 +212,10 @@ func (t *ProtokubeBuilder) ProtokubeFlags(k8sVersion semver.Version) *ProtokubeF
f.DNSProvider = fi.String("aws-route53")
case fi.CloudProviderGCE:
f.DNSProvider = fi.String("google-clouddns")
	case fi.CloudProviderVSphere:
		// vSphere has no managed DNS service; protokube uses CoreDNS instead.
		f.DNSProvider = fi.String("coredns")
		// ClusterId is required only for vSphere (see ProtokubeFlags field comment).
		f.ClusterId = fi.String(t.Cluster.ObjectMeta.Name)
		// NOTE(review): unconditionally dereferences Spec.CloudConfig and
		// VSphereCoreDNSServer — will panic if either is nil. Creation-time
		// validation appears to enforce both for vsphere; confirm no other
		// code path reaches here without them set.
		f.DNSServer = fi.String(*t.Cluster.Spec.CloudConfig.VSphereCoreDNSServer)
default:
glog.Warningf("Unknown cloudprovider %q; won't set DNS provider")
}

View File

@ -691,4 +691,13 @@ type CloudConfiguration struct {
NodeInstancePrefix *string `json:"nodeInstancePrefix,omitempty"`
// AWS cloud-config options
DisableSecurityGroupIngress *bool `json:"disableSecurityGroupIngress,omitempty"`
// vSphere cloud-config specs
VSphereUsername *string `json:"vSphereUsername,omitempty"`
VSpherePassword *string `json:"vSpherePassword,omitempty"`
VSphereServer *string `json:"vSphereServer,omitempty"`
VSphereDatacenter *string `json:"vSphereDatacenter,omitempty"`
VSphereResourcePool *string `json:"vSphereResourcePool,omitempty"`
VSphereDatastore *string `json:"vSphereDatastore,omitempty"`
VSphereCoreDNSServer *string `json:"vSphereCoreDNSServer,omitempty"`
}

View File

@ -668,4 +668,13 @@ type CloudConfiguration struct {
Multizone *bool `json:"multizone,omitempty"`
NodeTags *string `json:"nodeTags,omitempty"`
NodeInstancePrefix *string `json:"nodeInstancePrefix,omitempty"`
// vSphere cloud-config specs
VSphereUsername *string `json:"vSphereUsername,omitempty"`
VSpherePassword *string `json:"vSpherePassword,omitempty"`
VSphereServer *string `json:"vSphereServer,omitempty"`
VSphereDatacenter *string `json:"vSphereDatacenter,omitempty"`
VSphereDatastore *string `json:"vSphereDatastore,omitempty"`
VSphereResourcePool *string `json:"vSphereResourcePool,omitempty"`
VSphereCoreDNSServer *string `json:"vSphereCoreDNSServer,omitempty"`
}

View File

@ -304,6 +304,13 @@ func autoConvert_v1alpha1_CloudConfiguration_To_kops_CloudConfiguration(in *Clou
out.Multizone = in.Multizone
out.NodeTags = in.NodeTags
out.NodeInstancePrefix = in.NodeInstancePrefix
out.VSphereUsername = in.VSphereUsername
out.VSpherePassword = in.VSpherePassword
out.VSphereServer = in.VSphereServer
out.VSphereDatacenter = in.VSphereDatacenter
out.VSphereDatastore = in.VSphereDatastore
out.VSphereResourcePool = in.VSphereResourcePool
out.VSphereCoreDNSServer = in.VSphereCoreDNSServer
return nil
}
@ -315,6 +322,13 @@ func autoConvert_kops_CloudConfiguration_To_v1alpha1_CloudConfiguration(in *kops
out.Multizone = in.Multizone
out.NodeTags = in.NodeTags
out.NodeInstancePrefix = in.NodeInstancePrefix
out.VSphereUsername = in.VSphereUsername
out.VSpherePassword = in.VSpherePassword
out.VSphereServer = in.VSphereServer
out.VSphereDatacenter = in.VSphereDatacenter
out.VSphereResourcePool = in.VSphereResourcePool
out.VSphereDatastore = in.VSphereDatastore
out.VSphereCoreDNSServer = in.VSphereCoreDNSServer
return nil
}

View File

@ -297,4 +297,13 @@ type CloudConfiguration struct {
Multizone *bool `json:"multizone,omitempty"`
NodeTags *string `json:"nodeTags,omitempty"`
NodeInstancePrefix *string `json:"nodeInstancePrefix,omitempty"`
// vSphere cloud-config specs
VSphereUsername *string `json:"vSphereUsername,omitempty"`
VSpherePassword *string `json:"vSpherePassword,omitempty"`
VSphereServer *string `json:"vSphereServer,omitempty"`
VSphereDatacenter *string `json:"vSphereDatacenter,omitempty"`
VSphereDatastore *string `json:"vSphereDatastore,omitempty"`
VSphereResourcePool *string `json:"vSphereResourcePool,omitempty"`
VSphereCoreDNSServer *string `json:"vSphereCoreDNSServer,omitempty"`
}

View File

@ -330,6 +330,13 @@ func autoConvert_v1alpha2_CloudConfiguration_To_kops_CloudConfiguration(in *Clou
out.Multizone = in.Multizone
out.NodeTags = in.NodeTags
out.NodeInstancePrefix = in.NodeInstancePrefix
out.VSphereUsername = in.VSphereUsername
out.VSpherePassword = in.VSpherePassword
out.VSphereServer = in.VSphereServer
out.VSphereDatacenter = in.VSphereDatacenter
out.VSphereDatastore = in.VSphereDatastore
out.VSphereResourcePool = in.VSphereResourcePool
out.VSphereCoreDNSServer = in.VSphereCoreDNSServer
return nil
}
@ -341,6 +348,13 @@ func autoConvert_kops_CloudConfiguration_To_v1alpha2_CloudConfiguration(in *kops
out.Multizone = in.Multizone
out.NodeTags = in.NodeTags
out.NodeInstancePrefix = in.NodeInstancePrefix
out.VSphereUsername = in.VSphereUsername
out.VSpherePassword = in.VSpherePassword
out.VSphereServer = in.VSphereServer
out.VSphereDatacenter = in.VSphereDatacenter
out.VSphereResourcePool = in.VSphereResourcePool
out.VSphereDatastore = in.VSphereDatastore
out.VSphereCoreDNSServer = in.VSphereCoreDNSServer
return nil
}

View File

@ -47,6 +47,8 @@ var VPCSkipEnableDNSSupport = New("VPCSkipEnableDNSSupport", Bool(false))
// SkipTerraformFormat if set will mean that we will not `tf fmt` the generated terraform.
var SkipTerraformFormat = New("SkipTerraformFormat", Bool(false))
var VSphereCloudProvider = New("VSphereCloudProvider", Bool(false))
var flags = make(map[string]*FeatureFlag)
var flagsMutex sync.Mutex

View File

@ -104,6 +104,9 @@ func (b *KubeControllerManagerOptionsBuilder) BuildOptions(o interface{}) error
kcm.CloudProvider = "gce"
kcm.ClusterName = gce.SafeClusterName(b.Context.ClusterName)
case fi.CloudProviderVSphere:
kcm.CloudProvider = "vsphere"
default:
return fmt.Errorf("unknown cloud provider %q", clusterSpec.CloudProvider)
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package components
import (
"github.com/golang/glog"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/loader"
@ -131,6 +132,7 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error {
clusterSpec.Kubelet.CgroupRoot = "/"
glog.V(1).Infof("Cloud Provider: %s", cloudProvider)
if cloudProvider == fi.CloudProviderAWS {
clusterSpec.Kubelet.CloudProvider = "aws"
@ -156,6 +158,11 @@ func (b *KubeletOptionsBuilder) BuildOptions(o interface{}) error {
clusterSpec.CloudConfig.NodeTags = fi.String(GCETagForRole(b.Context.ClusterName, kops.InstanceGroupRoleNode))
}
if cloudProvider == fi.CloudProviderVSphere {
clusterSpec.Kubelet.CloudProvider = "vsphere"
clusterSpec.Kubelet.HairpinMode = "promiscuous-bridge"
}
usesKubenet, err := UsesKubenet(clusterSpec)
if err != nil {
return err

View File

@ -88,6 +88,8 @@ func (b *MasterVolumeBuilder) Build(c *fi.ModelBuilderContext) error {
b.addAWSVolume(c, name, volumeSize, subnet, etcd, m, allMembers)
case fi.CloudProviderGCE:
b.addGCEVolume(c, name, volumeSize, subnet, etcd, m, allMembers)
case fi.CloudProviderVSphere:
b.addVSphereVolume(c, name, volumeSize, subnet, etcd, m, allMembers)
default:
return fmt.Errorf("unknown cloudprovider %q", b.Cluster.Spec.CloudProvider)
}
@ -165,3 +167,7 @@ func (b *MasterVolumeBuilder) addGCEVolume(c *fi.ModelBuilderContext, name strin
c.AddTask(t)
}
// addVSphereVolume is a placeholder for creating a persistent etcd volume on
// vSphere. TODO: implement; until then this only prints a marker to stdout
// and adds no task to the ModelBuilderContext.
func (b *MasterVolumeBuilder) addVSphereVolume(c *fi.ModelBuilderContext, name string, volumeSize int32, subnet *kops.ClusterSubnetSpec, etcd *kops.EtcdClusterSpec, m *kops.EtcdMemberSpec, allMembers []string) {
	fmt.Print("addVSphereVolume to be implemented")
}

View File

@ -203,3 +203,7 @@ func (b *KopsModelContext) NamePrivateRouteTableInZone(zoneName string) string {
func (b *KopsModelContext) LinkToPrivateRouteTableInZone(zoneName string) *awstasks.RouteTable {
return &awstasks.RouteTable{Name: s(b.NamePrivateRouteTableInZone(zoneName))}
}
// InstanceName returns the name for one VM in the given instance group: the
// autoscaling-group name for the group with the given suffix appended.
func (b *KopsModelContext) InstanceName(ig *kops.InstanceGroup, suffix string) string {
	base := b.AutoscalingGroupName(ig)
	return base + suffix
}

View File

@ -0,0 +1,77 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vspheremodel
// autoscalinggroup is a model for vSphere cloud. It's responsible for building tasks, necessary for kubernetes cluster deployment.
import (
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/model"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks"
"strconv"
)
// AutoscalingGroupModelBuilder configures AutoscalingGroup objects
type AutoscalingGroupModelBuilder struct {
*VSphereModelContext
BootstrapScript *model.BootstrapScript
}
var _ fi.ModelBuilder = &AutoscalingGroupModelBuilder{}
// Build creates tasks related to cluster deployment and adds them to ModelBuilderContext.
//
// For every instance in every instance group it adds three chained tasks:
// create the VM from a template, attach the cloud-init ISO, and power the VM on.
func (b *AutoscalingGroupModelBuilder) Build(c *fi.ModelBuilderContext) error {
	// Note that we are creating a VM per instance group. Instance group represents a group of VMs.
	// The following logic should considerably change once we add support for multiple master/worker nodes,
	// cloud-init etc.
	for _, ig := range b.InstanceGroups {
		// Masters are currently limited to a single instance; other roles use MinSize.
		count := int(fi.Int32Value(ig.Spec.MinSize))
		if ig.Spec.Role == kops.InstanceGroupRoleMaster {
			count = 1
		}

		for n := 1; n <= count; n++ {
			vmName := b.InstanceName(ig, strconv.Itoa(n))

			vmTask := &vspheretasks.VirtualMachine{
				Name:           &vmName,
				VMTemplateName: fi.String(ig.Spec.Image),
			}
			c.AddTask(vmTask)

			isoName := "AttachISO-" + vmName
			isoTask := &vspheretasks.AttachISO{
				Name:            &isoName,
				VM:              vmTask,
				IG:              ig,
				BootstrapScript: b.BootstrapScript,
				EtcdClusters:    b.Cluster.Spec.EtcdClusters,
			}
			c.AddTask(isoTask)

			powerOnName := "PowerON-" + vmName
			c.AddTask(&vspheretasks.VMPowerOn{
				Name:      &powerOnName,
				AttachISO: isoTask,
			})
		}
	}
	return nil
}

View File

@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vspheremodel
import "k8s.io/kops/pkg/model"
type VSphereModelContext struct {
*model.KopsModelContext
}

View File

@ -55,6 +55,8 @@ func (c *AwsCluster) ListResources() (map[string]*ResourceTracker, error) {
return c.listResourcesAWS()
case fi.CloudProviderGCE:
return c.listResourcesGCE()
case fi.CloudProviderVSphere:
return c.listResourcesVSphere()
default:
return nil, fmt.Errorf("Delete on clusters on %q not (yet) supported", c.Cloud.ProviderID())
}

View File

@ -0,0 +1,133 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
"context"
"github.com/golang/glog"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/vsphere"
)
const (
typeVM = "VM"
)
type clusterDiscoveryVSphere struct {
cloud fi.Cloud
vsphereCloud *vsphere.VSphereCloud
clusterName string
}
type vsphereListFn func() ([]*ResourceTracker, error)
// listResourcesVSphere discovers the vSphere resources belonging to this
// cluster and returns them keyed by "<type>:<id>".
func (c *AwsCluster) listResourcesVSphere() (map[string]*ResourceTracker, error) {
	d := &clusterDiscoveryVSphere{
		cloud:        c.Cloud,
		vsphereCloud: c.Cloud.(*vsphere.VSphereCloud),
		clusterName:  c.ClusterName,
	}

	resources := make(map[string]*ResourceTracker)
	// Only VM listing is implemented today; add further list functions here.
	for _, fn := range []vsphereListFn{d.listVMs} {
		trackers, err := fn()
		if err != nil {
			return nil, err
		}
		for _, t := range trackers {
			resources[GetResourceTrackerKey(t)] = t
		}
	}
	return resources, nil
}
// listVMs returns ResourceTrackers for the master and node VMs that match
// this cluster's naming patterns in the vCenter inventory.
func (d *clusterDiscoveryVSphere) listVMs() ([]*ResourceTracker, error) {
	c := d.vsphereCloud

	masterPattern := "*" + "." + "masters" + "." + d.clusterName + "*"
	nodePattern := "nodes" + "." + d.clusterName + "*"

	vms, err := c.GetVirtualMachines([]string{masterPattern, nodePattern})
	if err != nil {
		// A NotFoundError just means no matching VMs exist; anything else is fatal.
		if _, ok := err.(*find.NotFoundError); !ok {
			return nil, err
		}
		glog.Warning(err)
	}

	var trackers []*ResourceTracker
	for _, vm := range vms {
		trackers = append(trackers, &ResourceTracker{
			Name:    vm.Name(),
			ID:      vm.Name(),
			Type:    typeVM,
			deleter: deleteVM,
			Dumper:  DumpVMInfo,
			obj:     vm,
		})
	}
	return trackers, nil
}
// deleteVM powers off a cluster VM, removes its cloud-init ISO and destroys it.
func deleteVM(cloud fi.Cloud, r *ResourceTracker) error {
	vsphereCloud := cloud.(*vsphere.VSphereCloud)
	vm := r.obj.(*object.VirtualMachine)

	task, err := vm.PowerOff(context.TODO())
	if err != nil {
		return err
	}
	// A power-off failure (e.g. the VM is already off) should not block the
	// destroy below, but it was previously ignored entirely; log it.
	if err := task.Wait(context.TODO()); err != nil {
		glog.Warningf("VM power off failed, continuing with destroy: %q", err)
	}

	vsphereCloud.DeleteCloudInitISO(fi.String(vm.Name()))

	task, err = vm.Destroy(context.TODO())
	if err != nil {
		return err
	}
	if err := task.Wait(context.TODO()); err != nil {
		// BUG FIX: this used glog.Fatalf, which aborts the whole process and
		// skips deletion of every remaining cluster resource. Return instead.
		glog.Errorf("Destroy VM failed: %q", err)
		return err
	}
	return nil
}
// DumpVMInfo returns a map describing the tracked VM (id, type and the raw
// govmomi object), suitable for dumping.
func DumpVMInfo(r *ResourceTracker) (interface{}, error) {
	return map[string]interface{}{
		"id":   r.ID,
		"type": r.Type,
		"raw":  r.obj,
	}, nil
}
// GetResourceTrackerKey builds the "<type>:<id>" key under which a tracker is
// stored in the resources map.
func GetResourceTrackerKey(t *ResourceTracker) string {
	key := t.Type
	key += ":"
	key += t.ID
	return key
}

View File

@ -17,10 +17,12 @@ limitations under the License.
package main
import (
"bytes"
"flag"
"fmt"
"github.com/golang/glog"
"github.com/spf13/pflag"
"io"
"k8s.io/kops/dns-controller/pkg/dns"
"k8s.io/kops/protokube/pkg/protokube"
"k8s.io/kubernetes/federation/pkg/dnsprovider"
@ -30,6 +32,7 @@ import (
// Load DNS plugins
_ "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/aws/route53"
k8scoredns "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/coredns"
_ "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/google/clouddns"
)
@ -53,7 +56,7 @@ func main() {
func run() error {
dnsProviderId := "aws-route53"
flags.StringVar(&dnsProviderId, "dns", dnsProviderId, "DNS provider we should use (aws-route53, google-clouddns)")
flags.StringVar(&dnsProviderId, "dns", dnsProviderId, "DNS provider we should use (aws-route53, google-clouddns, coredns)")
var zones []string
flags.StringSliceVarP(&zones, "zone", "z", []string{}, "Configure permitted zones and their mappings")
@ -79,6 +82,9 @@ func run() error {
clusterID := ""
flag.StringVar(&clusterID, "cluster-id", clusterID, "Cluster ID")
dnsServer := ""
flag.StringVar(&dnsServer, "dns-server", dnsServer, "DNS Server")
flagChannels := ""
flag.StringVar(&flagChannels, "channels", flagChannels, "channels to install")
@ -126,6 +132,18 @@ func run() error {
if internalIP == nil {
internalIP = gceVolumes.InternalIP()
}
} else if cloud == "vsphere" {
glog.Info("Initializing vSphere volumes")
vsphereVolumes, err := protokube.NewVSphereVolumes()
if err != nil {
glog.Errorf("Error initializing vSphere: %q", err)
os.Exit(1)
}
volumes = vsphereVolumes
if internalIP == nil {
internalIP = vsphereVolumes.InternalIp()
}
} else {
glog.Errorf("Unknown cloud %q", cloud)
os.Exit(1)
@ -166,7 +184,16 @@ func run() error {
var dnsScope dns.Scope
var dnsController *dns.DNSController
{
dnsProvider, err := dnsprovider.GetDnsProvider(dnsProviderId, nil)
var file io.Reader
if dnsProviderId == k8scoredns.ProviderName {
var lines []string
lines = append(lines, "etcd-endpoints = "+dnsServer)
lines = append(lines, "zones = "+zones[0])
config := "[global]\n" + strings.Join(lines, "\n") + "\n"
file = bytes.NewReader([]byte(config))
}
dnsProvider, err := dnsprovider.GetDnsProvider(dnsProviderId, file)
if err != nil {
return fmt.Errorf("Error initializing DNS provider %q: %v", dnsProviderId, err)
}
@ -179,7 +206,7 @@ func run() error {
return fmt.Errorf("unexpected zone flags: %q", err)
}
dnsController, err = dns.NewDNSController(dnsProvider, zoneRules)
dnsController, err = dns.NewDNSController(dnsProvider, zoneRules, dnsProviderId)
if err != nil {
return err
}

View File

@ -0,0 +1,188 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package protokube
// vspehre_volume houses vSphere volume and implements relevant interfaces.
import (
"errors"
"fmt"
"github.com/golang/glog"
"io/ioutil"
"k8s.io/kops/upup/pkg/fi/cloudup/vsphere"
"net"
"os/exec"
"runtime"
"strings"
)
const VolumeMetaDataFile = "/vol-metadata/metadata.json"
const VolStatusValue = "attached"
// VSphereVolumes represents vSphere volume and implements Volumes interface.
type VSphereVolumes struct{}
var _ Volumes = &VSphereVolumes{}
var machineIp net.IP
// NewVSphereVolumes returns instance of VSphereVolumes type.
// VSphereVolumes is stateless, so construction cannot fail; the error return
// exists to match the other cloud volume constructors.
func NewVSphereVolumes() (*VSphereVolumes, error) {
	return &VSphereVolumes{}, nil
}
// FindVolumes returns Volume instances associated with this VSphereVolumes.
// EtcdClusterSpec is populated using vSphere volume metadata.
//
// For each etcd cluster entry in the volume metadata file it resolves the
// volume's mount point and the local block device backing it, and builds a
// Volume carrying an EtcdClusterSpec (cluster name, this node's member name,
// and all member names).
func (v *VSphereVolumes) FindVolumes() ([]*Volume, error) {
	var volumes []*Volume
	// AttachedTo records this machine's IP; left empty if no IP was found.
	ip := v.InternalIp()
	attachedTo := ""
	if ip != nil {
		attachedTo = ip.String()
	}
	// One metadata entry per etcd cluster, read from VolumeMetaDataFile
	// (presumably written at VM provisioning time — confirm with the
	// provisioning flow).
	etcdClusters, err := getVolMetadata()
	if err != nil {
		return nil, err
	}
	for _, etcd := range etcdClusters {
		// Mount points for vSphere volumes are statically assigned per volume ID.
		mountPoint := vsphere.GetMountPoint(etcd.VolumeId)
		localDevice, err := getDevice(mountPoint)
		if err != nil {
			return nil, err
		}
		vol := &Volume{
			ID:          etcd.VolumeId,
			LocalDevice: localDevice,
			AttachedTo:  attachedTo,
			Mountpoint:  mountPoint,
			// vSphere disks are statically attached, so status is constant.
			Status: VolStatusValue,
			Info: VolumeInfo{
				Description: etcd.EtcdClusterName,
			},
		}
		etcdSpec := &EtcdClusterSpec{
			ClusterKey: etcd.EtcdClusterName,
			NodeName:   etcd.EtcdNodeName,
		}
		var nodeNames []string
		for _, member := range etcd.Members {
			nodeNames = append(nodeNames, member.Name)
		}
		etcdSpec.NodeNames = nodeNames
		vol.Info.EtcdClusters = []*EtcdClusterSpec{etcdSpec}
		volumes = append(volumes, vol)
	}
	glog.V(4).Infof("Found volumes: %v", volumes)
	return volumes, nil
}
// getDevice returns the local block device (e.g. "/dev/sdb") mounted at the
// given mount point, by scanning `lsblk -l` output. Linux only.
func getDevice(mountPoint string) (string, error) {
	if runtime.GOOS != "linux" {
		return "", fmt.Errorf("Failed to find device. OS %v is not supported for vSphere.", runtime.GOOS)
	}

	out, err := exec.Command("lsblk", "-l").Output()
	if err != nil {
		return "", err
	}

	// When running containerized, the mount point must be translated to the
	// host path before matching against lsblk output.
	if Containerized {
		mountPoint = PathFor(mountPoint)
	}

	for _, line := range strings.Split(string(out), "\n") {
		if !strings.Contains(line, mountPoint) {
			continue
		}
		// First whitespace-delimited column of `lsblk -l` is the device name.
		fields := strings.Split(line, " ")
		glog.V(4).Infof("Found device: %v ", fields[0])
		return "/dev/" + fields[0], nil
	}
	return "", fmt.Errorf("No device has been mounted on mountPoint %v.", mountPoint)
}
// getVolMetadata reads and decodes the per-VM volume metadata JSON file.
func getVolMetadata() ([]vsphere.VolumeMetadata, error) {
	contents, err := ioutil.ReadFile(PathFor(VolumeMetaDataFile))
	if err != nil {
		return nil, err
	}
	return vsphere.UnmarshalVolumeMetadata(string(contents))
}
// AttachVolume attaches given volume. In case of vSphere, volumes are statically mounted, so no operation is performed.
// Always returns nil.
func (v *VSphereVolumes) AttachVolume(volume *Volume) error {
	// Currently this is a no-op for vSphere. The virtual disks should already be mounted on this VM.
	glog.Infof("All volumes should already be attached. No operation done.")
	return nil
}
// InternalIp returns IP of machine associated with this volume.
// The discovered IP is cached in the package-level machineIp; on discovery
// failure nil is returned and the failure is logged (a later call retries).
func (v *VSphereVolumes) InternalIp() net.IP {
	if machineIp == nil {
		ip, err := getMachineIp()
		if err != nil {
			// BUG FIX: the error was previously swallowed silently, returning
			// a nil IP with no diagnostic.
			glog.Errorf("Failed to determine machine IP: %q", err)
			return nil
		}
		machineIp = ip
	}
	return machineIp
}
// getMachineIp returns the first IPv4 address found on an interface that is
// up and not a loopback device.
func getMachineIp() (net.IP, error) {
	ifaces, err := net.Interfaces()
	if err != nil {
		return nil, err
	}
	for _, iface := range ifaces {
		// Skip interfaces that are down or loopback devices.
		if iface.Flags&net.FlagUp == 0 || iface.Flags&net.FlagLoopback != 0 {
			continue
		}
		addrs, err := iface.Addrs()
		if err != nil {
			return nil, err
		}
		for _, addr := range addrs {
			var ip net.IP
			switch a := addr.(type) {
			case *net.IPNet:
				ip = a.IP
			case *net.IPAddr:
				ip = a.IP
			}
			if ip == nil || ip.IsLoopback() {
				continue
			}
			// Only IPv4 addresses are considered.
			if ip4 := ip.To4(); ip4 != nil {
				return ip4, nil
			}
		}
	}
	return nil, errors.New("No IP found.")
}

View File

@ -28,7 +28,7 @@ spec:
hostNetwork: true
containers:
- name: dns-controller
image: kope/dns-controller:1.6.0
image: {{ DnsControllerImage }}:1.6.0
command:
{{ range $arg := DnsControllerArgv }}
- "{{ $arg }}"

View File

@ -0,0 +1,2 @@
KubeAPIServer:
CloudProvider: vsphere

View File

@ -22,6 +22,7 @@ type CloudProviderID string
const CloudProviderAWS CloudProviderID = "aws"
const CloudProviderGCE CloudProviderID = "gce"
const CloudProviderVSphere CloudProviderID = "vsphere"
type Cloud interface {
ProviderID() CloudProviderID

View File

@ -37,6 +37,7 @@ import (
"k8s.io/kops/pkg/model/awsmodel"
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/pkg/model/gcemodel"
"k8s.io/kops/pkg/model/vspheremodel"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/awstasks"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
@ -44,6 +45,8 @@ import (
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/gcetasks"
"k8s.io/kops/upup/pkg/fi/cloudup/terraform"
"k8s.io/kops/upup/pkg/fi/cloudup/vsphere"
"k8s.io/kops/upup/pkg/fi/cloudup/vspheretasks"
"k8s.io/kops/upup/pkg/fi/fitasks"
"k8s.io/kops/util/pkg/hashing"
"k8s.io/kops/util/pkg/vfs"
@ -364,6 +367,17 @@ func (c *ApplyClusterCmd) Run() error {
l.TemplateFunctions["MachineTypeInfo"] = awsup.GetMachineTypeInfo
}
case fi.CloudProviderVSphere:
{
vsphereCloud := cloud.(*vsphere.VSphereCloud)
// TODO: map region with vCenter cluster, or datacenter, or datastore?
region = vsphereCloud.Cluster
l.AddTypes(map[string]interface{}{
"instance": &vspheretasks.VirtualMachine{},
})
}
default:
return fmt.Errorf("unknown CloudProvider %q", cluster.Spec.CloudProvider)
}
@ -446,6 +460,9 @@ func (c *ApplyClusterCmd) Run() error {
&gcemodel.NetworkModelBuilder{GCEModelContext: gceModelContext},
//&model.SSHKeyModelBuilder{KopsModelContext: modelContext},
)
case fi.CloudProviderVSphere:
l.Builders = append(l.Builders,
&model.PKIModelBuilder{KopsModelContext: modelContext})
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
@ -570,6 +587,17 @@ func (c *ApplyClusterCmd) Run() error {
BootstrapScript: bootstrapScriptBuilder,
})
}
case fi.CloudProviderVSphere:
{
vsphereModelContext := &vspheremodel.VSphereModelContext{
KopsModelContext: modelContext,
}
l.Builders = append(l.Builders, &vspheremodel.AutoscalingGroupModelBuilder{
VSphereModelContext: vsphereModelContext,
BootstrapScript: bootstrapScriptBuilder,
})
}
default:
return fmt.Errorf("unknown cloudprovider %q", cluster.Spec.CloudProvider)
@ -616,6 +644,8 @@ func (c *ApplyClusterCmd) Run() error {
target = gce.NewGCEAPITarget(cloud.(*gce.GCECloud))
case "aws":
target = awsup.NewAWSAPITarget(cloud.(awsup.AWSCloud))
case "vsphere":
target = vsphere.NewVSphereAPITarget(cloud.(*vsphere.VSphereCloud))
default:
return fmt.Errorf("direct configuration not supported with CloudProvider:%q", cluster.Spec.CloudProvider)
}

View File

@ -150,18 +150,22 @@ func precreateDNS(cluster *api.Cluster, cloud fi.Cloud) error {
return fmt.Errorf("error getting DNS resource records for %q", zone.Name())
}
// TODO: We should change the filter to be a suffix match instead
//records, err := rrs.List("", "")
records, err := rrs.List()
if err != nil {
return fmt.Errorf("error listing DNS resource records for %q: %v", zone.Name(), err)
}
recordsMap := make(map[string]dnsprovider.ResourceRecordSet)
for _, record := range records {
name := dns.EnsureDotSuffix(record.Name())
key := string(record.Type()) + "::" + name
recordsMap[key] = record
// vSphere provider uses CoreDNS, which doesn't have rrs.List() function supported.
// Thus we use rrs.Get() to check every dnsHostname instead
if cloud.ProviderID() != fi.CloudProviderVSphere {
// TODO: We should change the filter to be a suffix match instead
//records, err := rrs.List("", "")
records, err := rrs.List()
if err != nil {
return fmt.Errorf("error listing DNS resource records for %q: %v", zone.Name(), err)
}
for _, record := range records {
name := dns.EnsureDotSuffix(record.Name())
key := string(record.Type()) + "::" + name
recordsMap[key] = record
}
}
changeset := rrs.StartChangeset()
@ -170,17 +174,39 @@ func precreateDNS(cluster *api.Cluster, cloud fi.Cloud) error {
for _, dnsHostname := range dnsHostnames {
dnsHostname = dns.EnsureDotSuffix(dnsHostname)
dnsRecord := recordsMap["A::"+dnsHostname]
found := false
if dnsRecord != nil {
rrdatas := dnsRecord.Rrdatas()
if len(rrdatas) > 0 {
glog.V(4).Infof("Found DNS record %s => %s; won't create", dnsHostname, rrdatas)
found = true
} else {
// This is probably an alias target; leave it alone...
glog.V(4).Infof("Found DNS record %s, but no records", dnsHostname)
found = true
if cloud.ProviderID() != fi.CloudProviderVSphere {
dnsRecord := recordsMap["A::"+dnsHostname]
if dnsRecord != nil {
rrdatas := dnsRecord.Rrdatas()
if len(rrdatas) > 0 {
glog.V(4).Infof("Found DNS record %s => %s; won't create", dnsHostname, rrdatas)
found = true
} else {
// This is probably an alias target; leave it alone...
glog.V(4).Infof("Found DNS record %s, but no records", dnsHostname)
found = true
}
}
} else {
dnsRecord, err := rrs.Get(dnsHostname)
if err != nil {
return fmt.Errorf("Failed to get DNS record %s with error: %v", dnsHostname, err)
}
if dnsRecord != nil {
if dnsRecord.Type() != "A" {
glog.V(4).Infof("Found DNS record %s with type %s, continue to create A type", dnsHostname, dnsRecord.Type())
} else {
rrdatas := dnsRecord.Rrdatas()
if len(rrdatas) > 0 {
glog.V(4).Infof("Found DNS record %s => %s; won't create", dnsHostname, rrdatas)
found = true
} else {
// This is probably an alias target; leave it alone...
glog.V(4).Infof("Found DNS record %s, but no records", dnsHostname)
found = true
}
}
}
}

View File

@ -31,14 +31,19 @@ import (
// Default Machine types for various types of instance group machine
const (
defaultNodeMachineTypeAWS = "t2.medium"
defaultNodeMachineTypeGCE = "n1-standard-2"
defaultNodeMachineTypeAWS = "t2.medium"
defaultNodeMachineTypeGCE = "n1-standard-2"
defaultNodeMachineTypeVSphere = "vsphere_node"
defaultBastionMachineTypeAWS = "t2.micro"
defaultBastionMachineTypeGCE = "f1-micro"
defaultBastionMachineTypeAWS = "t2.micro"
defaultBastionMachineTypeGCE = "f1-micro"
defaultBastionMachineTypeVSphere = "vsphere_bastion"
defaultMasterMachineTypeGCE = "n1-standard-1"
defaultMasterMachineTypeAWS = "m3.medium"
defaultMasterMachineTypeGCE = "n1-standard-1"
defaultMasterMachineTypeAWS = "m3.medium"
defaultMasterMachineTypeVSphere = "vsphere_master"
defaultVSphereNodeImage = "kops_ubuntu_16_04.ova"
)
var masterMachineTypeExceptions = map[string]string{
@ -153,6 +158,8 @@ func defaultNodeMachineType(cluster *api.Cluster) string {
return defaultNodeMachineTypeAWS
case fi.CloudProviderGCE:
return defaultNodeMachineTypeGCE
case fi.CloudProviderVSphere:
return defaultNodeMachineTypeVSphere
default:
glog.V(2).Infof("Cannot set default MachineType for CloudProvider=%q", cluster.Spec.CloudProvider)
return ""
@ -205,6 +212,8 @@ func defaultMasterMachineType(cluster *api.Cluster) string {
return defaultMasterMachineTypeAWS
case fi.CloudProviderGCE:
return defaultMasterMachineTypeGCE
case fi.CloudProviderVSphere:
return defaultMasterMachineTypeVSphere
default:
glog.V(2).Infof("Cannot set default MachineType for CloudProvider=%q", cluster.Spec.CloudProvider)
return ""
@ -218,6 +227,8 @@ func defaultBastionMachineType(cluster *api.Cluster) string {
return defaultBastionMachineTypeAWS
case fi.CloudProviderGCE:
return defaultBastionMachineTypeGCE
case fi.CloudProviderVSphere:
return defaultBastionMachineTypeVSphere
default:
glog.V(2).Infof("Cannot set default MachineType for CloudProvider=%q", cluster.Spec.CloudProvider)
return ""
@ -241,8 +252,9 @@ func defaultImage(cluster *api.Cluster, channel *api.Channel) string {
return image.Name
}
}
} else if fi.CloudProviderID(cluster.Spec.CloudProvider) == fi.CloudProviderVSphere {
return defaultVSphereNodeImage
}
glog.Infof("Cannot set default Image for CloudProvider=%q", cluster.Spec.CloudProvider)
return ""
}

View File

@ -66,6 +66,10 @@ func buildCloudupTags(cluster *api.Cluster) (sets.String, error) {
{
tags.Insert("_aws")
}
case "vsphere":
{
tags.Insert("_vsphere")
}
default:
return nil, fmt.Errorf("unknown CloudProvider %q", cluster.Spec.CloudProvider)

View File

@ -36,6 +36,7 @@ import (
"k8s.io/kops/pkg/model/components"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"os"
"strings"
"text/template"
)
@ -95,6 +96,7 @@ func (tf *TemplateFunctions) AddTo(dest template.FuncMap) {
// TODO: Only for GCE?
dest["EncodeGCELabel"] = gce.EncodeGCELabel
dest["DnsControllerImage"] = tf.DnsControllerImage
}
// SharedVPC is a simple helper function which makes the templates for a shared VPC clearer
@ -135,6 +137,9 @@ func (tf *TemplateFunctions) DnsControllerArgv() ([]string, error) {
argv = append(argv, "--dns=aws-route53")
case fi.CloudProviderGCE:
argv = append(argv, "--dns=google-clouddns")
case fi.CloudProviderVSphere:
argv = append(argv, "--dns=coredns")
argv = append(argv, "--dns-server="+*tf.cluster.Spec.CloudConfig.VSphereCoreDNSServer)
default:
return nil, fmt.Errorf("unhandled cloudprovider %q", tf.cluster.Spec.CloudProvider)
@ -158,3 +163,16 @@ func (tf *TemplateFunctions) DnsControllerArgv() ([]string, error) {
return argv, nil
}
// To use user-defined DNS Controller:
// 1. DOCKER_REGISTRY=[your docker hub repo] make dns-controller-push
// 2. export DNSCONTROLLER_IMAGE=[your docker hub repo]
// 3. make kops and create/apply cluster
//
// DnsControllerImage returns the dns-controller image repository to deploy:
// the DNSCONTROLLER_IMAGE environment variable if set, otherwise the default
// "kope/dns-controller".
func (tf *TemplateFunctions) DnsControllerImage() (string, error) {
	if image := os.Getenv("DNSCONTROLLER_IMAGE"); image != "" {
		return image, nil
	}
	return "kope/dns-controller", nil
}

View File

@ -185,6 +185,10 @@ func (t *TerraformTarget) Finish(taskMap map[string]fi.Task) error {
providerAWS := make(map[string]interface{})
providerAWS["region"] = t.Region
providersByName["aws"] = providerAWS
} else if t.Cloud.ProviderID() == fi.CloudProviderVSphere {
providerVSphere := make(map[string]interface{})
providerVSphere["region"] = t.Region
providersByName["vsphere"] = providerVSphere
}
outputVariables := make(map[string]interface{})

View File

@ -23,6 +23,7 @@ import (
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
"k8s.io/kops/upup/pkg/fi/cloudup/gce"
"k8s.io/kops/upup/pkg/fi/cloudup/vsphere"
"k8s.io/kubernetes/federation/pkg/dnsprovider"
"strings"
)
@ -96,6 +97,14 @@ func BuildCloud(cluster *api.Cluster) (fi.Cloud, error) {
}
cloud = awsCloud
}
case "vsphere":
{
vsphereCloud, err := vsphere.NewVSphereCloud(&cluster.Spec)
if err != nil {
return nil, err
}
cloud = vsphereCloud
}
default:
return nil, fmt.Errorf("unknown CloudProvider %q", cluster.Spec.CloudProvider)

View File

@ -0,0 +1,45 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
// vsphere_target represents API execution target for vSphere.
import "k8s.io/kops/upup/pkg/fi"
// VSphereAPITarget represents target for vSphere, where cluster deployment with take place.
type VSphereAPITarget struct {
Cloud *VSphereCloud
}
var _ fi.Target = &VSphereAPITarget{}
// NewVSphereAPITarget returns VSphereAPITarget instance for vSphere cloud provider.
func NewVSphereAPITarget(cloud *VSphereCloud) *VSphereAPITarget {
	target := &VSphereAPITarget{Cloud: cloud}
	return target
}
// Finish is no-op for vSphere cloud.
// It exists to satisfy the fi.Target interface; there is nothing to flush or
// commit after the vSphere tasks have run.
func (t *VSphereAPITarget) Finish(taskMap map[string]fi.Task) error {
	return nil
}
// ProcessDeletions always returns true for the vSphere target.
func (target *VSphereAPITarget) ProcessDeletions() bool {
	return true
}

View File

@ -0,0 +1,424 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
// vsphere_cloud is the entry point to vSphere. All operations that need access to vSphere should be housed here.
import (
"bytes"
"context"
"fmt"
"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/vmware/govmomi"
"github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kubernetes/federation/pkg/dnsprovider"
k8scoredns "k8s.io/kubernetes/federation/pkg/dnsprovider/providers/coredns"
"net/url"
"os"
"strings"
)
// VSphereCloud represents a vSphere cloud instance.
type VSphereCloud struct {
	Server        string // vCenter server host; used to build the /sdk endpoint URL
	Datacenter    string // datacenter name within the vCenter server
	Cluster       string // compute cluster whose resource pool hosts cloned VMs
	Username      string // vSphere API username (read from VSPHERE_USERNAME)
	Password      string // vSphere API password (read from VSPHERE_PASSWORD)
	Client        *govmomi.Client
	CoreDNSServer string // CoreDNS etcd endpoint URL used by the dnsprovider
	DNSZone       string // DNS zone managed for this cluster
}
const (
	// snapshotName is the well-known snapshot used as the base for linked clones.
	snapshotName string = "LinkCloneSnapshotPoint"
	// snapshotDesc is the description attached to snapshots kops creates.
	snapshotDesc string = "Snapshot created by kops"
	// cloudInitFile is the name of the cloud-init ISO uploaded for each VM.
	cloudInitFile string = "cloud-init.iso"
)

// Compile-time assertion that VSphereCloud implements fi.Cloud.
var _ fi.Cloud = &VSphereCloud{}
// ProviderID identifies this fi.Cloud implementation as vSphere.
func (c *VSphereCloud) ProviderID() fi.CloudProviderID {
	var id fi.CloudProviderID = fi.CloudProviderVSphere
	return id
}
// NewVSphereCloud returns a VSphereCloud instance for the given ClusterSpec.
// Credentials are read from the VSPHERE_USERNAME and VSPHERE_PASSWORD
// environment variables and copied into spec.CloudConfig so later phases
// can use them.
func NewVSphereCloud(spec *kops.ClusterSpec) (*VSphereCloud, error) {
	server := *spec.CloudConfig.VSphereServer
	datacenter := *spec.CloudConfig.VSphereDatacenter
	cluster := *spec.CloudConfig.VSphereResourcePool
	glog.V(2).Infof("Creating vSphere Cloud with server(%s), datacenter(%s), cluster(%s)", server, datacenter, cluster)
	dnsServer := *spec.CloudConfig.VSphereCoreDNSServer
	dnsZone := spec.DNSZone
	username := os.Getenv("VSPHERE_USERNAME")
	password := os.Getenv("VSPHERE_PASSWORD")
	if username == "" || password == "" {
		return nil, fmt.Errorf("Failed to detect vSphere username and password. Please set env variables: VSPHERE_USERNAME and VSPHERE_PASSWORD accordingly.")
	}
	u, err := url.Parse(fmt.Sprintf("https://%s/sdk", server))
	if err != nil {
		return nil, err
	}
	glog.V(2).Infof("Creating vSphere Cloud URL is %s", u)
	// Set credentials on the endpoint URL; govmomi logs in with them.
	u.User = url.UserPassword(username, password)
	c, err := govmomi.NewClient(context.TODO(), u, true)
	if err != nil {
		return nil, err
	}
	// Retry transient network errors up to 5 times.
	c.RoundTripper = vim25.Retry(c.RoundTripper, vim25.TemporaryNetworkError(5))
	vsphereCloud := &VSphereCloud{Server: server, Datacenter: datacenter, Cluster: cluster, Username: username, Password: password, Client: c, CoreDNSServer: dnsServer, DNSZone: dnsZone}
	spec.CloudConfig.VSphereUsername = fi.String(username)
	spec.CloudConfig.VSpherePassword = fi.String(password)
	// Do not log the struct with %+v here: it contains the vSphere password.
	glog.V(2).Infof("Created vSphere Cloud successfully: server=%s, datacenter=%s, cluster=%s", server, datacenter, cluster)
	return vsphereCloud, nil
}
// DNS returns the CoreDNS-backed dnsprovider interface for this vSphere
// cloud, configured against the cloud's etcd endpoint and DNS zone.
func (c *VSphereCloud) DNS() (dnsprovider.Interface, error) {
	config := "[global]\n" +
		"etcd-endpoints = " + c.CoreDNSServer + "\n" +
		"zones = " + c.DNSZone + "\n"
	provider, err := dnsprovider.GetDnsProvider(k8scoredns.ProviderName, bytes.NewReader([]byte(config)))
	if err != nil {
		return nil, fmt.Errorf("Error building (k8s) DNS provider: %v", err)
	}
	return provider, nil
}
// FindVPCInfo is part of the fi.Cloud interface; vSphere has no VPC
// concept, so it always returns (nil, nil).
func (c *VSphereCloud) FindVPCInfo(id string) (*fi.VPCInfo, error) {
	glog.Warning("FindVPCInfo not (yet) implemented on VSphere")
	return nil, nil
}
// CreateLinkClonedVm creates a linked clone of the given VM image,
// creating the base snapshot on the template first if it is not already
// present, and returns the managed object id of the new VM.
func (c *VSphereCloud) CreateLinkClonedVm(vmName, vmImage *string) (string, error) {
	f := find.NewFinder(c.Client.Client, true)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	dc, err := f.Datacenter(ctx, c.Datacenter)
	if err != nil {
		return "", err
	}
	f.SetDatacenter(dc)
	templateVm, err := f.VirtualMachine(ctx, *vmImage)
	if err != nil {
		return "", err
	}
	glog.V(2).Infof("Template VM ref is %+v", templateVm)
	datacenterFolders, err := dc.Folders(ctx)
	if err != nil {
		return "", err
	}
	// Create snapshot of the template VM if not already snapshotted.
	snapshot, err := createSnapshot(ctx, templateVm, snapshotName, snapshotDesc)
	if err != nil {
		return "", err
	}
	// Check errors before logging/using the looked-up values.
	clsComputeRes, err := f.ClusterComputeResource(ctx, c.Cluster)
	if err != nil {
		return "", err
	}
	glog.V(4).Infof("Cluster compute resource is %+v", clsComputeRes)
	resPool, err := clsComputeRes.ResourcePool(ctx)
	if err != nil {
		return "", err
	}
	glog.V(4).Infof("Cluster resource pool is %+v", resPool)
	if resPool == nil {
		return "", fmt.Errorf("No resource pool found for cluster %s", c.Cluster)
	}
	resPoolRef := resPool.Reference()
	snapshotRef := snapshot.Reference()
	// createNewChildDiskBacking makes the clone share the template's disks
	// via delta disks (linked clone) instead of copying them in full.
	cloneSpec := &types.VirtualMachineCloneSpec{
		Config: &types.VirtualMachineConfigSpec{
			Flags: &types.VirtualMachineFlagInfo{
				DiskUuidEnabled: fi.Bool(true),
			},
		},
		Location: types.VirtualMachineRelocateSpec{
			Pool:         &resPoolRef,
			DiskMoveType: "createNewChildDiskBacking",
		},
		Snapshot: &snapshotRef,
	}
	// Create a link cloned VM from the template VM's snapshot
	clonedVmTask, err := templateVm.Clone(ctx, datacenterFolders.VmFolder, *vmName, *cloneSpec)
	if err != nil {
		return "", err
	}
	clonedVmTaskInfo, err := clonedVmTask.WaitForResult(ctx, nil)
	if err != nil {
		return "", err
	}
	clonedVm := clonedVmTaskInfo.Result.(object.Reference)
	glog.V(2).Infof("Created VM %s successfully", clonedVm)
	return clonedVm.Reference().Value, nil
}
// PowerOn powers on the named VM and waits for the power-on task to
// complete, returning any task failure.
func (c *VSphereCloud) PowerOn(vm string) error {
	f := find.NewFinder(c.Client.Client, true)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	dc, err := f.Datacenter(ctx, c.Datacenter)
	if err != nil {
		return err
	}
	f.SetDatacenter(dc)
	vmRef, err := f.VirtualMachine(ctx, vm)
	if err != nil {
		return err
	}
	task, err := vmRef.PowerOn(ctx)
	if err != nil {
		return err
	}
	// Propagate the power-on task result instead of silently dropping it.
	return task.Wait(ctx)
}
// UploadAndAttachISO uploads the ISO file to the VM's datastore and
// inserts it into the VM's first CD-ROM device, connected at power-on.
func (c *VSphereCloud) UploadAndAttachISO(vm *string, isoFile string) error {
	f := find.NewFinder(c.Client.Client, true)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	dc, err := f.Datacenter(ctx, c.Datacenter)
	if err != nil {
		return err
	}
	f.SetDatacenter(dc)
	vmRef, err := f.VirtualMachine(ctx, *vm)
	if err != nil {
		return err
	}
	// Look up the VM's datastore so we know where to upload the ISO.
	var vmResult mo.VirtualMachine
	pc := property.DefaultCollector(c.Client.Client)
	err = pc.RetrieveOne(ctx, vmRef.Reference(), []string{"datastore"}, &vmResult)
	if err != nil {
		// Return the error rather than glog.Fatalf: a lookup failure
		// should not abort the whole process.
		return fmt.Errorf("Unable to retrieve VM summary for VM %s: %v", *vm, err)
	}
	glog.V(4).Infof("vm property collector result :%+v\n", vmResult)
	// We expect the VM to be on only 1 datastore
	dsRef := vmResult.Datastore[0].Reference()
	var dsResult mo.Datastore
	err = pc.RetrieveOne(ctx, dsRef, []string{"summary"}, &dsResult)
	if err != nil {
		return fmt.Errorf("Unable to retrieve datastore summary for datastore %s: %v", dsRef, err)
	}
	glog.V(4).Infof("datastore property collector result :%+v\n", dsResult)
	dsObj, err := f.Datastore(ctx, dsResult.Summary.Name)
	if err != nil {
		return err
	}
	p := soap.DefaultUpload
	dstIsoFile := getCloudInitFileName(*vm)
	glog.V(2).Infof("Uploading ISO file %s to datastore %+v, destination iso is %s\n", isoFile, dsObj, dstIsoFile)
	err = dsObj.UploadFile(ctx, isoFile, dstIsoFile, &p)
	if err != nil {
		return err
	}
	glog.V(2).Infof("Uploaded ISO file %s", isoFile)
	// Find the cd-rom device and insert the cloud init iso file into it.
	devices, err := vmRef.Device(ctx)
	if err != nil {
		return err
	}
	// Passing empty cd-rom name so that the first one gets returned.
	// Check the error before touching cdrom; it is nil on failure.
	cdrom, err := devices.FindCdrom("")
	if err != nil {
		return err
	}
	cdrom.Connectable.StartConnected = true
	iso := dsObj.Path(dstIsoFile)
	glog.V(2).Infof("Inserting ISO file %s into cd-rom", iso)
	return vmRef.EditDevice(ctx, devices.InsertIso(cdrom, iso))
}
// FindVMUUID returns the instance uuid (config.uuid) of the named VM.
func (c *VSphereCloud) FindVMUUID(vm *string) (string, error) {
	f := find.NewFinder(c.Client.Client, true)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	dc, err := f.Datacenter(ctx, c.Datacenter)
	if err != nil {
		return "", err
	}
	f.SetDatacenter(dc)
	vmRef, err := f.VirtualMachine(ctx, *vm)
	if err != nil {
		return "", err
	}
	// Retrieve only the config.uuid property for the VM.
	var vmResult mo.VirtualMachine
	pc := property.DefaultCollector(c.Client.Client)
	err = pc.RetrieveOne(ctx, vmRef.Reference(), []string{"config.uuid"}, &vmResult)
	if err != nil {
		return "", err
	}
	glog.V(4).Infof("vm property collector result :%+v\n", vmResult)
	glog.V(3).Infof("retrieved vm uuid as %q for vm %q", vmResult.Config.Uuid, *vm)
	return vmResult.Config.Uuid, nil
}
// GetVirtualMachines returns the VMs matching the given name patterns.
// When some patterns match nothing, VMs found for the other patterns are
// still returned, together with the last *find.NotFoundError so the
// caller can decide how to handle partial results.
func (c *VSphereCloud) GetVirtualMachines(args []string) ([]*object.VirtualMachine, error) {
	var out []*object.VirtualMachine
	if len(args) == 0 {
		return nil, errors.New("no argument")
	}
	f := find.NewFinder(c.Client.Client, true)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	dc, err := f.Datacenter(ctx, c.Datacenter)
	if err != nil {
		return nil, err
	}
	f.SetDatacenter(dc)
	var nfe error
	// List virtual machines for every argument
	for _, arg := range args {
		// Use the cancellable ctx (not context.TODO()) so list calls stop
		// if this operation is cancelled.
		vms, err := f.VirtualMachineList(ctx, arg)
		if err != nil {
			if _, ok := err.(*find.NotFoundError); ok {
				// Let caller decide how to handle NotFoundError
				nfe = err
				continue
			}
			return nil, err
		}
		out = append(out, vms...)
	}
	return out, nfe
}
// DeleteCloudInitISO removes the VM's cloud-init ISO from its datastore.
// A missing ISO file is logged and treated as success.
func (c *VSphereCloud) DeleteCloudInitISO(vm *string) error {
	f := find.NewFinder(c.Client.Client, true)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	dc, err := f.Datacenter(ctx, c.Datacenter)
	if err != nil {
		return err
	}
	f.SetDatacenter(dc)
	vmRef, err := f.VirtualMachine(ctx, *vm)
	if err != nil {
		return err
	}
	// Find the datastore the VM lives on; that is where the ISO was uploaded.
	var vmResult mo.VirtualMachine
	pc := property.DefaultCollector(c.Client.Client)
	err = pc.RetrieveOne(ctx, vmRef.Reference(), []string{"datastore"}, &vmResult)
	if err != nil {
		// Return the error rather than glog.Fatalf: deletion failures
		// should not abort the whole process.
		return fmt.Errorf("Unable to retrieve VM summary for VM %s: %v", *vm, err)
	}
	glog.V(4).Infof("vm property collector result :%+v\n", vmResult)
	// We expect the VM to be on only 1 datastore
	dsRef := vmResult.Datastore[0].Reference()
	var dsResult mo.Datastore
	err = pc.RetrieveOne(ctx, dsRef, []string{"summary"}, &dsResult)
	if err != nil {
		return fmt.Errorf("Unable to retrieve datastore summary for datastore %s: %v", dsRef, err)
	}
	glog.V(4).Infof("datastore property collector result :%+v\n", dsResult)
	dsObj, err := f.Datastore(ctx, dsResult.Summary.Name)
	if err != nil {
		return err
	}
	isoFileName := getCloudInitFileName(*vm)
	fileManager := dsObj.NewFileManager(dc, false)
	err = fileManager.DeleteFile(ctx, isoFileName)
	if err != nil {
		if types.IsFileNotFound(err) {
			glog.Warningf("ISO file not found: %q", isoFileName)
			return nil
		}
		return err
	}
	glog.V(2).Infof("Deleted ISO file %q", isoFileName)
	return nil
}
func getCloudInitFileName(vmName string) string {
return vmName + "/" + cloudInitFile
}

View File

@ -0,0 +1,105 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
// vsphere_utils houses various utility methods related to vSphere cloud.
import (
"context"
"github.com/golang/glog"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
"path"
"sync"
)
// snapshotLock serializes snapshot lookup+creation so that concurrent
// clones of the same template do not race to create the base snapshot twice.
var snapshotLock sync.Mutex

// createSnapshot returns the named snapshot of vm, creating it (and
// waiting for the creation task to finish) when it does not exist yet.
func createSnapshot(ctx context.Context, vm *object.VirtualMachine, snapshotName string, snapshotDesc string) (object.Reference, error) {
	snapshotLock.Lock()
	defer snapshotLock.Unlock()
	// Reuse an existing snapshot with this name if one is present.
	snapshotRef, err := findSnapshot(vm, ctx, snapshotName)
	if err != nil {
		return nil, err
	}
	glog.V(4).Infof("Template VM is %s and snapshot is %s", vm, snapshotRef)
	if snapshotRef != nil {
		return snapshotRef, nil
	}
	// The two false flags are govmomi's memory/quiesce options: no memory
	// dump and no guest quiescing — a plain disk snapshot is enough here.
	task, err := vm.CreateSnapshot(ctx, snapshotName, snapshotDesc, false, false)
	if err != nil {
		return nil, err
	}
	taskInfo, err := task.WaitForResult(ctx, nil)
	if err != nil {
		return nil, err
	}
	glog.Infof("taskInfo.Result is %s", taskInfo.Result)
	return taskInfo.Result.(object.Reference), nil
}
// snapshotMap indexes snapshot references by simple name, by snapshot id,
// and (below the root) by slash-joined path, so lookups can resolve
// duplicate names via the full path.
type snapshotMap map[string][]object.Reference

// add recursively indexes tree (and every child snapshot) under parent's path.
func (m snapshotMap) add(parent string, tree []types.VirtualMachineSnapshotTree) {
	for i, st := range tree {
		sname := st.Name
		names := []string{sname, st.Snapshot.Value}
		if parent != "" {
			sname = path.Join(parent, sname)
			// Add full path as an option to resolve duplicate names
			names = append(names, sname)
		}
		for _, name := range names {
			// Take &tree[i].Snapshot (not &st.Snapshot) so the stored
			// pointer refers to the slice element, not the loop copy.
			m[name] = append(m[name], &tree[i].Snapshot)
		}
		m.add(sname, st.ChildSnapshotList)
	}
}
// findSnapshot looks up a snapshot of v by name (or id, or path) and
// returns nil when the VM has no snapshot with that name.
func findSnapshot(v *object.VirtualMachine, ctx context.Context, name string) (object.Reference, error) {
	var vmProps mo.VirtualMachine
	if err := v.Properties(ctx, v.Reference(), []string{"snapshot"}, &vmProps); err != nil {
		return nil, err
	}
	if vmProps.Snapshot == nil || len(vmProps.Snapshot.RootSnapshotList) == 0 {
		return nil, nil
	}
	candidates := make(snapshotMap)
	candidates.add("", vmProps.Snapshot.RootSnapshotList)
	matches := candidates[name]
	if len(matches) == 0 {
		return nil, nil
	}
	if len(matches) > 1 {
		glog.Warningf("VM %s seems to have more than one snapshots with name %s. Using a random snapshot.", v, name)
	}
	return matches[0], nil
}

View File

@ -0,0 +1,73 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
// vsphere_volume_metadata houses the volume metadata and related methods for vSphere cloud.
import (
"encoding/json"
"strconv"
)
// VolumeMetadata carries the etcd-related information that AWS/GCE attach
// to volumes as tags. vSphere has no volume/vmdk tagging yet, so kops
// serializes this struct to JSON and passes it to the VM instead.
type VolumeMetadata struct {
	// EtcdClusterName is the name of the etcd cluster (main, events etc)
	EtcdClusterName string `json:"etcdClusterName,omitempty"`
	// EtcdNodeName is the name of a node in etcd cluster for which this volume will be used
	EtcdNodeName string `json:"etcdNodeName,omitempty"`
	// Members stores the configurations for each member of the cluster
	Members []EtcdMemberSpec `json:"etcdMembers,omitempty"`
	// VolumeId identifies the volume, e.g. "01".
	VolumeId string `json:"volumeId,omitempty"`
}

// EtcdMemberSpec describes one member of the etcd cluster associated with
// this volume.
type EtcdMemberSpec struct {
	// Name is the name of the member within the etcd cluster
	Name string `json:"name,omitempty"`
	// InstanceGroup is the instance group the member belongs to.
	InstanceGroup string `json:"instanceGroup,omitempty"`
}

// MarshalVolumeMetadata serializes the metadata slice to a JSON string.
func MarshalVolumeMetadata(v []VolumeMetadata) (string, error) {
	encoded, err := json.Marshal(v)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}

// UnmarshalVolumeMetadata parses a JSON string previously produced by
// MarshalVolumeMetadata.
func UnmarshalVolumeMetadata(text string) ([]VolumeMetadata, error) {
	var parsed []VolumeMetadata
	if err := json.Unmarshal([]byte(text), &parsed); err != nil {
		return nil, err
	}
	return parsed, nil
}
// GetVolumeId formats the given integer as a kops volume id by prefixing
// a "0", e.g. i=2 yields "02".
func GetVolumeId(i int) string {
	volumeID := "0" + strconv.Itoa(i)
	return volumeID
}
// GetMountPoint returns the path where the volume with the given id is
// expected to be mounted: /mnt/master-<volumeId>, e.g. /mnt/master-01.
func GetMountPoint(volumeId string) string {
	const mountPrefix = "/mnt/master-"
	return mountPrefix + volumeId
}

View File

@ -0,0 +1,278 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vspheretasks
// attachiso houses the task that creates cloud-init ISO file, uploads and attaches it to a VM on vSphere cloud.
import (
"bytes"
"fmt"
"github.com/golang/glog"
"github.com/pborman/uuid"
"io/ioutil"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/model"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/vsphere"
"net"
"net/url"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
)
// AttachISO represents the cloud-init ISO file attached to a VM on vSphere cloud.
//go:generate fitask -type=AttachISO
type AttachISO struct {
	Name            *string
	VM              *VirtualMachine         // VM the ISO is attached to; its create task is a dependency
	IG              *kops.InstanceGroup     // instance group the VM belongs to
	BootstrapScript *model.BootstrapScript  // renders the nodeup startup script
	EtcdClusters    []*kops.EtcdClusterSpec // etcd clusters used to build master volume metadata
}

// Compile-time interface assertions.
var _ fi.HasName = &AttachISO{}
var _ fi.HasDependencies = &AttachISO{}
// GetDependencies reports that this task must run after the task that
// creates the VM the ISO will be attached to.
func (o *AttachISO) GetDependencies(tasks map[string]fi.Task) []fi.Task {
	vmCreateTask := tasks["VirtualMachine/"+*o.VM.Name]
	if vmCreateTask == nil {
		glog.Fatalf("Unable to find create VM task %s dependency for AttachISO %s", *o.VM.Name, *o.Name)
	}
	return []fi.Task{vmCreateTask}
}
// GetName implements fi.HasName; it reports the task's name.
func (task *AttachISO) GetName() *string {
	return task.Name
}
// SetName implements fi.HasName; it overwrites the task's name.
func (task *AttachISO) SetName(name string) {
	task.Name = &name
}
// Run delegates to fi.DefaultDeltaRunMethod, which drives Find,
// CheckChanges and the Render* method for this task.
func (task *AttachISO) Run(c *fi.Context) error {
	glog.Info("AttachISO.Run invoked!")
	return fi.DefaultDeltaRunMethod(task, c)
}
// Find reports no existing state, so the task always renders.
func (task *AttachISO) Find(c *fi.Context) (*AttachISO, error) {
	glog.Info("AttachISO.Find invoked!")
	return nil, nil
}
// CheckChanges accepts any change set without validation.
func (_ *AttachISO) CheckChanges(actual, expected, changes *AttachISO) error {
	glog.Info("AttachISO.CheckChanges invoked!")
	return nil
}
// RenderVSphere builds the cloud-init ISO (nodeup script, DNS fixup, VM
// UUID and volume metadata) in a temp dir, then uploads and attaches it
// to the VM on the vSphere cloud.
func (_ *AttachISO) RenderVSphere(t *vsphere.VSphereAPITarget, a, e, changes *AttachISO) error {
	// Check the nodeup-resource error before using the resource: the
	// original error was overwritten unchecked on the next line.
	startupScript, err := changes.BootstrapScript.ResourceNodeUp(changes.IG)
	if err != nil {
		return fmt.Errorf("error building nodeup script: %v", err)
	}
	startupStr, err := startupScript.AsString()
	if err != nil {
		return fmt.Errorf("error rendering startup script: %v", err)
	}
	// Scratch directory for the ISO contents, removed when done.
	dir, err := ioutil.TempDir("", *changes.VM.Name)
	if err != nil {
		return err
	}
	defer os.RemoveAll(dir)
	// Need this in cloud config file for vSphere CloudProvider
	vmUUID, err := t.Cloud.FindVMUUID(changes.VM.Name)
	if err != nil {
		return err
	}
	isoFile, err := createISO(changes, startupStr, dir, t.Cloud.CoreDNSServer, vmUUID)
	if err != nil {
		glog.Errorf("Failed to createISO for vspheretasks, err: %v", err)
		return err
	}
	return t.Cloud.UploadAndAttachISO(changes.VM.Name, isoFile)
}
// createUserData renders the cloud-init user-data file into dir, filling
// the template placeholders with the nodeup script, a DNS fixup script,
// the VM UUID and (for masters) etcd volume metadata.
func createUserData(changes *AttachISO, startupStr string, dir string, dnsServer string, vmUUID string) error {
	// Indent every non-empty line of the nodeup script so it nests
	// correctly under the "content: |" key in the user-data file.
	indented := strings.Split(startupStr, "\n")
	for i := range indented {
		if len(indented[i]) > 0 {
			indented[i] = " " + indented[i]
		}
	}
	data := strings.Replace(userDataTemplate, "$SCRIPT", strings.Join(indented, "\n"), -1)
	// Derive the DNS host from the CoreDNS server URL and build the
	// script that points the VM's resolver at it.
	dnsURL, err := url.Parse(dnsServer)
	if err != nil {
		return err
	}
	dnsHost, _, err := net.SplitHostPort(dnsURL.Host)
	if err != nil {
		return err
	}
	dnsUpdateStr := " echo \"nameserver " + dnsHost + "\" >> /etc/resolvconf/resolv.conf.d/head" +
		"\n" + " resolvconf -u"
	data = strings.Replace(data, "$DNS_SCRIPT", dnsUpdateStr, -1)
	// Populate VM UUID information.
	data = strings.Replace(data, "$VM_UUID", " "+vmUUID+"\n", -1)
	// Populate volume metadata.
	if data, err = createVolumeScript(changes, data); err != nil {
		return err
	}
	userDataFile := filepath.Join(dir, "user-data")
	glog.V(4).Infof("User data file content: %s", data)
	if err = ioutil.WriteFile(userDataFile, []byte(data), 0644); err != nil {
		glog.Errorf("Unable to write user-data into file %s", userDataFile)
		return err
	}
	return nil
}
// createVolumeScript substitutes $VOLUME_SCRIPT in the user-data: masters
// get their serialized etcd volume metadata, other roles a placeholder note.
func createVolumeScript(changes *AttachISO, data string) (string, error) {
	if changes.IG.Spec.Role == kops.InstanceGroupRoleMaster {
		volsString, err := getVolMetadata(changes)
		if err != nil {
			return "", err
		}
		return strings.Replace(data, "$VOLUME_SCRIPT", " "+volsString, -1), nil
	}
	return strings.Replace(data, "$VOLUME_SCRIPT", " No volume metadata needed for "+string(changes.IG.Spec.Role)+".", -1), nil
}
// getVolMetadata builds and marshals vsphere.VolumeMetadata for every
// etcd cluster this instance group's master participates in. It fails if
// the instance group is not a member of one of the clusters.
func getVolMetadata(changes *AttachISO) (string, error) {
	var volsMetadata []vsphere.VolumeMetadata
	// Creating vsphere.VolumeMetadata using clusters EtcdClusterSpec
	for i, etcd := range changes.EtcdClusters {
		volMetadata := vsphere.VolumeMetadata{}
		volMetadata.EtcdClusterName = etcd.Name
		volMetadata.VolumeId = vsphere.GetVolumeId(i + 1)
		var members []vsphere.EtcdMemberSpec
		var thisNode string
		for _, member := range etcd.Members {
			if *member.InstanceGroup == changes.IG.Name {
				thisNode = member.Name
			}
			etcdMember := vsphere.EtcdMemberSpec{
				Name:          member.Name,
				InstanceGroup: *member.InstanceGroup,
			}
			members = append(members, etcdMember)
		}
		if thisNode == "" {
			return "", fmt.Errorf("Failed to construct volume metadata for %v InstanceGroup.", changes.IG.Name)
		}
		volMetadata.EtcdNodeName = thisNode
		volMetadata.Members = members
		volsMetadata = append(volsMetadata, volMetadata)
	}
	glog.V(4).Infof("Marshaling master vol metadata : %v", volsMetadata)
	volsString, err := vsphere.MarshalVolumeMetadata(volsMetadata)
	// Check the marshal error before logging or returning the result.
	if err != nil {
		return "", err
	}
	glog.V(4).Infof("Marshaled master vol metadata: %v", volsString)
	return volsString, nil
}
func createMetaData(dir string, vmName string) error {
data := strings.Replace(metaDataTemplate, "$INSTANCE_ID", uuid.NewUUID().String(), -1)
data = strings.Replace(data, "$LOCAL_HOST_NAME", vmName, -1)
glog.V(4).Infof("Meta data file content: %s", string(data))
metaDataFile := filepath.Join(dir, "meta-data")
if err := ioutil.WriteFile(metaDataFile, []byte(data), 0644); err != nil {
glog.Errorf("Unable to write meta-data into file %s", metaDataFile)
return err
}
return nil
}
// createISO generates the cloud-init ISO (user-data + meta-data) for the
// VM in dir and returns the path of the ISO file. It shells out to
// mkisofs on darwin and genisoimage on linux.
func createISO(changes *AttachISO, startupStr string, dir string, dnsServer, vmUUID string) (string, error) {
	err := createUserData(changes, startupStr, dir, dnsServer, vmUUID)
	if err != nil {
		return "", err
	}
	err = createMetaData(dir, *changes.VM.Name)
	if err != nil {
		return "", err
	}
	isoFile := filepath.Join(dir, *changes.VM.Name+".iso")
	var commandName string
	// Use "goos" rather than "os" for the switch variable so the os
	// package is not shadowed.
	switch goos := runtime.GOOS; goos {
	case "darwin":
		commandName = "mkisofs"
	case "linux":
		commandName = "genisoimage"
	default:
		return "", fmt.Errorf("Cannot generate ISO file %s. Unsupported operating system (%s)!!!", isoFile, goos)
	}
	cmd := exec.Command(commandName, "-o", isoFile, "-volid", "cidata", "-joliet", "-rock", dir)
	var out bytes.Buffer
	cmd.Stdout = &out
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	err = cmd.Run()
	if err != nil {
		glog.Errorf("Error %s occurred while executing command %+v", err, cmd)
		return "", err
	}
	glog.V(4).Infof("%s std output : %s\n", commandName, out.String())
	glog.V(4).Infof("%s std error : %s\n", commandName, stderr.String())
	return isoFile, nil
}

View File

@ -0,0 +1,49 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vspheretasks
// userDataTemplate is the cloud-init user-data document placed in the ISO.
// createUserData/createVolumeScript substitute the $SCRIPT, $DNS_SCRIPT,
// $VM_UUID and $VOLUME_SCRIPT placeholders; runcmd then fixes the VM's DNS
// and runs the nodeup bootstrap script on first boot.
const userDataTemplate = `#cloud-config
write_files:
- content: |
$SCRIPT
owner: root:root
path: /root/script.sh
permissions: "0644"
- content: |
$DNS_SCRIPT
owner: root:root
path: /root/update_dns.sh
permissions: "0644"
- content: |
$VM_UUID
owner: root:root
path: /etc/vmware/vm_uuid
permissions: "0644"
- content: |
$VOLUME_SCRIPT
owner: root:root
path: /vol-metadata/metadata.json
permissions: "0644"
runcmd:
- bash /root/update_dns.sh 2>&1 > /var/log/update_dns.log
- bash /root/script.sh 2>&1 > /var/log/script.log`
// metaDataTemplate is the cloud-init meta-data document; createMetaData
// substitutes $INSTANCE_ID and $LOCAL_HOST_NAME.
const metaDataTemplate = `instance-id: $INSTANCE_ID
local-hostname: $LOCAL_HOST_NAME`

View File

@ -0,0 +1,84 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vspheretasks
// virtualmachine houses task that creates VM on vSphere cloud.
import (
"github.com/golang/glog"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/vsphere"
)
// VirtualMachine represents a VMware VM
//go:generate fitask -type=VirtualMachine
type VirtualMachine struct {
	// Name is the name of the VM to create; it also serves as its id.
	Name *string
	// VMTemplateName is the template VM to linked-clone the VM from.
	VMTemplateName *string
}

// Compile-time interface assertions.
var _ fi.CompareWithID = &VirtualMachine{}
var _ fi.HasName = &VirtualMachine{}
// GetName implements fi.HasName; it reports the VM task's name.
func (vm *VirtualMachine) GetName() *string {
	return vm.Name
}
// SetName implements fi.HasName; it overwrites the VM task's name.
func (vm *VirtualMachine) SetName(name string) {
	vm.Name = &name
}
// String renders the task via fi.TaskAsString for readable logging.
func (vm *VirtualMachine) String() string {
	return fi.TaskAsString(vm)
}
// CompareWithID implements fi.CompareWithID, using the VM name as its id.
func (vm *VirtualMachine) CompareWithID() *string {
	glog.V(4).Info("VirtualMachine.CompareWithID invoked!")
	return vm.Name
}
// Find reports no existing VM, so the task always renders.
func (vm *VirtualMachine) Find(c *fi.Context) (*VirtualMachine, error) {
	glog.V(4).Info("VirtualMachine.Find invoked!")
	return nil, nil
}
// Run delegates to fi.DefaultDeltaRunMethod, which drives Find,
// CheckChanges and RenderVSphere for this task.
func (vm *VirtualMachine) Run(c *fi.Context) error {
	glog.V(4).Info("VirtualMachine.Run invoked!")
	return fi.DefaultDeltaRunMethod(vm, c)
}
// CheckChanges accepts any change set without validation, for now.
func (_ *VirtualMachine) CheckChanges(actual, expected, changes *VirtualMachine) error {
	glog.V(4).Info("VirtualMachine.CheckChanges invoked!")
	return nil
}
// RenderVSphere performs the actual work: it creates the VM as a linked
// clone of the configured template. The clone's id is not needed here,
// so only the error is propagated.
func (_ *VirtualMachine) RenderVSphere(t *vsphere.VSphereAPITarget, a, e, changes *VirtualMachine) error {
	glog.V(4).Infof("VirtualMachine.RenderVSphere invoked with a(%+v) e(%+v) and changes(%+v)", a, e, changes)
	_, err := t.Cloud.CreateLinkClonedVm(changes.Name, changes.VMTemplateName)
	return err
}

View File

@ -0,0 +1,81 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vspheretasks
// vmpoweron houses task that powers on VM on vSphere cloud.
import (
"github.com/golang/glog"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup/vsphere"
)
// VMPowerOn powers on a VMware VM
//go:generate fitask -type=VMPowerOn
type VMPowerOn struct {
	Name *string
	// AttachISO is the task that prepares the VM's cloud-init ISO; it must
	// complete before the VM is powered on.
	AttachISO *AttachISO
}

// Compile-time interface assertions.
var _ fi.HasName = &VMPowerOn{}
var _ fi.HasDependencies = &VMPowerOn{}
// GetDependencies reports that this task must run after the AttachISO
// task that prepares the VM's cloud-init ISO.
func (o *VMPowerOn) GetDependencies(tasks map[string]fi.Task) []fi.Task {
	attachISOTask := tasks["AttachISO/"+*o.AttachISO.Name]
	if attachISOTask == nil {
		glog.Fatalf("Unable to find attachISO task %s dependency for VMPowerOn %s", *o.AttachISO.Name, *o.Name)
	}
	return []fi.Task{attachISOTask}
}
// GetName implements fi.HasName; it reports the task's name.
func (task *VMPowerOn) GetName() *string {
	return task.Name
}
// SetName implements fi.HasName; it overwrites the task's name.
func (task *VMPowerOn) SetName(name string) {
	task.Name = &name
}
// Run delegates to fi.DefaultDeltaRunMethod, which drives Find,
// CheckChanges and RenderVSphere for this task.
func (task *VMPowerOn) Run(c *fi.Context) error {
	glog.Info("VMPowerOn.Run invoked!")
	return fi.DefaultDeltaRunMethod(task, c)
}
// Find reports no existing state, so the task always renders.
func (task *VMPowerOn) Find(c *fi.Context) (*VMPowerOn, error) {
	glog.Info("VMPowerOn.Find invoked!")
	return nil, nil
}
// CheckChanges accepts any change set without validation, for now.
func (_ *VMPowerOn) CheckChanges(actual, expected, changes *VMPowerOn) error {
	glog.Info("VMPowerOn.CheckChanges invoked!")
	return nil
}
// RenderVSphere powers on the VM that the AttachISO task prepared.
func (_ *VMPowerOn) RenderVSphere(t *vsphere.VSphereAPITarget, a, e, changes *VMPowerOn) error {
	vmName := *changes.AttachISO.VM.Name
	glog.V(2).Infof("VMPowerOn.RenderVSphere invoked for vm %s", vmName)
	return t.Cloud.PowerOn(vmName)
}