mirror of https://github.com/kubernetes/kops.git
Remove 'kops import'
This commit is contained in:
parent
002a1f7fd3
commit
c904c743da
|
@ -36,8 +36,6 @@ go_library(
|
|||
"get_instances.go",
|
||||
"get_keypairs.go",
|
||||
"get_secrets.go",
|
||||
"import.go",
|
||||
"import_cluster.go",
|
||||
"main.go",
|
||||
"replace.go",
|
||||
"rollingupdate.go",
|
||||
|
|
|
@ -1,48 +0,0 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
"k8s.io/kubectl/pkg/util/templates"
|
||||
)
|
||||
|
||||
var (
|
||||
importLong = templates.LongDesc(i18n.T(`
|
||||
Imports a kubernetes cluster created by kube-up.sh into a state store. This command
|
||||
only support AWS clusters at this time.`))
|
||||
|
||||
importExample = templates.Examples(i18n.T(`
|
||||
# Import a cluster
|
||||
kops import cluster --name k8s-cluster.example.com --region us-east-1 \
|
||||
--state=s3://k8s-cluster.example.com`))
|
||||
|
||||
importShort = i18n.T(`Import a cluster.`)
|
||||
)
|
||||
|
||||
// importCmd represents the import command
|
||||
var importCmd = &cobra.Command{
|
||||
Use: "import",
|
||||
Short: importShort,
|
||||
Long: importLong,
|
||||
Example: importExample,
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCommand.AddCommand(importCmd)
|
||||
}
|
|
@ -1,95 +0,0 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
|
||||
"k8s.io/kops/upup/pkg/kutil"
|
||||
)
|
||||
|
||||
type ImportClusterCmd struct {
|
||||
Region string
|
||||
}
|
||||
|
||||
var importCluster ImportClusterCmd
|
||||
|
||||
func init() {
|
||||
cmd := &cobra.Command{
|
||||
Use: "cluster",
|
||||
Short: importShort,
|
||||
Long: importLong,
|
||||
Example: importExample,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
ctx := context.TODO()
|
||||
err := importCluster.Run(ctx)
|
||||
if err != nil {
|
||||
exitWithError(err)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
importCmd.AddCommand(cmd)
|
||||
|
||||
cmd.Flags().StringVar(&importCluster.Region, "region", "", "region")
|
||||
}
|
||||
|
||||
func (c *ImportClusterCmd) Run(ctx context.Context) error {
|
||||
if c.Region == "" {
|
||||
return fmt.Errorf("--region is required")
|
||||
}
|
||||
clusterName := rootCommand.clusterName
|
||||
if clusterName == "" {
|
||||
return fmt.Errorf("--name is required")
|
||||
}
|
||||
|
||||
tags := map[string]string{awsup.TagClusterName: clusterName}
|
||||
cloud, err := awsup.NewAWSCloud(c.Region, tags)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error initializing AWS client: %v", err)
|
||||
}
|
||||
|
||||
clientset, err := rootCommand.Clientset()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cluster, err := clientset.GetCluster(ctx, clusterName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if cluster != nil {
|
||||
return fmt.Errorf("cluster already exists: %q", clusterName)
|
||||
}
|
||||
|
||||
d := &kutil.ImportCluster{}
|
||||
d.ClusterName = clusterName
|
||||
d.Cloud = cloud
|
||||
d.Clientset = clientset
|
||||
|
||||
err = d.ImportAWSCluster(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("\nImported settings for cluster %q\n", clusterName)
|
||||
|
||||
return nil
|
||||
}
|
|
@ -54,7 +54,6 @@ For a better viewing experience please check out our live documentation site at
|
|||
* [Cluster addons](operations/addons.md)
|
||||
* [Cluster configuration management](changing_configuration.md)
|
||||
* [Cluster desired configuration creation from template](operations/cluster_template.md)
|
||||
* [Cluster upgrades and migrations](operations/cluster_upgrades_and_migrations.md)
|
||||
* [`etcd` volume encryption setup](operations/etcd_backup_restore_encryption.md#etcd-volume-encryption)
|
||||
* [`etcd` backup/restore](operations/etcd_backup_restore_encryption.md#backing-up-etcd)
|
||||
* [GPU setup](gpu.md)
|
||||
|
|
|
@ -44,7 +44,6 @@ kOps is Kubernetes Operations.
|
|||
* [kops edit](kops_edit.md) - Edit clusters and other resources.
|
||||
* [kops export](kops_export.md) - Export configuration.
|
||||
* [kops get](kops_get.md) - Get one or many resources.
|
||||
* [kops import](kops_import.md) - Import a cluster.
|
||||
* [kops replace](kops_replace.md) - Replace cluster resources.
|
||||
* [kops rolling-update](kops_rolling-update.md) - Rolling update a cluster.
|
||||
* [kops set](kops_set.md) - Set fields on clusters and other resources.
|
||||
|
|
|
@ -1,51 +0,0 @@
|
|||
|
||||
<!--- This file is automatically generated by make gen-cli-docs; changes should be made in the go CLI command code (under cmd/kops) -->
|
||||
|
||||
## kops import
|
||||
|
||||
Import a cluster.
|
||||
|
||||
### Synopsis
|
||||
|
||||
Imports a kubernetes cluster created by kube-up.sh into a state store. This command only support AWS clusters at this time.
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
# Import a cluster
|
||||
kops import cluster --name k8s-cluster.example.com --region us-east-1 \
|
||||
--state=s3://k8s-cluster.example.com
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for import
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--config string yaml config file (default is $HOME/.kops.yaml)
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files (default true)
|
||||
--name string Name of cluster. Overrides KOPS_CLUSTER_NAME environment variable
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
--state string Location of state storage (kops 'config' file). Overrides KOPS_STATE_STORE environment variable
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level number for the log level verbosity
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kops](kops.md) - kOps is Kubernetes Operations.
|
||||
* [kops import cluster](kops_import_cluster.md) - Import a cluster.
|
||||
|
|
@ -1,55 +0,0 @@
|
|||
|
||||
<!--- This file is automatically generated by make gen-cli-docs; changes should be made in the go CLI command code (under cmd/kops) -->
|
||||
|
||||
## kops import cluster
|
||||
|
||||
Import a cluster.
|
||||
|
||||
### Synopsis
|
||||
|
||||
Imports a kubernetes cluster created by kube-up.sh into a state store. This command only support AWS clusters at this time.
|
||||
|
||||
```
|
||||
kops import cluster [flags]
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
```
|
||||
# Import a cluster
|
||||
kops import cluster --name k8s-cluster.example.com --region us-east-1 \
|
||||
--state=s3://k8s-cluster.example.com
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
```
|
||||
-h, --help help for cluster
|
||||
--region string region
|
||||
```
|
||||
|
||||
### Options inherited from parent commands
|
||||
|
||||
```
|
||||
--add_dir_header If true, adds the file directory to the header of the log messages
|
||||
--alsologtostderr log to standard error as well as files
|
||||
--config string yaml config file (default is $HOME/.kops.yaml)
|
||||
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
|
||||
--log_dir string If non-empty, write log files in this directory
|
||||
--log_file string If non-empty, use this log file
|
||||
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
|
||||
--logtostderr log to standard error instead of files (default true)
|
||||
--name string Name of cluster. Overrides KOPS_CLUSTER_NAME environment variable
|
||||
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
|
||||
--skip_headers If true, avoid header prefixes in the log messages
|
||||
--skip_log_headers If true, avoid headers when opening log files
|
||||
--state string Location of state storage (kops 'config' file). Overrides KOPS_STATE_STORE environment variable
|
||||
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
|
||||
-v, --v Level number for the log level verbosity
|
||||
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
|
||||
```
|
||||
|
||||
### SEE ALSO
|
||||
|
||||
* [kops import](kops_import.md) - Import a cluster.
|
||||
|
|
@ -1,232 +0,0 @@
|
|||
# Cluster Version Upgrades and Migrations
|
||||
|
||||
At some point you will almost definitely want to upgrade the Kubernetes version of your cluster, or even migrate from a cluster managed/provisioned by another tool to one managed by `kops`. There are a few different ways to accomplish this depending on your existing cluster situation and any requirements for zero-downtime migrations.
|
||||
|
||||
- Upgrade an existing `kube-up` managed cluster to one managed by `kops`
|
||||
+ [The simple method with downtime](#kube-up---kops-downtime)
|
||||
+ [The more complex method with zero-downtime](#kube-up---kops-sans-downtime)
|
||||
- [Upgrade a `kops` cluster from one Kubernetes version to another](updates_and_upgrades.md)
|
||||
|
||||
## `kube-up` -> `kops`, with downtime
|
||||
|
||||
`kops` lets you upgrade an existing 1.x cluster, installed using `kube-up`, to a cluster managed by `kops` running the latest kubernetes version (or the version of your choice).
|
||||
|
||||
**This is an experimental and slightly risky procedure, so we recommend backing up important data before proceeding.
|
||||
Take a snapshot of your EBS volumes; export all your data from kubectl etc.**
|
||||
|
||||
Limitations:
|
||||
|
||||
* kOps splits etcd onto two volumes now: `main` and `events`. We will keep the `main` data, but you will lose your events history.
|
||||
* Doubtless others not yet known - please open issues if you encounter them!
|
||||
|
||||
### Overview
|
||||
|
||||
There are a few steps to upgrade a kubernetes cluster from 1.2 to 1.3:
|
||||
|
||||
* First you import the existing cluster state, so you can see and edit the configuration
|
||||
* You verify the cluster configuration
|
||||
* You move existing AWS resources to your new cluster
|
||||
* You bring up the new cluster
|
||||
* You can then delete the old cluster and its associated resources
|
||||
|
||||
### Importing the existing cluster
|
||||
|
||||
The `import cluster` command reverse engineers an existing cluster, and creates a cluster configuration.
|
||||
|
||||
Make sure you have set `export KOPS_STATE_STORE=s3://<mybucket>`
|
||||
|
||||
Then import the cluster; setting `--name` and `--region` to match the old cluster. If you're not sure of the old cluster name, you can find it by looking at the `KubernetesCluster` tag on your AWS resources.
|
||||
|
||||
```
|
||||
export OLD_NAME=kubernetes
|
||||
export REGION=us-west-2
|
||||
kops import cluster --region ${REGION} --name ${OLD_NAME}
|
||||
```
|
||||
|
||||
### Verify the cluster configuration
|
||||
|
||||
Now have a look at the cluster configuration, to make sure it looks right. If it doesn't, please open an issue.
|
||||
```
|
||||
kops get cluster ${OLD_NAME} -oyaml
|
||||
```
|
||||
|
||||
## Move resources to a new cluster
|
||||
|
||||
The upgrade moves some resources so they will be adopted by the new cluster. There are a number of things this step does:
|
||||
|
||||
* It resizes existing autoscaling groups to size 0
|
||||
* It will stop the existing master
|
||||
* It detaches the master EBS volume from the master
|
||||
* It re-tags resources to associate them with the new cluster: volumes, ELBs
|
||||
* It re-tags the VPC to associate it with the new cluster
|
||||
|
||||
The upgrade procedure forces you to choose a new cluster name (e.g. `k8s.mydomain.com`)
|
||||
|
||||
```
|
||||
export NEW_NAME=k8s.mydomain.com
|
||||
kops toolbox convert-imported --newname ${NEW_NAME} --name ${OLD_NAME}
|
||||
```
|
||||
|
||||
If you now list the clusters, you should see both the old cluster & the new cluster
|
||||
```
|
||||
kops get clusters
|
||||
```
|
||||
|
||||
You can also list the instance groups: `kops get ig --name ${NEW_NAME}`
|
||||
|
||||
### Import the SSH public key
|
||||
|
||||
The SSH public key is not easily retrieved from the old cluster, so you must add it:
|
||||
```
|
||||
kops create secret --name ${NEW_NAME} sshpublickey admin -i ~/.ssh/id_rsa.pub
|
||||
```
|
||||
|
||||
### Bring up the new cluster
|
||||
|
||||
Use the update command to bring up the new cluster:
|
||||
```
|
||||
kops update cluster ${NEW_NAME}
|
||||
```
|
||||
|
||||
Things to check are that it is reusing the existing volume for the _main_ etcd cluster (but not the events clusters).
|
||||
|
||||
And then when you are happy:
|
||||
```
|
||||
kops update cluster ${NEW_NAME} --yes
|
||||
```
|
||||
|
||||
|
||||
### Export kubecfg settings to access the new cluster
|
||||
|
||||
You can export a kubecfg (although update cluster did this automatically): `kops export kubecfg ${NEW_NAME}`
|
||||
|
||||
### Workaround for secret import failure
|
||||
|
||||
The import procedure tries to preserve the CA certificates, but unfortunately this isn't supported in kubernetes until [#34029](https://github.com/kubernetes/kubernetes/pull/34029) ships (should be in 1.5).
|
||||
|
||||
So you will need to delete the service-accounts, so they can be recreated with the correct keys.
|
||||
|
||||
Unfortunately, until you do this, some services (most notably internal & external DNS) will not work.
|
||||
Because of that you must SSH to the master to do this repair.
|
||||
|
||||
You can get the public IP address of the master from the AWS console, or by doing this:
|
||||
|
||||
```
|
||||
aws ec2 --region $REGION describe-instances \
|
||||
--filter Name=tag:KubernetesCluster,Values=${NEW_NAME} \
|
||||
Name=tag-key,Values=k8s.io/role/master \
|
||||
Name=instance-state-name,Values=running \
|
||||
--query Reservations[].Instances[].PublicIpAddress \
|
||||
--output text
|
||||
```
|
||||
|
||||
Then `ssh admin@<ip>` (the SSH key will be the one you added above, i.e. `~/.ssh/id_rsa.pub`), and run:
|
||||
|
||||
First check that the apiserver is running:
|
||||
```
|
||||
kubectl get nodes
|
||||
```
|
||||
|
||||
You should see only one node (the master). Then run
|
||||
```
|
||||
NS=`kubectl get namespaces -o 'jsonpath={.items[*].metadata.name}'`
|
||||
for i in ${NS}; do kubectl get secrets --namespace=${i} --no-headers | grep "kubernetes.io/service-account-token" | awk '{print $1}' | xargs -I {} kubectl delete secret --namespace=$i {}; done
|
||||
sleep 60 # Allow for new secrets to be created
|
||||
kubectl delete pods -lk8s-app=dns-controller --namespace=kube-system
|
||||
kubectl delete pods -lk8s-app=kube-dns --namespace=kube-system
|
||||
kubectl delete pods -lk8s-app=kube-dns-autoscaler --namespace=kube-system
|
||||
```
|
||||
|
||||
You probably also want to delete the imported DNS services from prior versions:
|
||||
```
|
||||
kubectl delete rc -lk8s-app=kube-dns --namespace=kube-system
|
||||
```
|
||||
|
||||
|
||||
Within a few minutes the new cluster should be running.
|
||||
|
||||
Try `kubectl get nodes --show-labels`, `kubectl get pods --all-namespaces` etc until you are sure that all is well.
|
||||
|
||||
This should work even without being SSH-ed into the master, although it can take a few minutes for DNS to propagate. If it doesn't work, double-check that you have specified a valid domain name for your cluster, that records have been created in Route53, and that you can resolve those records from your machine (using `nslookup` or `dig`).
|
||||
|
||||
### Other fixes
|
||||
|
||||
* If you're using a manually created ELB, the auto-scaling groups change, so you will need to reconfigure your ELBs to include the new auto-scaling group(s).
|
||||
|
||||
* It is recommended to delete any old kubernetes system services that we might have imported (and replace them with newer versions):
|
||||
|
||||
```
|
||||
kubectl delete rc -lk8s-app=kube-dns --namespace=kube-system
|
||||
|
||||
kubectl delete rc -lk8s-app=elasticsearch-logging --namespace=kube-system
|
||||
kubectl delete rc -lk8s-app=kibana-logging --namespace=kube-system
|
||||
kubectl delete rc -lk8s-app=kubernetes-dashboard --namespace=kube-system
|
||||
kubectl delete rc -lk8s-app=influxGrafana --namespace=kube-system
|
||||
|
||||
kubectl delete deployment -lk8s-app=heapster --namespace=kube-system
|
||||
```
|
||||
|
||||
## Delete remaining resources of the old cluster
|
||||
|
||||
`kops delete cluster ${OLD_NAME}`
|
||||
|
||||
```
|
||||
TYPE NAME ID
|
||||
autoscaling-config kubernetes-minion-group-us-west-2a kubernetes-minion-group-us-west-2a
|
||||
autoscaling-group kubernetes-minion kubernetes-minion-group-us-west-2a
|
||||
instance kubernetes-master i-67af2ec8
|
||||
```
|
||||
|
||||
And once you've confirmed it looks right, run with `--yes`
|
||||
|
||||
You will also need to release the old ElasticIP manually.
|
||||
|
||||
## `kube-up` -> `kops`, sans downtime
|
||||
|
||||
### Overview
|
||||
|
||||
This method provides zero-downtime when migrating a cluster from `kube-up` to `kops`. It does so by creating a logically separate `kops`-managed cluster in the existing `kube-up` VPC and then swapping the DNS entries (or your reverse proxy's upstream) to point to the new cluster's services.
|
||||
|
||||
Limitations:
|
||||
|
||||
- If you're using the default networking (`kubenet`), there is a account limit of 50 entries in a VPC's route table. If your cluster contains more than ~25 nodes, this strategy, as-is, will not work.
|
||||
+ Shifting to a CNI-compatible overlay network like `weave`, `kopeio-vxlan` (`kopeio`), `calico`, `canal`, `romana`, and similar. See the [kOps networking docs](../networking.md) for more information.
|
||||
+ One solution is to gradually shift traffic from one cluster to the other, scaling down the number of nodes on the old cluster, and scaling up the number of nodes on the new cluster.
|
||||
|
||||
### Steps
|
||||
|
||||
1. If using another service to manage a domain's DNS records, delegate cluster-level DNS resolution to Route53 by adding appropriate NS records pointing `cluster.example.com` to Route53's Hosted Zone's nameservers.
|
||||
2. Create the new cluster's configuration files with kops. For example:
|
||||
- `kops create cluster --cloud=aws --zones=us-east-1a,us-east-1b --admin-access=12.34.56.78/32 --dns-zone=cluster.example.com --kubernetes-version=1.4.0 --node-count=14 --node-size=c3.xlarge --master-zones=us-east-1a --master-size=m4.large --vpc=vpc-123abcdef --network-cidr=172.20.0.0/16 cluster.example.com`
|
||||
- `--vpc` is the resource id of the existing VPC.
|
||||
- `--network-cidr` is the CIDR of the existing VPC.
|
||||
- note that `kops` will propose re-naming the existing VPC but the change never occurs.
|
||||
- After this process you can manually rename the VPC for consistency.
|
||||
3. Verify that the CIDR on each of the zone subnets does not overlap with an existing subnet's.
|
||||
4. Verify the planned changes with `kops update cluster cluster.example.com`
|
||||
5. Create the cluster with `kops update cluster cluster.example.com --yes`
|
||||
6. Wait around for the cluster to fully come up and be available. `k get nodes` should return `(master + minions) = 15` available nodes.
|
||||
7. (Optional) Create the Dashboard with `kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml`
|
||||
8. Deploy the existing resource configuration to the new cluster.
|
||||
9. Confirm that pods on the new cluster are able to access remote resources.
|
||||
- For AWS-hosted services, add the generated `nodes.cluster.example.com` security group to the resources that may need it (i.e. ElastiCache, RDS, etc).
|
||||
10. Confirm that your application works as expected by hitting the services directly.
|
||||
- If you have a `LoadBalancer` service, you should be able to access the ELB's DNS name directly (although perhaps with an SSL error) and use your application as expected.
|
||||
11. Transition traffic from the old cluster to the new cluster. This depends a bit on your infrastructure, but
|
||||
- if using a DNS server, update the `CNAME` record for `example.com` to point to the new ELB's DNS name.
|
||||
- if using a reverse proxy, update the upstream to point to the new ELB's DNS name.
|
||||
- note that if you're proxying through Cloudflare or similar, changes are instantaneous because it's technically a reverse proxy and not a DNS record.
|
||||
- if not using Cloudflare or similar, you'll want to update your DNS record's TTL to a very low duration about 48 hours in advance of this change (and then change it back to the previous value once the shift has been finalized).
|
||||
12. Rejoice.
|
||||
13. Once traffic has shifted from the old cluster, delete the old resources after confirming that traffic has stabilized and that no new errors are generated.
|
||||
- autoscaling groups
|
||||
+ turn the ASG down to 0 nodes to delete the instances
|
||||
- launch configurations
|
||||
- all associated EBS volumes (some may not be released after the instances terminate)
|
||||
- security groups (`tag:KubernetesCluster : kubernetes`)
|
||||
|
||||
## Recovery/Rollback
|
||||
|
||||
The only part of this procedure that should affect the users actively using the site is the DNS swap, which should be relatively instantaneous because we're using Cloudflare as a reverse proxy, not just as a nameserver.
|
||||
|
||||
To revert back to the old cluster, simply re-swap the entries pointing to the new cluster with the entries from the old cluster.
|
|
@ -42,8 +42,6 @@ The `kops upgrade` command also automates checking for and applying updates.
|
|||
|
||||
It is recommended to run the latest version of kOps to ensure compatibility with the target kubernetesVersion. When applying a Kubernetes minor version upgrade (e.g. `v1.5.3` to `v1.6.0`), you should confirm that the target kubernetesVersion is compatible with the [current kOps release](https://github.com/kubernetes/kops/releases).
|
||||
|
||||
Note: if you want to upgrade from a `kube-up` installation, please see the instructions for [how to upgrade kubernetes installed with kube-up](cluster_upgrades_and_migrations.md).
|
||||
|
||||
### Manual update
|
||||
|
||||
* `kops edit cluster $NAME`
|
||||
|
|
|
@ -80,7 +80,6 @@ nav:
|
|||
- Instancegroup images: "operations/images.md"
|
||||
- Cluster configuration management: "changing_configuration.md"
|
||||
- Cluster Templating: "operations/cluster_template.md"
|
||||
- Cluster upgrades and migrations: "operations/cluster_upgrades_and_migrations.md"
|
||||
- GPU setup: "gpu.md"
|
||||
- Label management: "labels.md"
|
||||
- Secret management: "secrets.md"
|
||||
|
|
|
@ -2,28 +2,11 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
|||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"import_cluster.go",
|
||||
"kubectl.go",
|
||||
"ssh.go",
|
||||
],
|
||||
srcs = ["kubectl.go"],
|
||||
importpath = "k8s.io/kops/upup/pkg/kutil",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/apis/kops:go_default_library",
|
||||
"//pkg/apis/kops/registry:go_default_library",
|
||||
"//pkg/client/simple:go_default_library",
|
||||
"//pkg/kubeconfig:go_default_library",
|
||||
"//pkg/pki:go_default_library",
|
||||
"//pkg/resources/aws:go_default_library",
|
||||
"//upup/pkg/fi:go_default_library",
|
||||
"//upup/pkg/fi/cloudup:go_default_library",
|
||||
"//upup/pkg/fi/cloudup/awsup:go_default_library",
|
||||
"//util/pkg/vfs:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
|
||||
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
|
||||
"//vendor/golang.org/x/crypto/ssh:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
|
||||
"//vendor/k8s.io/klog/v2:go_default_library",
|
||||
],
|
||||
)
|
||||
|
|
|
@ -1,853 +0,0 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/ec2"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kops/pkg/apis/kops"
|
||||
"k8s.io/kops/pkg/apis/kops/registry"
|
||||
"k8s.io/kops/pkg/client/simple"
|
||||
"k8s.io/kops/pkg/pki"
|
||||
awsresources "k8s.io/kops/pkg/resources/aws"
|
||||
"k8s.io/kops/upup/pkg/fi"
|
||||
"k8s.io/kops/upup/pkg/fi/cloudup"
|
||||
"k8s.io/kops/upup/pkg/fi/cloudup/awsup"
|
||||
)
|
||||
|
||||
// ImportCluster tries to reverse engineer an existing k8s cluster, adding it to the cluster registry
|
||||
type ImportCluster struct {
|
||||
ClusterName string
|
||||
Cloud fi.Cloud
|
||||
|
||||
Clientset simple.Clientset
|
||||
}
|
||||
|
||||
func (x *ImportCluster) ImportAWSCluster(ctx context.Context) error {
|
||||
awsCloud := x.Cloud.(awsup.AWSCloud)
|
||||
clusterName := x.ClusterName
|
||||
|
||||
if clusterName == "" {
|
||||
return fmt.Errorf("ClusterName must be specified")
|
||||
}
|
||||
|
||||
var instanceGroups []*kops.InstanceGroup
|
||||
|
||||
cluster := &kops.Cluster{}
|
||||
cluster.ObjectMeta.Annotations = make(map[string]string)
|
||||
|
||||
// This annotation relaxes some validation (e.g. cluster name as full-dns name)
|
||||
cluster.ObjectMeta.Annotations[kops.AnnotationNameManagement] = kops.AnnotationValueManagementImported
|
||||
|
||||
cluster.Spec.CloudProvider = string(kops.CloudProviderAWS)
|
||||
cluster.ObjectMeta.Name = clusterName
|
||||
|
||||
cluster.Spec.KubeControllerManager = &kops.KubeControllerManagerConfig{}
|
||||
|
||||
cluster.Spec.Channel = kops.DefaultChannel
|
||||
|
||||
cluster.Spec.KubernetesAPIAccess = []string{"0.0.0.0/0"}
|
||||
cluster.Spec.SSHAccess = []string{"0.0.0.0/0"}
|
||||
|
||||
configBase, err := x.Clientset.ConfigBaseFor(cluster)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error building ConfigBase for cluster: %v", err)
|
||||
}
|
||||
cluster.Spec.ConfigBase = configBase.Path()
|
||||
|
||||
channel, err := kops.LoadChannel(cluster.Spec.Channel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
instances, err := findInstances(awsCloud)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error finding instances: %v", err)
|
||||
}
|
||||
|
||||
var masterInstance *ec2.Instance
|
||||
subnets := make(map[string]*kops.ClusterSubnetSpec)
|
||||
|
||||
for _, instance := range instances {
|
||||
instanceState := aws.StringValue(instance.State.Name)
|
||||
|
||||
if instanceState != "terminated" && instance.Placement != nil {
|
||||
zoneName := aws.StringValue(instance.Placement.AvailabilityZone)
|
||||
// We name the subnet after the zone
|
||||
subnetName := zoneName
|
||||
|
||||
subnet := subnets[subnetName]
|
||||
if subnet == nil {
|
||||
subnet = &kops.ClusterSubnetSpec{
|
||||
Name: subnetName,
|
||||
Zone: zoneName,
|
||||
Type: kops.SubnetTypePublic,
|
||||
}
|
||||
subnets[subnetName] = subnet
|
||||
}
|
||||
|
||||
subnetID := aws.StringValue(instance.SubnetId)
|
||||
if subnetID != "" {
|
||||
subnet.ProviderID = subnetID
|
||||
}
|
||||
}
|
||||
|
||||
role, _ := awsup.FindEC2Tag(instance.Tags, "Role")
|
||||
if role == clusterName+"-master" {
|
||||
if masterInstance != nil {
|
||||
masterState := aws.StringValue(masterInstance.State.Name)
|
||||
|
||||
klog.Infof("Found multiple masters: %s and %s", masterState, instanceState)
|
||||
|
||||
if masterState == "terminated" && instanceState != "terminated" {
|
||||
// OK
|
||||
} else if instanceState == "terminated" && masterState != "terminated" {
|
||||
// Ignore this one
|
||||
continue
|
||||
} else {
|
||||
return fmt.Errorf("found multiple masters")
|
||||
}
|
||||
}
|
||||
masterInstance = instance
|
||||
}
|
||||
}
|
||||
if masterInstance == nil {
|
||||
return fmt.Errorf("could not find master node")
|
||||
}
|
||||
masterInstanceID := aws.StringValue(masterInstance.InstanceId)
|
||||
klog.Infof("Found master: %q", masterInstanceID)
|
||||
|
||||
masterGroup := &kops.InstanceGroup{}
|
||||
masterGroup.Spec.Role = kops.InstanceGroupRoleMaster
|
||||
masterGroup.Spec.MinSize = fi.Int32(1)
|
||||
masterGroup.Spec.MaxSize = fi.Int32(1)
|
||||
|
||||
masterGroup.Spec.MachineType = aws.StringValue(masterInstance.InstanceType)
|
||||
|
||||
masterInstanceGroups := []*kops.InstanceGroup{masterGroup}
|
||||
instanceGroups = append(instanceGroups, masterGroup)
|
||||
|
||||
awsSubnets, err := awsresources.DescribeSubnets(x.Cloud)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error finding subnets: %v", err)
|
||||
}
|
||||
|
||||
for _, s := range awsSubnets {
|
||||
subnetID := aws.StringValue(s.SubnetId)
|
||||
|
||||
found := false
|
||||
for _, subnet := range subnets {
|
||||
if subnet.ProviderID == subnetID {
|
||||
subnet.CIDR = aws.StringValue(s.CidrBlock)
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
klog.Warningf("Ignoring subnet %q in which no instances were found", subnetID)
|
||||
}
|
||||
}
|
||||
|
||||
for k, subnet := range subnets {
|
||||
if subnet.ProviderID == "" {
|
||||
return fmt.Errorf("cannot find subnet %q. Please report this issue", k)
|
||||
}
|
||||
if subnet.CIDR == "" {
|
||||
return fmt.Errorf("cannot find subnet %q. If you used an existing subnet, please tag it with %s=%s and retry the import", subnet.ProviderID, awsup.TagClusterName, clusterName)
|
||||
}
|
||||
}
|
||||
|
||||
vpcID := aws.StringValue(masterInstance.VpcId)
|
||||
var vpc *ec2.Vpc
|
||||
{
|
||||
vpc, err = awsCloud.DescribeVPC(vpcID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if vpc == nil {
|
||||
return fmt.Errorf("cannot find vpc %q", vpcID)
|
||||
}
|
||||
}
|
||||
|
||||
cluster.Spec.NetworkID = vpcID
|
||||
cluster.Spec.NetworkCIDR = aws.StringValue(vpc.CidrBlock)
|
||||
for _, subnet := range subnets {
|
||||
cluster.Spec.Subnets = append(cluster.Spec.Subnets, *subnet)
|
||||
}
|
||||
|
||||
masterSubnet := subnets[aws.StringValue(masterInstance.Placement.AvailabilityZone)]
|
||||
if masterSubnet == nil {
|
||||
return fmt.Errorf("cannot find subnet %q for master. Please report this issue", aws.StringValue(masterInstance.Placement.AvailabilityZone))
|
||||
}
|
||||
masterGroup.Spec.Subnets = []string{masterSubnet.Name}
|
||||
masterGroup.ObjectMeta.Name = "master-" + masterSubnet.Name
|
||||
|
||||
userData, err := GetInstanceUserData(awsCloud, aws.StringValue(masterInstance.InstanceId))
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting master user-data: %v", err)
|
||||
}
|
||||
|
||||
conf, err := ParseUserDataConfiguration(userData)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing master user-data: %v", err)
|
||||
}
|
||||
|
||||
//master := &NodeSSH{
|
||||
// Hostname: c.Master,
|
||||
//}
|
||||
//err := master.AddSSHIdentity(c.SSHIdentity)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//
|
||||
//
|
||||
//fmt.Printf("Connecting to node on %s\n", c.Node)
|
||||
//
|
||||
//node := &NodeSSH{
|
||||
// Hostname: c.Node,
|
||||
//}
|
||||
//err = node.AddSSHIdentity(c.SSHIdentity)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
|
||||
instancePrefix := conf.Settings["INSTANCE_PREFIX"]
|
||||
if instancePrefix == "" {
|
||||
return fmt.Errorf("cannot determine INSTANCE_PREFIX")
|
||||
}
|
||||
if instancePrefix != clusterName {
|
||||
return fmt.Errorf("INSTANCE_PREFIX %q did not match cluster name %q", instancePrefix, clusterName)
|
||||
}
|
||||
|
||||
//k8s.NodeMachineType, err = InstanceType(node)
|
||||
//if err != nil {
|
||||
// return fmt.Errorf("cannot determine node instance type: %v", err)
|
||||
//}
|
||||
|
||||
// We want to upgrade!
|
||||
// k8s.ImageId = ""
|
||||
|
||||
//clusterConfig.ClusterIPRange = conf.Settings["CLUSTER_IP_RANGE"]
|
||||
cluster.Spec.KubeControllerManager.AllocateNodeCIDRs = conf.ParseBool("ALLOCATE_NODE_CIDRS")
|
||||
//clusterConfig.KubeUser = conf.Settings["KUBE_USER"]
|
||||
cluster.Spec.ServiceClusterIPRange = conf.Settings["SERVICE_CLUSTER_IP_RANGE"]
|
||||
cluster.Spec.NonMasqueradeCIDR = conf.Settings["NON_MASQUERADE_CIDR"]
|
||||
//clusterConfig.EnableClusterMonitoring = conf.Settings["ENABLE_CLUSTER_MONITORING"]
|
||||
//clusterConfig.EnableClusterLogging = conf.ParseBool("ENABLE_CLUSTER_LOGGING")
|
||||
//clusterConfig.EnableNodeLogging = conf.ParseBool("ENABLE_NODE_LOGGING")
|
||||
//clusterConfig.LoggingDestination = conf.Settings["LOGGING_DESTINATION"]
|
||||
//clusterConfig.ElasticsearchLoggingReplicas, err = parseInt(conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"])
|
||||
//if err != nil {
|
||||
// return fmt.Errorf("cannot parse ELASTICSEARCH_LOGGING_REPLICAS=%q: %v", conf.Settings["ELASTICSEARCH_LOGGING_REPLICAS"], err)
|
||||
//}
|
||||
//clusterConfig.EnableClusterDNS = conf.ParseBool("ENABLE_CLUSTER_DNS")
|
||||
//clusterConfig.EnableClusterUI = conf.ParseBool("ENABLE_CLUSTER_UI")
|
||||
//clusterConfig.DNSReplicas, err = parseInt(conf.Settings["DNS_REPLICAS"])
|
||||
//if err != nil {
|
||||
// return fmt.Errorf("cannot parse DNS_REPLICAS=%q: %v", conf.Settings["DNS_REPLICAS"], err)
|
||||
//}
|
||||
//clusterConfig.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
|
||||
cluster.Spec.ClusterDNSDomain = conf.Settings["DNS_DOMAIN"]
|
||||
//clusterConfig.AdmissionControl = conf.Settings["ADMISSION_CONTROL"]
|
||||
//clusterConfig.MasterIPRange = conf.Settings["MASTER_IP_RANGE"]
|
||||
//clusterConfig.DNSServerIP = conf.Settings["DNS_SERVER_IP"]
|
||||
//clusterConfig.DockerStorage = conf.Settings["DOCKER_STORAGE"]
|
||||
//k8s.MasterExtraSans = conf.Settings["MASTER_EXTRA_SANS"] // Not user set
|
||||
|
||||
nodeGroup := &kops.InstanceGroup{}
|
||||
nodeGroup.Spec.Role = kops.InstanceGroupRoleNode
|
||||
nodeGroup.ObjectMeta.Name = "nodes"
|
||||
for _, subnet := range subnets {
|
||||
nodeGroup.Spec.Subnets = append(nodeGroup.Spec.Subnets, subnet.Name)
|
||||
}
|
||||
instanceGroups = append(instanceGroups, nodeGroup)
|
||||
|
||||
//primaryNodeSet.Spec.MinSize, err = conf.ParseInt("NUM_MINIONS")
|
||||
//if err != nil {
|
||||
// return fmt.Errorf("cannot parse NUM_MINIONS=%q: %v", conf.Settings["NUM_MINIONS"], err)
|
||||
//}
|
||||
|
||||
{
|
||||
groups, err := awsup.FindAutoscalingGroups(awsCloud, awsCloud.Tags())
|
||||
if err != nil {
|
||||
return fmt.Errorf("error listing autoscaling groups: %v", err)
|
||||
}
|
||||
|
||||
if len(groups) == 0 {
|
||||
klog.Warningf("No Autoscaling group found")
|
||||
}
|
||||
if len(groups) == 1 {
|
||||
klog.Warningf("Multiple Autoscaling groups found")
|
||||
}
|
||||
minSize := int32(0)
|
||||
maxSize := int32(0)
|
||||
for _, group := range groups {
|
||||
minSize += int32(aws.Int64Value(group.MinSize))
|
||||
maxSize += int32(aws.Int64Value(group.MaxSize))
|
||||
}
|
||||
if minSize != 0 {
|
||||
nodeGroup.Spec.MinSize = fi.Int32(minSize)
|
||||
}
|
||||
if maxSize != 0 {
|
||||
nodeGroup.Spec.MaxSize = fi.Int32(maxSize)
|
||||
}
|
||||
|
||||
// Determine the machine type
|
||||
for _, group := range groups {
|
||||
name := aws.StringValue(group.LaunchConfigurationName)
|
||||
launchConfiguration, err := awsresources.FindAutoscalingLaunchConfiguration(awsCloud, name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error finding autoscaling LaunchConfiguration %q: %v", name, err)
|
||||
}
|
||||
|
||||
if launchConfiguration == nil {
|
||||
klog.Warningf("ignoring error launchConfiguration %q not found", name)
|
||||
continue
|
||||
}
|
||||
|
||||
nodeGroup.Spec.MachineType = aws.StringValue(launchConfiguration.InstanceType)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if conf.Version == "1.1" {
|
||||
// If users went with defaults on some things, clear them out so they get the new defaults
|
||||
//if clusterConfig.AdmissionControl == "NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" {
|
||||
// // More admission controllers in 1.2
|
||||
// clusterConfig.AdmissionControl = ""
|
||||
//}
|
||||
if masterGroup.Spec.MachineType == "t2.micro" {
|
||||
// Different defaults in 1.2
|
||||
masterGroup.Spec.MachineType = ""
|
||||
}
|
||||
if nodeGroup.Spec.MachineType == "t2.micro" {
|
||||
// Encourage users to pick something better...
|
||||
nodeGroup.Spec.MachineType = ""
|
||||
}
|
||||
}
|
||||
// if conf.Version == "1.2" {
|
||||
// If users went with defaults on some things, clear them out so they get the new defaults
|
||||
//if clusterConfig.AdmissionControl == "NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,ResourceQuota" {
|
||||
// // More admission controllers in 1.2
|
||||
// clusterConfig.AdmissionControl = ""
|
||||
//}
|
||||
// }
|
||||
|
||||
for _, etcdClusterName := range []string{"main", "events"} {
|
||||
etcdCluster := kops.EtcdClusterSpec{
|
||||
Name: etcdClusterName,
|
||||
}
|
||||
|
||||
for _, ig := range masterInstanceGroups {
|
||||
member := kops.EtcdMemberSpec{
|
||||
InstanceGroup: fi.String(ig.ObjectMeta.Name),
|
||||
}
|
||||
|
||||
name := ig.ObjectMeta.Name
|
||||
// We expect the IG to have a `master-` prefix, but this is both superfluous
|
||||
// and not how we named things previously
|
||||
name = strings.TrimPrefix(name, "master-")
|
||||
member.Name = name
|
||||
|
||||
etcdCluster.Members = append(etcdCluster.Members, member)
|
||||
}
|
||||
|
||||
cluster.Spec.EtcdClusters = append(cluster.Spec.EtcdClusters, etcdCluster)
|
||||
}
|
||||
|
||||
//if masterInstance.PublicIpAddress != nil {
|
||||
// eip, err := findElasticIP(cloud, *masterInstance.PublicIpAddress)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// if eip != nil {
|
||||
// k8s.MasterElasticIP = masterInstance.PublicIpAddress
|
||||
// }
|
||||
//}
|
||||
//
|
||||
//vpc, err := cloud.DescribeVPC(*k8s.VPCID)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//k8s.DHCPOptionsID = vpc.DhcpOptionsId
|
||||
//
|
||||
//igw, err := findInternetGateway(cloud, *k8s.VPCID)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//if igw == nil {
|
||||
// return fmt.Errorf("unable to find internet gateway for VPC %q", k8s.VPCID)
|
||||
//}
|
||||
//k8s.InternetGatewayID = igw.InternetGatewayId
|
||||
//
|
||||
//rt, err := findRouteTable(cloud, *k8s.SubnetID)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//if rt == nil {
|
||||
// return fmt.Errorf("unable to find route table for Subnet %q", k8s.SubnetID)
|
||||
//}
|
||||
//k8s.RouteTableID = rt.RouteTableId
|
||||
|
||||
//b.Context = "aws_" + instancePrefix
|
||||
|
||||
keyStore, err := x.Clientset.KeyStore(cluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//caCert, err := masterSSH.Join("ca.crt").ReadFile()
|
||||
caCert, err := conf.ParseCert("CA_CERT")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
keyset, err := keyStore.FindKeyset(fi.CertificateIDCA)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = keyset.AddItem(caCert, nil, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = keyStore.StoreKeyset(fi.CertificateIDCA, keyset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
////masterKey, err := masterSSH.Join("server.key").ReadFile()
|
||||
//masterKey, err := conf.ParseKey("MASTER_KEY")
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
////masterCert, err := masterSSH.Join("server.cert").ReadFile()
|
||||
//masterCert, err := conf.ParseCert("MASTER_CERT")
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//err = keyStore.ImportKeypair("master", masterKey, masterCert)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//
|
||||
////kubeletKey, err := kubeletSSH.Join("kubelet.key").ReadFile()
|
||||
//kubeletKey, err := conf.ParseKey("KUBELET_KEY")
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
////kubeletCert, err := kubeletSSH.Join("kubelet.cert").ReadFile()
|
||||
//kubeletCert, err := conf.ParseCert("KUBELET_CERT")
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//err = keyStore.ImportKeypair("kubelet", kubeletKey, kubeletCert)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
|
||||
// We don't store the kubecfg key
|
||||
//kubecfgKey, err := masterSSH.Join("kubecfg.key").ReadFile()
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//kubecfgCert, err := masterSSH.Join("kubecfg.cert").ReadFile()
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//err = keyStore.ImportKeypair("kubecfg", kubecfgKey, kubecfgCert)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
|
||||
//// We will generate new tokens, but some of these are in existing API objects
|
||||
//secretStore := x.StateStore.Secrets()
|
||||
//kubePassword := conf.Settings["KUBE_PASSWORD"]
|
||||
//kubeletToken = conf.Settings["KUBELET_TOKEN"]
|
||||
//kubeProxyToken = conf.Settings["KUBE_PROXY_TOKEN"]
|
||||
|
||||
var fullInstanceGroups []*kops.InstanceGroup
|
||||
for _, ig := range instanceGroups {
|
||||
full, err := cloudup.PopulateInstanceGroupSpec(cluster, ig, awsCloud, channel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fullInstanceGroups = append(fullInstanceGroups, full)
|
||||
}
|
||||
|
||||
err = registry.CreateClusterConfig(ctx, x.Clientset, cluster, fullInstanceGroups, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Note - we can't PopulateClusterSpec & WriteCompletedConfig, because the cluster doesn't have a valid DNS Name
|
||||
//fullCluster, err := cloudup.PopulateClusterSpec(cluster, x.ClusterRegistry)
|
||||
//if err != nil {
|
||||
// return err
|
||||
//}
|
||||
//
|
||||
//err = x.ClusterRegistry.WriteCompletedConfig(fullCluster)
|
||||
//if err != nil {
|
||||
// return fmt.Errorf("error writing completed cluster spec: %v", err)
|
||||
//}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// func parseInt(s string) (int, error) {
|
||||
// if s == "" {
|
||||
// return 0, nil
|
||||
// }
|
||||
|
||||
// n, err := strconv.ParseInt(s, 10, 64)
|
||||
// if err != nil {
|
||||
// return 0, err
|
||||
// }
|
||||
|
||||
// return int(n), nil
|
||||
// }
|
||||
|
||||
//func writeConf(p string, k8s *cloudup.CloudConfig) error {
|
||||
// jsonBytes, err := json.Marshal(k8s)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("error serializing configuration (json write phase): %v", err)
|
||||
// }
|
||||
//
|
||||
// var confObj interface{}
|
||||
// err = yaml.Unmarshal(jsonBytes, &confObj)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("error serializing configuration (yaml read phase): %v", err)
|
||||
// }
|
||||
//
|
||||
// m := confObj.(map[interface{}]interface{})
|
||||
//
|
||||
// for k, v := range m {
|
||||
// if v == nil {
|
||||
// delete(m, k)
|
||||
// }
|
||||
// s, ok := v.(string)
|
||||
// if ok && s == "" {
|
||||
// delete(m, k)
|
||||
// }
|
||||
// //klog.Infof("%v=%v", k, v)
|
||||
// }
|
||||
//
|
||||
// yaml, err := yaml.Marshal(confObj)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("error serializing configuration (yaml write phase): %v", err)
|
||||
// }
|
||||
//
|
||||
// err = ioutil.WriteFile(p, yaml, 0600)
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("error writing configuration to file %q: %v", p, err)
|
||||
// }
|
||||
//
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//func findInternetGateway(cloud awsup.AWSCloud, vpcID string) (*ec2.InternetGateway, error) {
|
||||
// request := &ec2.DescribeInternetGatewaysInput{
|
||||
// Filters: []*ec2.Filter{fi.NewEC2Filter("attachment.vpc-id", vpcID)},
|
||||
// }
|
||||
//
|
||||
// response, err := cloud.EC2.DescribeInternetGateways(request)
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("error listing InternetGateways: %v", err)
|
||||
// }
|
||||
// if response == nil || len(response.InternetGateways) == 0 {
|
||||
// return nil, nil
|
||||
// }
|
||||
//
|
||||
// if len(response.InternetGateways) != 1 {
|
||||
// return nil, fmt.Errorf("found multiple InternetGatewayAttachments to VPC")
|
||||
// }
|
||||
// igw := response.InternetGateways[0]
|
||||
// return igw, nil
|
||||
//}
|
||||
|
||||
//func findRouteTable(cloud awsup.AWSCloud, subnetID string) (*ec2.RouteTable, error) {
|
||||
// request := &ec2.DescribeRouteTablesInput{
|
||||
// Filters: []*ec2.Filter{fi.NewEC2Filter("association.subnet-id", subnetID)},
|
||||
// }
|
||||
//
|
||||
// response, err := cloud.EC2.DescribeRouteTables(request)
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("error listing RouteTables: %v", err)
|
||||
// }
|
||||
// if response == nil || len(response.RouteTables) == 0 {
|
||||
// return nil, nil
|
||||
// }
|
||||
//
|
||||
// if len(response.RouteTables) != 1 {
|
||||
// return nil, fmt.Errorf("found multiple RouteTables matching tags")
|
||||
// }
|
||||
// rt := response.RouteTables[0]
|
||||
// return rt, nil
|
||||
//}
|
||||
//
|
||||
//func findElasticIP(cloud awsup.AWSCloud, publicIP string) (*ec2.Address, error) {
|
||||
// request := &ec2.DescribeAddressesInput{
|
||||
// PublicIps: []*string{&publicIP},
|
||||
// }
|
||||
//
|
||||
// response, err := cloud.EC2.DescribeAddresses(request)
|
||||
// if err != nil {
|
||||
// if awsErr, ok := err.(awserr.Error); ok {
|
||||
// if awsErr.Code() == "InvalidAddress.NotFound" {
|
||||
// return nil, nil
|
||||
// }
|
||||
// }
|
||||
// return nil, fmt.Errorf("error listing Addresses: %v", err)
|
||||
// }
|
||||
// if response == nil || len(response.Addresses) == 0 {
|
||||
// return nil, nil
|
||||
// }
|
||||
//
|
||||
// if len(response.Addresses) != 1 {
|
||||
// return nil, fmt.Errorf("found multiple Addresses matching IP %q", publicIP)
|
||||
// }
|
||||
// return response.Addresses[0], nil
|
||||
//}
|
||||
|
||||
func findInstances(c awsup.AWSCloud) ([]*ec2.Instance, error) {
|
||||
filters := awsresources.BuildEC2Filters(c)
|
||||
|
||||
request := &ec2.DescribeInstancesInput{
|
||||
Filters: filters,
|
||||
}
|
||||
|
||||
klog.V(2).Infof("Querying EC2 instances")
|
||||
|
||||
var instances []*ec2.Instance
|
||||
|
||||
err := c.EC2().DescribeInstancesPages(request, func(p *ec2.DescribeInstancesOutput, lastPage bool) bool {
|
||||
for _, reservation := range p.Reservations {
|
||||
instances = append(instances, reservation.Instances...)
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error describing instances: %v", err)
|
||||
}
|
||||
|
||||
return instances, nil
|
||||
}
|
||||
|
||||
//func GetMetadata(t *NodeSSH, key string) (string, error) {
|
||||
// b, err := t.exec("curl -s http://169.254.169.254/latest/meta-data/" + key)
|
||||
// if err != nil {
|
||||
// return "", fmt.Errorf("error querying for metadata %q: %v", key, err)
|
||||
// }
|
||||
// return string(b), nil
|
||||
//}
|
||||
//
|
||||
//func InstanceType(t *NodeSSH) (string, error) {
|
||||
// return GetMetadata(t, "instance-type")
|
||||
//}
|
||||
//
|
||||
//func GetMetadataList(t *NodeSSH, key string) ([]string, error) {
|
||||
// d, err := GetMetadata(t, key)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
// var macs []string
|
||||
// for _, line := range strings.Split(d, "\n") {
|
||||
// mac := line
|
||||
// mac = strings.Trim(mac, "/")
|
||||
// mac = strings.TrimSpace(mac)
|
||||
// if mac == "" {
|
||||
// continue
|
||||
// }
|
||||
// macs = append(macs, mac)
|
||||
// }
|
||||
//
|
||||
// return macs, nil
|
||||
//}
|
||||
|
||||
// Fetch instance UserData
|
||||
func GetInstanceUserData(cloud awsup.AWSCloud, instanceID string) ([]byte, error) {
|
||||
request := &ec2.DescribeInstanceAttributeInput{}
|
||||
request.InstanceId = aws.String(instanceID)
|
||||
request.Attribute = aws.String("userData")
|
||||
response, err := cloud.EC2().DescribeInstanceAttribute(request)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error querying EC2 for user metadata for instance %q: %v", instanceID, err)
|
||||
}
|
||||
if response.UserData != nil {
|
||||
b, err := base64.StdEncoding.DecodeString(aws.StringValue(response.UserData.Value))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding EC2 UserData: %v", err)
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// UserDataConfiguration holds key/value settings parsed from an
// instance's kube-up user-data payload.
type UserDataConfiguration struct {
	// Version is the inferred kube-up format version ("1.1" or "1.2").
	Version string
	// Settings maps configuration keys (e.g. "INSTANCE_PREFIX") to their
	// unquoted string values.
	Settings map[string]string
}
|
||||
|
||||
func (u *UserDataConfiguration) ParseBool(key string) *bool {
|
||||
s := u.Settings[key]
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
s = strings.ToLower(s)
|
||||
if s == "true" || s == "1" || s == "y" || s == "yes" || s == "t" {
|
||||
return fi.Bool(true)
|
||||
}
|
||||
return fi.Bool(false)
|
||||
}
|
||||
|
||||
func (u *UserDataConfiguration) ParseCert(key string) (*pki.Certificate, error) {
|
||||
s := u.Settings[key]
|
||||
if s == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
data, err := base64.StdEncoding.DecodeString(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding base64 certificate %q: %v", key, err)
|
||||
}
|
||||
cert, err := pki.ParsePEMCertificate(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing certificate %q: %v", key, err)
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
func (u *UserDataConfiguration) ParseKey(key string) (*pki.PrivateKey, error) {
|
||||
s := u.Settings[key]
|
||||
if s == "" {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
data, err := base64.StdEncoding.DecodeString(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error decoding base64 private key %q: %v", key, err)
|
||||
}
|
||||
k, err := pki.ParsePEMPrivateKey(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing private key %q: %v", key, err)
|
||||
}
|
||||
|
||||
return k, nil
|
||||
}
|
||||
|
||||
func ParseUserDataConfiguration(raw []byte) (*UserDataConfiguration, error) {
|
||||
userData, err := UserDataToString(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
settings := make(map[string]string)
|
||||
|
||||
version := ""
|
||||
if strings.Contains(userData, "install-salt master") || strings.Contains(userData, "dpkg -s salt-minion") {
|
||||
version = "1.1"
|
||||
} else {
|
||||
version = "1.2"
|
||||
}
|
||||
if version == "1.1" {
|
||||
for _, line := range strings.Split(userData, "\n") {
|
||||
if !strings.HasPrefix(line, "readonly ") {
|
||||
continue
|
||||
}
|
||||
line = line[9:]
|
||||
sep := strings.Index(line, "=")
|
||||
k := ""
|
||||
v := ""
|
||||
if sep != -1 {
|
||||
k = line[0:sep]
|
||||
v = line[sep+1:]
|
||||
}
|
||||
|
||||
if k == "" {
|
||||
klog.V(4).Infof("Unknown line: %s", line)
|
||||
}
|
||||
|
||||
if len(v) >= 2 && v[0] == '\'' && v[len(v)-1] == '\'' {
|
||||
v = v[1 : len(v)-1]
|
||||
}
|
||||
settings[k] = v
|
||||
}
|
||||
} else {
|
||||
for _, line := range strings.Split(userData, "\n") {
|
||||
sep := strings.Index(line, ": ")
|
||||
k := ""
|
||||
v := ""
|
||||
if sep != -1 {
|
||||
k = line[0:sep]
|
||||
v = line[sep+2:]
|
||||
}
|
||||
|
||||
if k == "" {
|
||||
klog.V(4).Infof("Unknown line: %s", line)
|
||||
}
|
||||
|
||||
if len(v) >= 2 && v[0] == '"' && v[len(v)-1] == '"' {
|
||||
v = v[1 : len(v)-1]
|
||||
} else if len(v) >= 2 && v[0] == '\'' && v[len(v)-1] == '\'' {
|
||||
v = v[1 : len(v)-1]
|
||||
}
|
||||
settings[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
c := &UserDataConfiguration{
|
||||
Version: version,
|
||||
Settings: settings,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func UserDataToString(userData []byte) (string, error) {
|
||||
var err error
|
||||
if len(userData) > 2 && userData[0] == 31 && userData[1] == 139 {
|
||||
// GZIP
|
||||
klog.V(2).Infof("gzip data detected; will decompress")
|
||||
|
||||
userData, err = gunzipBytes(userData)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error decompressing user data: %v", err)
|
||||
}
|
||||
}
|
||||
return string(userData), nil
|
||||
}
|
||||
|
||||
// gunzipBytes decompresses a gzip-compressed byte slice and returns the
// uncompressed bytes.
func gunzipBytes(d []byte) ([]byte, error) {
	r, err := gzip.NewReader(bytes.NewReader(d))
	if err != nil {
		return nil, fmt.Errorf("error building gunzip reader: %v", err)
	}
	defer r.Close()

	var out bytes.Buffer
	if _, err := io.Copy(&out, r); err != nil {
		return nil, fmt.Errorf("error decompressing data: %v", err)
	}
	return out.Bytes(), nil
}
|
|
@ -20,12 +20,10 @@ import (
|
|||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kops/pkg/kubeconfig"
|
||||
)
|
||||
|
@ -34,33 +32,6 @@ type Kubectl struct {
|
|||
KubectlPath string
|
||||
}
|
||||
|
||||
//func (k *Kubectl) GetCurrentContext() (string, error) {
|
||||
// s, err := k.execKubectl("config", "current-context")
|
||||
// if err != nil {
|
||||
// return "", err
|
||||
// }
|
||||
// s = strings.TrimSpace(s)
|
||||
// return s, nil
|
||||
//}
|
||||
|
||||
func (k *Kubectl) GetCurrentContext() (string, error) {
|
||||
pathOptions := clientcmd.NewDefaultPathOptions()
|
||||
|
||||
config, err := pathOptions.GetStartingConfig()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return config.CurrentContext, nil
|
||||
|
||||
//s, err := k.execKubectl("config", "current-context")
|
||||
//if err != nil {
|
||||
// return "", err
|
||||
//}
|
||||
//s = strings.TrimSpace(s)
|
||||
//return s, nil
|
||||
}
|
||||
|
||||
func (k *Kubectl) GetConfig(minify bool) (*kubeconfig.KubectlConfig, error) {
|
||||
output := "json"
|
||||
// TODO: --context doesn't seem to work
|
||||
|
@ -91,28 +62,6 @@ func (k *Kubectl) GetConfig(minify bool) (*kubeconfig.KubectlConfig, error) {
|
|||
return config, nil
|
||||
}
|
||||
|
||||
// Apply calls kubectl apply to apply the manifest.
|
||||
// We will likely in future change this to create things directly (or more likely embed this logic into kubectl itself)
|
||||
func (k *Kubectl) Apply(context string, data []byte) error {
|
||||
localManifestFile, err := ioutil.TempFile("", "manifest")
|
||||
if err != nil {
|
||||
return fmt.Errorf("error creating temp file: %v", err)
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := os.Remove(localManifestFile.Name()); err != nil {
|
||||
klog.Warningf("error deleting temp file %q: %v", localManifestFile.Name(), err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := ioutil.WriteFile(localManifestFile.Name(), data, 0600); err != nil {
|
||||
return fmt.Errorf("error writing temp file: %v", err)
|
||||
}
|
||||
|
||||
_, _, err = k.execKubectl("apply", "--context", context, "-f", localManifestFile.Name())
|
||||
return err
|
||||
}
|
||||
|
||||
func (k *Kubectl) execKubectl(args ...string) (string, string, error) {
|
||||
kubectlPath := k.KubectlPath
|
||||
if kubectlPath == "" {
|
||||
|
|
|
@ -1,96 +0,0 @@
|
|||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package kutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/crypto/ssh"
|
||||
"k8s.io/kops/util/pkg/vfs"
|
||||
)
|
||||
|
||||
// NodeSSH manages an SSH connection to a single node, dialing lazily on
// first use and caching the established client.
type NodeSSH struct {
	// Hostname is the address to connect to; port 22 is assumed.
	Hostname string
	// SSHConfig is the client configuration. If User is empty, a list of
	// common default usernames is tried in turn when dialing.
	SSHConfig ssh.ClientConfig
	// sshClient caches the connection established by GetSSHClient.
	sshClient *ssh.Client
}
|
||||
|
||||
func (m *NodeSSH) Root() (*vfs.SSHPath, error) {
|
||||
client, err := m.GetSSHClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sudo := true
|
||||
return vfs.NewSSHPath(client, m.Hostname, "/", sudo), nil
|
||||
}
|
||||
|
||||
func (m *NodeSSH) dial() (*ssh.Client, error) {
|
||||
users := []string{"admin", "ubuntu"}
|
||||
if m.SSHConfig.User != "" {
|
||||
users = []string{m.SSHConfig.User}
|
||||
}
|
||||
|
||||
var lastError error
|
||||
for _, user := range users {
|
||||
m.SSHConfig.User = user
|
||||
sshClient, err := ssh.Dial("tcp", m.Hostname+":22", &m.SSHConfig)
|
||||
if err == nil {
|
||||
return sshClient, err
|
||||
}
|
||||
lastError = err
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("error connecting to SSH on server %q: %v", m.Hostname, lastError)
|
||||
}
|
||||
|
||||
func (m *NodeSSH) GetSSHClient() (*ssh.Client, error) {
|
||||
if m.sshClient == nil {
|
||||
sshClient, err := m.dial()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.sshClient = sshClient
|
||||
}
|
||||
return m.sshClient, nil
|
||||
}
|
||||
|
||||
//func (m *NodeSSH) ReadFile(remotePath string) ([]byte, error) {
|
||||
// b, err := m.exec("sudo cat " + remotePath)
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("error reading remote file %q: %v", remotePath, err)
|
||||
// }
|
||||
// return b, nil
|
||||
//}
|
||||
|
||||
//func (m *NodeSSH) exec(cmd string) ([]byte, error) {
|
||||
// client, err := m.GetSSHClient()
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
//
|
||||
// session, err := client.NewSession()
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("error creating SSH session: %v", err)
|
||||
// }
|
||||
// defer session.Close()
|
||||
//
|
||||
// b, err := session.Output(cmd)
|
||||
// if err != nil {
|
||||
// return nil, fmt.Errorf("error executing command %q: %v", cmd, err)
|
||||
// }
|
||||
// return b, nil
|
||||
//}
|
Loading…
Reference in New Issue