Merge pull request #75 from omkarkhatavkar/migration_suite_2
Adds the backup-restore migration scenario with RKE2 downstream cluster support.

commit 8106997100
@@ -47,3 +47,5 @@ install_rancher.sh.bk
 # Ignore CLI configuration files
 .terraformrc
 terraform.rc
+tests/backuprestore/migration/restore-migration.yaml
+resources/terraform/modules/ec2/encryption-provider-config.yaml
@@ -43,3 +43,10 @@ repos:
        entry: go mod tidy
        files: ^go\.mod$|^go\.sum$
        stages: [manual]
+
+  - repo: https://github.com/antonbabenko/pre-commit-terraform
+    rev: v1.77.1 # Check for latest tag at https://github.com/antonbabenko/pre-commit-terraform/releases
+    hooks:
+      - id: terraform_fmt # Formats Terraform code using terraform fmt -recursive
+      - id: terraform_validate # Validates Terraform configuration
+      - id: terraform_docs # Updates README.md with module docs (if using modules)
@@ -267,8 +267,8 @@ func CreateRKE2Cluster(rancherClient *rancher.Client, cloudCredentialName string
 	}
 	err = VerifyCluster(rancherClient, config.ClusterSpec.Metadata.Name)
 	if err != nil {
-		err := fmt.Errorf("cluster %s is now Active", config.ClusterSpec.Metadata.Name)
-		return "nil", err
+		err := fmt.Errorf("cluster %s is not Active", config.ClusterSpec.Metadata.Name)
+		return config.ClusterSpec.Metadata.Name, err
 	}
 	return config.ClusterSpec.Metadata.Name, nil
 }
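For context, a minimal hypothetical sketch of how a caller might rely on the corrected contract above (not part of the diff; it assumes the CreateRKE2Cluster and DeleteCluster helpers from this PR):

    clusterName, err := resources.CreateRKE2Cluster(rancherClient, cloudCredentialName)
    if err != nil {
        // The cluster name is returned even on failure (instead of the old
        // literal "nil"), so cleanup can target the half-provisioned cluster.
        _ = resources.DeleteCluster(rancherClient, clusterName)
    }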
@@ -0,0 +1,79 @@
+package rancher
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	e2e "k8s.io/kubernetes/test/e2e/framework"
+
+	"github.com/rancher/observability-e2e/tests/helper/helm"
+)
+
+// AddRancherHelmRepo adds the Rancher Helm repository and updates it.
+func AddRancherHelmRepo(kubeconfig, helmRepoURL, repoName string) error {
+	e2e.Logf("Adding Helm repo: %s -> %s", repoName, helmRepoURL)
+	_, err := helm.Execute(kubeconfig, "repo", "add", repoName, helmRepoURL)
+	if err != nil {
+		return fmt.Errorf("failed to add helm repo: %w", err)
+	}
+
+	e2e.Logf("Updating Helm repos...")
+	_, err = helm.Execute(kubeconfig, "repo", "update")
+	if err != nil {
+		return fmt.Errorf("failed to update helm repos: %w", err)
+	}
+
+	e2e.Logf("Helm repo added and updated successfully")
+	return nil
+}
+
+// InstallRancher installs Rancher based on the repo URL and version.
+func InstallRancher(kubeconfig, helmRepoURL, rancherVersion, hostname, password string) error {
+	repoName := fmt.Sprintf("rancher-%d", time.Now().Unix())
+	if err := AddRancherHelmRepo(kubeconfig, helmRepoURL, repoName); err != nil {
+		return err
+	}
+
+	namespace := "cattle-system"
+	chart := fmt.Sprintf("%s/rancher", repoName)
+	version := strings.TrimPrefix(rancherVersion, "v")
+
+	commonArgs := []string{
+		"install", "rancher", chart,
+		"--namespace", namespace,
+		"--version", version,
+		"--set", fmt.Sprintf("hostname=%s", hostname),
+		"--set", "replicas=2",
+		"--set", fmt.Sprintf("bootstrapPassword=%s", password),
+		"--set", "global.cattle.psp.enabled=false",
+		"--set", "insecure=true",
+		"--wait",
+		"--timeout=10m",
+		"--create-namespace",
+		"--devel",
+	}
+
+	if strings.Contains(helmRepoURL, "releases.rancher.com") {
+		e2e.Logf("Installing Rancher using official release chart...")
+	} else {
+		e2e.Logf("Installing Rancher using SUSE private registry chart...")
+		extraArgs := []string{
+			"--set", fmt.Sprintf("rancherImageTag=%s", rancherVersion),
+			"--set", "rancherImage=stgregistry.suse.com/rancher/rancher",
+			"--set", "rancherImagePullPolicy=Always",
+			"--set", "extraEnv[0].name=CATTLE_AGENT_IMAGE",
+			"--set", fmt.Sprintf("extraEnv[0].value=stgregistry.suse.com/rancher/rancher-agent:%s", rancherVersion),
+		}
+		commonArgs = append(commonArgs, extraArgs...)
+	}
+
+	output, err := helm.Execute(kubeconfig, commonArgs...)
+	if err != nil {
+		return fmt.Errorf("helm install failed: %w\nOutput: %s", err, output)
+	}
+
+	e2e.Logf("Rancher installed successfully: %s", output)
+	return nil
+}
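A hypothetical invocation of the helper above, with placeholder version and hostname values (an empty kubeconfig argument falls back to ~/.kube/config via helm.Execute, added later in this diff):

    // Alias as used by the migration test: resources "github.com/rancher/observability-e2e/resources/rancher"
    err := resources.InstallRancher(
        "", // kubeconfig: empty string resolves to ~/.kube/config
        "https://releases.rancher.com/server-charts/latest",
        "v2.11.0",             // placeholder version
        "rancher.example.com", // placeholder hostname
        os.Getenv("RANCHER_PASSWORD"),
    )
    if err != nil {
        e2e.Failf("Rancher install failed: %v", err)
    }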
@@ -23,10 +23,14 @@ module "ec2" {
   vpc_id                = module.vpc.vpc_id
   security_group_id     = module.vpc.security_group_id
   private_key_path      = var.private_key_path
-  preserve_eip          = false
+  preserve_eip          = var.preserve_eip
   rke2_version          = var.rke2_version
   cert_manager_version  = var.cert_manager_version
   encryption_secret_key = var.encryption_secret_key
   input_cluster_config  = var.input_cluster_config
   cattle_config         = var.cattle_config
+  rancher_password      = var.rancher_password
+  rancher_version       = var.rancher_version
+  rancher_repo_url      = var.rancher_repo_url
+  install_rancher       = var.install_rancher
 }
@@ -0,0 +1,15 @@
+output "ec2_public_ip" {
+  value = module.ec2.public_ip
+}
+
+output "vpc_id" {
+  value = module.vpc.vpc_id
+}
+
+output "subnet_id" {
+  value = module.vpc.subnet_id
+}
+
+output "s3_bucket_name" {
+  value = module.s3.s3_bucket_name
+}
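These output names line up with the JSON tags on the TerraformOutputs struct updated later in this diff. A minimal hypothetical sketch of reading them after apply:

    outputs, err := localTerraform.ParseTerraformOutputs(tfCtx)
    if err != nil {
        e2e.Failf("failed to parse terraform outputs: %v", err)
    }
    e2e.Logf("EC2 public IP: %s, S3 bucket: %s", outputs.PublicIP, outputs.S3BucketName)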
@@ -1,10 +1,12 @@
-prefix = "auto-backup-restore-test"
-aws_region_instance = "us-east-2"
-aws_region_s3 = "us-east-2"
-vpc_cidr = "10.0.0.0/16"
-subnet_cidr = "10.0.0.0/24"
-aws_zone = "us-east-2a"
-ami_id = "ami-00eb69d236edcfaf8"
-instance_type = "t2.2xlarge"
-private_key_path = "~/.ssh/id_rsa"
-root_volume_size = 60
+prefix               = "auto-backup-restore-test"
+aws_region_instance  = "us-east-2"
+aws_region_s3        = "us-east-2"
+vpc_cidr             = "10.0.0.0/16"
+subnet_cidr          = "10.0.0.0/24"
+aws_zone             = "us-east-2a"
+ami_id               = "ami-00eb69d236edcfaf8"
+instance_type        = "t2.2xlarge"
+private_key_path     = "~/.ssh/id_rsa"
+root_volume_size     = 60
+cattle_config        = "./../../../cattle-config.yaml"
+input_cluster_config = "./../../../tests/helper/yamls/inputClusterConfig.yaml"
@@ -44,6 +44,7 @@ variable "key_name" {
   description = "Key pair name for EC2"
   type        = string
   default     = "test"
+  sensitive   = true
 }

 variable "private_key_path" {

@@ -66,7 +67,7 @@ variable "prefix" {
 variable "rke2_version" {
   description = "RKE2 version to install"
   type        = string
-  default     = "v1.32.2+rke2r1"
+  default     = "v1.32.5+rke2r1"
 }

 variable "cert_manager_version" {

@@ -96,5 +97,24 @@ variable "input_cluster_config" {
 variable "preserve_eip" {
   description = "Create a static EIP and attach it to the instance for the migration scenario"
   type        = bool
-  default     = false
+  default     = true
 }
+variable "rancher_version" {
+  description = "Version of Rancher under test"
+}
+
+variable "rancher_password" {
+  description = "Bootstrap password for Rancher"
+  type        = string
+  sensitive   = true
+}
+variable "rancher_repo_url" {
+  description = "Helm repository URL to install Rancher"
+  type        = string
+}
+
+variable "install_rancher" {
+  type        = bool
+  default     = true
+  description = "Whether to install Rancher after installing RKE2"
+}
@@ -3,9 +3,11 @@ kind: EncryptionConfiguration
 resources:
   - resources:
       - secrets
+      - configmaps
     providers:
       - aescbc:
           keys:
             - name: key1
               secret: "${encryption_secret_key}"
-      - identity: {}
+      - identity: {} # this fallback allows reading unencrypted secrets;
+                     # for example, during initial migration
@@ -0,0 +1,104 @@
+#!/bin/bash
+
+set -euxo pipefail
+
+# Positional args
+
+RANCHER_VERSION="${1}"
+RANCHER_PASSWORD="${2}"
+HELM_REPO_URL="${3}"
+INSTALL_RANCHER="${4:-true}" # Default to true if not provided
+
+if [[ "$INSTALL_RANCHER" != "true" ]]; then
+  echo "⏭️ Skipping Rancher installation because INSTALL_RANCHER=$INSTALL_RANCHER"
+  exit 0
+fi
+
+# Proceed with Rancher installation
+echo "📦 Installing Rancher version: $RANCHER_VERSION"
+echo "🔐 Using password: [REDACTED]"
+echo "📦 Helm repo: $HELM_REPO_URL"
+
+# Add Helm repo for Rancher
+helm repo add rancher "$HELM_REPO_URL"
+helm repo update
+
+kubectl create namespace cattle-system || true
+
+# Get public IP and set hostname
+PUBLIC_IP=$(curl -s ifconfig.me)
+RANCHER_HOSTNAME="rancher.${PUBLIC_IP}.sslip.io"
+
+# Install Rancher
+if echo "$HELM_REPO_URL" | grep -q "releases.rancher.com"; then
+  echo "📦 Installing Rancher using official release chart..."
+  helm install rancher rancher/rancher --namespace cattle-system \
+    --version "$(echo "$RANCHER_VERSION" | tr -d 'v')" \
+    --set hostname="$RANCHER_HOSTNAME" \
+    --set replicas=2 \
+    --set bootstrapPassword="$RANCHER_PASSWORD" \
+    --set global.cattle.psp.enabled=false \
+    --set insecure=true \
+    --wait \
+    --timeout=10m \
+    --create-namespace \
+    --devel
+else
+  echo "📦 Installing Rancher using SUSE private registry chart..."
+  helm install rancher rancher/rancher --namespace cattle-system \
+    --version "$(echo "$RANCHER_VERSION" | tr -d 'v')" \
+    --set hostname="$RANCHER_HOSTNAME" \
+    --set replicas=2 \
+    --set bootstrapPassword="$RANCHER_PASSWORD" \
+    --set global.cattle.psp.enabled=false \
+    --set insecure=true \
+    --set rancherImageTag="$RANCHER_VERSION" \
+    --set rancherImage='stgregistry.suse.com/rancher/rancher' \
+    --set rancherImagePullPolicy=Always \
+    --set extraEnv[0].name=CATTLE_AGENT_IMAGE \
+    --set extraEnv[0].value="stgregistry.suse.com/rancher/rancher-agent:$RANCHER_VERSION" \
+    --wait \
+    --timeout=10m \
+    --create-namespace \
+    --devel
+fi
+
+# Wait for Rancher to start
+sleep 180
+
+# Post-install setup
+RANCHER_URL="https://${RANCHER_HOSTNAME}"
+echo "::add-mask::$RANCHER_PASSWORD"
+
+LOGIN_RESPONSE=$(curl --silent -X POST -H 'Content-Type: application/json' \
+  -d "{\"username\":\"admin\",\"password\":\"${RANCHER_PASSWORD}\"}" \
+  "${RANCHER_URL}/v3-public/localProviders/local?action=login" \
+  --insecure)
+
+TOKEN=$(echo "$LOGIN_RESPONSE" | jq -r .token)
+echo "::add-mask::$TOKEN"
+
+if [[ -z "$TOKEN" || "$TOKEN" == "null" ]]; then
+  echo "❌ Failed to login with admin password" >&2
+  exit 1
+fi
+
+# Opt out of telemetry
+curl --silent -X PUT -H "Authorization: Bearer $TOKEN" \
+  -H 'Content-Type: application/json' \
+  -d '{"name":"telemetry-opt","value":"out"}' \
+  "${RANCHER_URL}/v3/settings/telemetry-opt" --insecure
+
+# Mark first login complete
+curl --silent -X PUT -H "Authorization: Bearer $TOKEN" \
+  -H 'Content-Type: application/json' \
+  -d '{"value":"false"}' \
+  "${RANCHER_URL}/v3/settings/first-login" --insecure
+
+# Set Rancher server URL
+curl --silent -X PUT -H "Authorization: Bearer $TOKEN" \
+  -H 'Content-Type: application/json' \
+  -d "{\"name\":\"server-url\",\"value\":\"${RANCHER_URL}\"}" \
+  "${RANCHER_URL}/v3/settings/server-url" --insecure
+
+echo "✅ Rancher installation and configuration complete."
@@ -1,47 +1,57 @@
 #!/bin/bash

-# Define default values
-DEFAULT_RKE2_VERSION="v1.32.2+rke2r1"
-DEFAULT_CERT_MANAGER_VERSION="v1.15.3"
-DEFAULT_HELM_REPO_NAME="rancher"
-DEFAULT_HELM_REPO_URL="https://releases.rancher.com/server-charts/latest"
 set -euxo pipefail

-# Get inputs or use defaults
-RKE2_VERSION="${1:-$DEFAULT_RKE2_VERSION}"
-CERT_MANAGER_VERSION="${2:-$DEFAULT_CERT_MANAGER_VERSION}"
-HELM_REPO_URL="${3:-$DEFAULT_HELM_REPO_URL}"
+RKE2_VERSION="${1}"
+CERT_MANAGER_VERSION="${2}"
+HELM_REPO_URL="${3}"

 echo "🚀 Installing RKE2 version: $RKE2_VERSION"
 echo "🔐 Installing Cert Manager version: $CERT_MANAGER_VERSION"
 echo "📦 Using Helm repo URL: $HELM_REPO_URL"

+sudo apt-get update -qq && sudo apt-get install -y -qq jq curl
+sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 && sudo chmod +x /usr/local/bin/yq
+
 # Install RKE2
 curl -sfL https://get.rke2.io | INSTALL_RKE2_VERSION=$RKE2_VERSION sh -
-systemctl enable --now rke2-server.service
-systemctl restart rke2-server
+sudo systemctl enable --now rke2-server.service
+sudo systemctl restart rke2-server

-# Configure kubectl
+# Wait a bit to ensure RKE2 starts up and generates kubeconfig
+sleep 10
+
+# Give permissions so Terraform can copy it
+cat /etc/rancher/rke2/rke2.yaml
+cp /etc/rancher/rke2/rke2.yaml /tmp/
+sudo chown ubuntu:ubuntu /tmp/rke2.yaml
+
+### 🔧 Patch kubeconfig with external IP
+EXTERNAL_IP=$(curl -s ifconfig.me)
+sudo sed -i "s/127.0.0.1/${EXTERNAL_IP}/" /tmp/rke2.yaml
+yq e '.clusters[].cluster |= {"server": .server, "insecure-skip-tls-verify": true}' -i /tmp/rke2.yaml
+
 # Configure kubectl for current user (ubuntu)
 mkdir -p ~/.kube
 ln -sf /etc/rancher/rke2/rke2.yaml ~/.kube/config
-ln -sf /var/lib/rancher/rke2/bin/kubectl /usr/local/bin/
+ln -sf /var/lib/rancher/rke2/bin/kubectl /usr/local/bin/kubectl

 # Install Helm
 echo "📦 Installing Helm..."
 curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
-chmod 700 get_helm.sh
+chmod +x get_helm.sh
 ./get_helm.sh
 rm -f get_helm.sh

-# Add Rancher Helm repo (with default name 'rancher')
-echo "📌 Adding Helm repo '$DEFAULT_HELM_REPO_NAME' -> $HELM_REPO_URL"
-helm repo add "$DEFAULT_HELM_REPO_NAME" "$HELM_REPO_URL"
+# Add cert-manager repo and install
+helm repo add jetstack https://charts.jetstack.io
 helm repo update

-# Install Cert Manager
-echo "🔧 Installing Cert Manager version: $CERT_MANAGER_VERSION"
+# Add Helm repo for Rancher
+helm repo add rancher "$HELM_REPO_URL"
+helm repo update
+
+# Install cert-manager
 kubectl apply -f "https://github.com/cert-manager/cert-manager/releases/download/$CERT_MANAGER_VERSION/cert-manager.yaml"

 # Create Rancher namespace
 kubectl create namespace cattle-system

-echo "✅ Installation complete! RKE2 and Rancher Helm repo is set up."
+echo "✅ RKE2 and Cert Manager installed. Wait ~60 seconds before installing Rancher."
+sleep 60
@@ -46,6 +46,7 @@ locals {
 resource "null_resource" "provision_rke2" {
   depends_on = [aws_instance.rke2_node]

+  # Transfer install_rke2.sh
   provisioner "file" {
     source      = "${path.module}/install_rke2.sh"
     destination = "/home/ubuntu/install_rke2.sh"

@@ -58,6 +59,19 @@ resource "null_resource" "provision_rke2" {
     }
   }

+  # Transfer install_rancher.sh
+  provisioner "file" {
+    source      = "${path.module}/install_rancher.sh"
+    destination = "/home/ubuntu/install_rancher.sh"
+
+    connection {
+      type        = "ssh"
+      user        = "ubuntu"
+      private_key = file(var.private_key_path)
+      host        = local.rke2_host_ip
+    }
+  }
+
   provisioner "file" {
     source      = local_file.encrypted_config.filename
     destination = "/home/ubuntu/encryption-provider-config.yaml"

@@ -70,10 +84,12 @@ resource "null_resource" "provision_rke2" {
     }
   }

+  # Run scripts sequentially
   provisioner "remote-exec" {
     inline = [
-      "chmod +x /home/ubuntu/install_rke2.sh",
-      "sudo /home/ubuntu/install_rke2.sh ${var.rke2_version} ${var.cert_manager_version}"
+      "chmod +x /home/ubuntu/install_rke2.sh /home/ubuntu/install_rancher.sh",
+      "sudo -i bash /home/ubuntu/install_rke2.sh '${var.rke2_version}' '${var.cert_manager_version}' '${var.rancher_repo_url}'",
+      "sudo -i bash /home/ubuntu/install_rancher.sh '${var.rancher_version}' '${var.rancher_password}' '${var.rancher_repo_url}' '${var.install_rancher}'"
     ]

     connection {

@@ -85,6 +101,27 @@ resource "null_resource" "provision_rke2" {
   }
 }

+resource "null_resource" "copy_kubeconfig" {
+  depends_on = [null_resource.provision_rke2]
+
+  provisioner "local-exec" {
+    command = "scp -i ${var.private_key_path} -o StrictHostKeyChecking=no ubuntu@${local.rke2_host_ip}:/tmp/rke2.yaml ./rke2-kubeconfig.yaml"
+  }
+}
+
+resource "null_resource" "move_kubeconfig_local" {
+  depends_on = [null_resource.copy_kubeconfig]
+
+  provisioner "local-exec" {
+    command = <<EOT
+      mkdir -p ~/.kube
+      mv ./rke2-kubeconfig.yaml ~/.kube/config
+      chmod 600 ~/.kube/config
+      echo "✅ kubeconfig placed at ~/.kube/config"
+EOT
+  }
+}
+
 resource "null_resource" "update_yaml" {
   provisioner "local-exec" {
     command = <<EOT
@@ -9,10 +9,23 @@ variable "security_group_id" {}
 variable "private_key_path" {}
 variable "preserve_eip" {
   type    = bool
-  default = false
+  default = true
 }
 variable "rke2_version" {}
 variable "cert_manager_version" {}
 variable "encryption_secret_key" {}
 variable "input_cluster_config" {}
 variable "cattle_config" {}
+variable "rancher_version" {
+}
+
+variable "rancher_password" {
+  description = "Bootstrap password for Rancher"
+  type        = string
+  sensitive   = false
+}
+variable "rancher_repo_url" {
+}
+
+variable "install_rancher" {
+}
@@ -74,6 +74,13 @@ resource "aws_security_group" "rancher_sg_allowall" {
     cidr_blocks = [local.my_ip_cidr]
     description = "Allow SSH access"
   }
+  ingress {
+    from_port   = 6443
+    to_port     = 6443
+    protocol    = "tcp"
+    cidr_blocks = [local.my_ip_cidr]
+    description = "Allow Kubernetes API access"
+  }

   ingress {
     from_port = 443
@@ -80,6 +80,12 @@ var _ = DescribeTable("Test: Rancher inplace backup and restore test.",
 		e2e.Logf("%v, %v, %v", userList, projList, roleList)
 		Expect(err).NotTo(HaveOccurred())

+		DeferCleanup(func() {
+			By("Delete the downstream clusters as part of cleanup")
+			err = resources.DeleteCluster(client, clusterName)
+			Expect(err).NotTo(HaveOccurred())
+		})
+
 		if params.CreateCluster == true {
 			By("Provisioning a downstream RKE2 cluster...")
 			clusterName, err = resources.CreateRKE2Cluster(clientWithSession, CloudCredentialName)
@@ -197,15 +203,10 @@ var _ = DescribeTable("Test: Rancher inplace backup and restore test.",
 		} else {
 			Expect(err).NotTo(HaveOccurred())
 		}

 		if params.CreateCluster == true {
 			By("Validating downstream clusters are in an Active status...")
 			err = resources.VerifyCluster(client, clusterName)
 			Expect(err).NotTo(HaveOccurred())
-
-			By("Delete the downstream clusters are in an Active status...")
-			err = resources.DeleteCluster(client, clusterName)
-			Expect(err).NotTo(HaveOccurred())
 		}
 	},
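One subtlety in the DeferCleanup added above: the closure captures the clusterName variable itself, not its value at registration time, so the name assigned by the later provisioning step is still visible during cleanup. A minimal hypothetical illustration:

    var name string
    DeferCleanup(func() {
        // Runs after the spec body; it sees the final value of name.
        fmt.Println("cleaning up:", name)
    })
    name = "rke2-abc123" // assigned later, still observed by the cleanup above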
@@ -15,12 +15,27 @@ limitations under the License.
 package migration

 import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
 	"testing"
+	"time"
+
+	"github.com/rancher/norman/types"
+	"github.com/rancher/observability-e2e/resources"
+	"github.com/rancher/observability-e2e/tests/helper/charts"
+	localConfig "github.com/rancher/observability-e2e/tests/helper/config"
+	localTerraform "github.com/rancher/observability-e2e/tests/helper/terraform"

 	terraform "github.com/gruntwork-io/terratest/modules/terraform"
+	"github.com/rancher/observability-e2e/tests/helper/utils"
+	"github.com/rancher/rancher/tests/v2/actions/pipeline"
+	"github.com/rancher/shepherd/clients/rancher"
+	management "github.com/rancher/shepherd/clients/rancher/generated/management/v3"
+	"github.com/rancher/shepherd/extensions/cloudcredentials"
+	"github.com/rancher/shepherd/extensions/cloudcredentials/aws"
+	"github.com/rancher/shepherd/extensions/clusters"
+	"github.com/rancher/shepherd/pkg/config"
+	session "github.com/rancher/shepherd/pkg/session"

 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
@@ -29,6 +44,39 @@ import (

 var tfCtx *localTerraform.TerraformContext

+// Sensitive secrets passed via env vars
+var envSecretsTerraformVarMap = map[string]string{
+	"ENCRYPTION_SECRET_KEY": "encryption_secret_key",
+	"RANCHER_PASSWORD":      "rancher_password",
+}
+
+// Non-sensitive config passed directly
+var envTerraformVarMap = map[string]string{
+	"CERT_MANAGER_VERSION": "cert_manager_version",
+	"KEY_NAME":             "key_name",
+	"RANCHER_VERSION":      "rancher_version",
+	"RKE2_VERSION":         "rke2_version",
+	"RANCHER_REPO_URL":     "rancher_repo_url",
+}
+
+var (
+	client              *rancher.Client
+	sess                *session.Session
+	project             *management.Project
+	cluster             *clusters.ClusterMeta
+	registrySetting     *management.Setting
+	s3Client            *resources.S3Client
+	BackupRestoreConfig *localConfig.BackupRestoreConfig
+	skipS3Tests         bool
+	CloudCredentialName string
+	CredentialConfig    *cloudcredentials.AmazonEC2CredentialConfig
+)
+
+const (
+	exampleAppProjectName = "System"
+	providerName          = "aws"
+)
+
 func FailWithReport(message string, callerSkip ...int) {
 	// Ensures the correct line numbers are reported
 	Fail(message, callerSkip[0]+1)
@@ -51,21 +99,114 @@ func TestE2E(t *testing.T) {
 }

 var _ = BeforeSuite(func() {
-	var err error
-	// Set up the Terraform context pointing to your configuration
+	By("Loading Terraform variables from environment")
+	terraformVars := localTerraform.LoadVarsFromEnv(envTerraformVarMap)
+
+	err := localTerraform.SetTerraformEnvVarsFromMap(envSecretsTerraformVarMap)
+	if err != nil {
+		e2e.Logf("Failed to set secret TF_VAR_*: %v", err)
+	}
+
+	By("Creating Terraform context")
 	tfCtx, err = localTerraform.NewTerraformContext(localTerraform.TerraformOptions{
 		// Relative path from the test file location to the Terraform config folder.
 		TerraformDir: "../../../resources/terraform/config/",
+		Vars:         terraformVars,
 	})
 	Expect(err).ToNot(HaveOccurred(), "Failed to create Terraform context")

-	// Initialize and apply the Terraform configuration.
-	terraform.InitAndApply(GinkgoT(), tfCtx.Options)
+	By("Initializing and applying Terraform configuration")
+	_, err = tfCtx.InitAndApply()
+	Expect(err).ToNot(HaveOccurred(), "Failed to init/apply Terraform context")
+
+	By("Loading Rancher config and creating admin token")
+	rancherConfig := new(rancher.Config)
+	config.LoadConfig(rancher.ConfigurationFileKey, rancherConfig)
+	token, err := pipeline.CreateAdminToken(os.Getenv("RANCHER_PASSWORD"), rancherConfig)
+	Expect(err).To(BeNil())
+	rancherConfig.AdminToken = token
+	config.UpdateConfig(rancher.ConfigurationFileKey, rancherConfig)
+
+	By("Loading AWS credential config")
+	CredentialConfig = new(cloudcredentials.AmazonEC2CredentialConfig)
+	config.LoadAndUpdateConfig("awsCredentials", CredentialConfig, func() {
+		CredentialConfig.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
+		CredentialConfig.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
+		CredentialConfig.DefaultRegion = os.Getenv("DEFAULT_REGION")
+	})
+
+	testSession := session.NewSession()
+	sess = testSession
+
+	By("Creating Rancher client")
+	client, err = rancher.NewClient("", testSession)
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Retrieving cluster metadata")
+	clusterName := client.RancherConfig.ClusterName
+	Expect(clusterName).NotTo(BeEmpty(), "Cluster name to install is not set")
+	cluster, err = clusters.NewClusterMeta(client, clusterName)
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Retrieving system-default-registry setting")
+	registrySetting, err = client.Management.Setting.ByID("system-default-registry")
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Locating or creating system project")
+	projectsList, err := client.Management.Project.List(&types.ListOpts{
+		Filters: map[string]interface{}{
+			"clusterId": cluster.ID,
+		},
+	})
+	Expect(err).NotTo(HaveOccurred())
+
+	for i := range projectsList.Data {
+		p := &projectsList.Data[i]
+		if p.Name == exampleAppProjectName {
+			project = p
+			break
+		}
+	}
+
+	if project == nil {
+		projectConfig := &management.Project{
+			ClusterID: cluster.ID,
+			Name:      exampleAppProjectName,
+		}
+		project, err = client.Management.Project.Create(projectConfig)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(project.Name).To(Equal(exampleAppProjectName))
+	}
+
+	By("Creating AWS cloud credentials")
+	cloudCredentialConfig := cloudcredentials.LoadCloudCredential(providerName)
+	cloudCredential, err := aws.CreateAWSCloudCredentials(client, cloudCredentialConfig)
+	Expect(err).NotTo(HaveOccurred())
+	CloudCredentialName = strings.Replace(cloudCredential.ID, "/", ":", 1)
+	Expect(CloudCredentialName).To(ContainSubstring("cc"))
+
+	By("Loading backup/restore config and setting dynamic S3 bucket name")
+	BackupRestoreConfig = &localConfig.BackupRestoreConfig{}
+	filePath, _ := filepath.Abs(charts.BackupRestoreConfigurationFileKey)
+	err = utils.LoadConfigIntoStruct(filePath, BackupRestoreConfig)
+	Expect(err).NotTo(HaveOccurred())
+	BackupRestoreConfig.S3BucketName = fmt.Sprintf("backup-restore-automation-test-%d", time.Now().Unix())
+
+	if BackupRestoreConfig.AccessKey != "" {
+		By("Creating S3 client and S3 bucket")
+		s3Client, err = resources.NewS3Client(BackupRestoreConfig)
+		Expect(err).NotTo(HaveOccurred())
+		err = s3Client.CreateBucket(BackupRestoreConfig.S3BucketName, BackupRestoreConfig.S3Region)
+		Expect(err).NotTo(HaveOccurred())
+		e2e.Logf("S3 bucket '%s' created successfully", BackupRestoreConfig.S3BucketName)
+	} else {
+		skipS3Tests = true
+	}
 })

 var _ = AfterSuite(func() {
 	// Tear down the infrastructure after all tests finish.
-	terraform.Destroy(GinkgoT(), tfCtx.Options)
+	By("Destroying Terraform infrastructure")
+	if tfCtx != nil {
+		_, err := tfCtx.DestroyTarget("module.ec2.aws_instance.rke2_node")
+		Expect(err).ToNot(HaveOccurred(), "Failed to Destroy Terraform Resource")
+	}
 })
@@ -1,15 +1,287 @@
+/*
+Copyright © 2024 - 2025 SUSE LLC
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
 package migration

 import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
 	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
+	resources "github.com/rancher/observability-e2e/resources/rancher"
+	"github.com/rancher/observability-e2e/tests/helper/charts"
+	"github.com/rancher/observability-e2e/tests/helper/helm"
+	localkubectl "github.com/rancher/observability-e2e/tests/helper/kubectl"
+	"github.com/rancher/observability-e2e/tests/helper/utils"
+	"github.com/rancher/rancher/tests/v2/actions/pipeline"
+	"github.com/rancher/shepherd/clients/rancher"
+	"github.com/rancher/shepherd/clients/rancher/catalog"
+	extencharts "github.com/rancher/shepherd/extensions/charts"
+	"github.com/rancher/shepherd/pkg/config"
+	namegen "github.com/rancher/shepherd/pkg/namegenerator"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	e2e "k8s.io/kubernetes/test/e2e/framework"
 )

-// This currently a sample test and migration test will be added in the next PR, Otherwise it will be pretty big PR to check
-// ::TODO::
-var _ = Describe("Rancher Backup and Restore Migration", Label("LEVEL0", "migration"), func() {
-	It("should validate the backup and restore flow", func() {
-		By("Checking that the Terraform context is valid")
-		Expect(tfCtx).ToNot(BeNil())
-	})
-})
+type MigrationParams struct {
+	StorageType              string
+	BackupOptions            charts.BackupOptions
+	BackupFileExtension      string
+	ProvisioningInput        charts.ProvisioningConfig
+	Prune                    bool
+	CreateCluster            bool
+	EncryptionConfigFilePath string
+}
+
+var clusterName string
+
+var _ = DescribeTable("Test: Validate the Backup and Restore Migration Scenario from RKE2 to RKE2",
+	func(params MigrationParams) {
+		var (
+			clientWithSession *rancher.Client
+			err               error
+		)
+		By("Creating a client session")
+		clientWithSession, err = client.WithSession(sess)
+		Expect(err).NotTo(HaveOccurred())
+
+		err = charts.SelectResourceSetName(clientWithSession, &params.BackupOptions)
+		Expect(err).NotTo(HaveOccurred())
+		By(fmt.Sprintf("Installing Backup Restore Chart with %s", params.StorageType))
+
+		// Check if the chart is already installed
+		initialBackupRestoreChart, err := extencharts.GetChartStatus(clientWithSession, project.ClusterID, charts.RancherBackupRestoreNamespace, charts.RancherBackupRestoreName)
+		Expect(err).NotTo(HaveOccurred())
+
+		e2e.Logf("Checking if the backup and restore chart is already installed")
+		if initialBackupRestoreChart.IsAlreadyInstalled {
+			e2e.Logf("Backup and Restore chart is already installed in project: %v", exampleAppProjectName)
+		}
+
+		By(fmt.Sprintf("Configuring/Creating required resources for the storage type: %s testing", params.StorageType))
+		secretName, err := charts.CreateStorageResources(params.StorageType, clientWithSession, BackupRestoreConfig)
+		Expect(err).NotTo(HaveOccurred())
+
+		By("Creating two users, projects, and role templates...")
+		userList, projList, roleList, err := resources.CreateRancherResources(clientWithSession, project.ClusterID, "cluster")
+		e2e.Logf("%v, %v, %v", userList, projList, roleList)
+		Expect(err).NotTo(HaveOccurred())
+
+		DeferCleanup(func() {
+			By("Delete the downstream clusters as part of cleanup")
+			err = resources.DeleteCluster(client, clusterName)
+			Expect(err).NotTo(HaveOccurred())
+		})
+		if params.CreateCluster == true {
+			By("Provisioning a downstream RKE2 cluster...")
+			clusterName, err = resources.CreateRKE2Cluster(clientWithSession, CloudCredentialName)
+			Expect(err).NotTo(HaveOccurred())
+		}
+
+		DeferCleanup(func() {
+			By(fmt.Sprintf("Deleting required resources used for the storage type: %s testing", params.StorageType))
+			err = charts.DeleteStorageResources(params.StorageType, clientWithSession, BackupRestoreConfig)
+			Expect(err).NotTo(HaveOccurred())
+		})
+
+		// Get the latest version of the backup restore chart
+		if !initialBackupRestoreChart.IsAlreadyInstalled {
+			latestBackupRestoreVersion, err := clientWithSession.Catalog.GetLatestChartVersion(charts.RancherBackupRestoreName, catalog.RancherChartRepo)
+			Expect(err).NotTo(HaveOccurred())
+			e2e.Logf("Retrieved latest backup-restore chart version to install: %v", latestBackupRestoreVersion)
+			latestBackupRestoreVersion = utils.GetEnvOrDefault("BACKUP_RESTORE_CHART_VERSION", latestBackupRestoreVersion)
+			backuprestoreInstOpts := &charts.InstallOptions{
+				Cluster:   cluster,
+				Version:   latestBackupRestoreVersion,
+				ProjectID: project.ID,
+			}
+
+			backuprestoreOpts := &charts.RancherBackupRestoreOpts{
+				VolumeName:                BackupRestoreConfig.VolumeName,
+				StorageClassName:          BackupRestoreConfig.StorageClassName,
+				BucketName:                BackupRestoreConfig.S3BucketName,
+				CredentialSecretName:      secretName,
+				CredentialSecretNamespace: BackupRestoreConfig.CredentialSecretNamespace,
+				Enabled:                   true,
+				Endpoint:                  BackupRestoreConfig.S3Endpoint,
+				Folder:                    BackupRestoreConfig.S3FolderName,
+				Region:                    BackupRestoreConfig.S3Region,
+			}
+
+			By(fmt.Sprintf("Installing the version %s for the backup restore", latestBackupRestoreVersion))
+			err = charts.InstallRancherBackupRestoreChart(clientWithSession, backuprestoreInstOpts, backuprestoreOpts, true, params.StorageType)
+			Expect(err).NotTo(HaveOccurred())
+
+			By("Waiting for backup-restore chart deployments to have expected replicas")
+			errDeployChan := make(chan error, 1)
+			go func() {
+				err = extencharts.WatchAndWaitDeployments(clientWithSession, project.ClusterID, charts.RancherBackupRestoreNamespace, metav1.ListOptions{})
+				errDeployChan <- err
+			}()
+
+			select {
+			case err := <-errDeployChan:
+				Expect(err).NotTo(HaveOccurred())
+			case <-time.After(2 * time.Minute):
+				e2e.Failf("Timeout waiting for WatchAndWaitDeployments to complete")
+			}
+		}
+		By("Checking if the backup needs to be encrypted; if so, creating the encryptionconfig secret")
+		if params.BackupOptions.EncryptionConfigSecretName != "" {
+			secretName, err = charts.CreateEncryptionConfigSecret(client.Steve, params.EncryptionConfigFilePath,
+				params.BackupOptions.EncryptionConfigSecretName, charts.RancherBackupRestoreNamespace)
+			if err != nil {
+				e2e.Logf("Error applying encryption config: %v", err)
+			}
+			e2e.Logf("Successfully created encryption config secret: %s", secretName)
+		}
+
+		_, filename, err := charts.CreateRancherBackupAndVerifyCompleted(clientWithSession, params.BackupOptions)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(filename).To(ContainSubstring(params.BackupOptions.Name))
+		Expect(filename).To(ContainSubstring(params.BackupFileExtension))
+
+		By("Validating backup file is present in AWS S3...")
+		s3Location := BackupRestoreConfig.S3BucketName + "/" + BackupRestoreConfig.S3FolderName
+		result, err := s3Client.FileExistsInBucket(s3Location, filename)
+		Expect(err).NotTo(HaveOccurred())
+		Expect(result).To(Equal(true))
+
+		// Now that the backup exists, tear down the old instance and start the migration
+		By("Backup is present; removing the instance to begin migration")
+		_, err = tfCtx.DestroyTarget("module.ec2.aws_instance.rke2_node")
+		if err != nil {
+			e2e.Logf("rke2_node destroy failed: %v", err)
+		}
+		By("Old server destroyed; spinning up a new machine and restoring the backup")
+		tfCtx.Options.Vars["install_rancher"] = false
+		_, err = tfCtx.InitAndApply()
+		Expect(err).ToNot(HaveOccurred(), "Failed to spin up the new machine")
+
+		By(fmt.Sprintf("Configuring/Creating required resources for the storage type: %s testing", params.StorageType))
+		_, err = localkubectl.Execute(
+			"create", "secret", "generic", "s3-creds",
+			"--from-literal=accessKey="+CredentialConfig.AccessKey,
+			"--from-literal=secretKey="+CredentialConfig.SecretKey,
+		)
+		Expect(err).NotTo(HaveOccurred(), "Failed to create secret for backup and restore")
+
+		By("Create the cattle-system namespace")
+		createNamespace := []string{"create", "namespace", "cattle-system"}
+		_, err = localkubectl.Execute(createNamespace...)
+		Expect(err).NotTo(HaveOccurred(), "Failed to create namespace")
+
+		// TODO: add a way to fetch the Rancher version and pass it to the install
+		By("Checking out the charts repo based on the Rancher upstream version")
+		rancherVersion := tfCtx.Options.Vars["rancher_version"].(string)
+		branch := "dev-" + strings.Join(strings.Split(rancherVersion, ".")[:2], ".")
+		chartDir, err := charts.DownloadAndExtractRancherCharts(branch)
+		Expect(err).NotTo(HaveOccurred(), "Failed to download and extract repo")
+		e2e.Logf("Extracted charts directory: %s\n", chartDir)
+
+		backupRestoreChartVersion := os.Getenv("BACKUP_RESTORE_CHART_VERSION")
+
+		By("Installing the rancher-backup-crd chart")
+		rancherBackupCrdPath := filepath.Join(chartDir, "charts", "rancher-backup-crd")
+		err = helm.InstallChartFromPath("rancher-backup-crd", rancherBackupCrdPath, backupRestoreChartVersion, charts.RancherBackupRestoreNamespace)
+		Expect(err).NotTo(HaveOccurred(), "Failed to install the rancher-backup-crd")
+
+		By("Installing the rancher-backup chart")
+		rancherBackupPath := filepath.Join(chartDir, "charts", "rancher-backup")
+		err = helm.InstallChartFromPath("rancher-backup", rancherBackupPath, backupRestoreChartVersion, charts.RancherBackupRestoreNamespace)
+		Expect(err).NotTo(HaveOccurred(), "Failed to install the rancher-backup")
+
+		_, err = helm.Execute("", "list", "-n", "cattle-resources-system")
+		Expect(err).NotTo(HaveOccurred(), "expected rancher-backup and rancher-backup-crd to be deployed")
+
+		By("Create the encryption config")
+		encryptionconfigFilePath := utils.GetYamlPath("tests/helper/yamls/encryption-provider-config.yaml")
+		_, err = localkubectl.Execute(
+			"create", "secret", "generic", "encryptionconfig",
+			"--from-file="+encryptionconfigFilePath,
+			"-n", "cattle-resources-system",
+		)
+		Expect(err).NotTo(HaveOccurred(), "Failed to create the encryptionconfig")
+
+		By("Creating the restore-migration yaml and applying it")
+		migrationYamlData := charts.MigrationYamlData{
+			BackupFilename: filename,
+			BucketName:     BackupRestoreConfig.S3BucketName,
+			Folder:         BackupRestoreConfig.S3FolderName,
+			Region:         BackupRestoreConfig.S3Region,
+			Endpoint:       BackupRestoreConfig.S3Endpoint,
+		}
+		err = utils.GenerateYAMLFromTemplate(
+			utils.GetYamlPath("tests/helper/yamls/restore-migration.template.yaml"),
+			"restore-migration.yaml",
+			migrationYamlData,
+		)
+		Expect(err).NotTo(HaveOccurred(), "Failed to generate restore-migration.yaml")
+
+		_, err = localkubectl.Execute("apply", "-f", "restore-migration.yaml")
+		Expect(err).NotTo(HaveOccurred(), "Failed to apply the Restore Migration Process")
+		e2e.Logf("Waiting 3 minutes for the restore to progress...")
+		time.Sleep(3 * time.Minute)
+
+		// output, err := localkubectl.Execute("get", "restore")
+		// Expect(err).NotTo(HaveOccurred(), "Failed restore the backup")
+		// Expect(string(output)).To(ContainSubstring("Completed"), "Restore not completed")
+
+		rancherRepoURL := tfCtx.Options.Vars["rancher_repo_url"].(string)
+		password := os.Getenv("RANCHER_PASSWORD")
+
+		By("Installing Rancher now that the restore has succeeded")
+		err = resources.InstallRancher("", rancherRepoURL, rancherVersion, clientWithSession.RancherConfig.Host, password)
+		Expect(err).NotTo(HaveOccurred(), "Failed to install Rancher after the restore")
+
+		rancherConfig := new(rancher.Config)
+		config.LoadConfig(rancher.ConfigurationFileKey, rancherConfig)
+		token, err := pipeline.CreateAdminToken(os.Getenv("RANCHER_PASSWORD"), rancherConfig)
+		Expect(err).To(BeNil())
+		rancherConfig.AdminToken = token
+		config.UpdateConfig(rancher.ConfigurationFileKey, rancherConfig)
+
+		By("Verify that the downstream clusters are showing up correctly")
+		err = resources.VerifyCluster(clientWithSession, clusterName)
+		if err != nil {
+			e2e.Logf("cluster %s is not Active", clusterName)
+		}
+		Expect(err).NotTo(HaveOccurred(), "Downstream cluster did not become Active")
+	},
+
+	// Test case: backup and restore migration scenario (RKE2 to RKE2)
+	Entry("(with encryption)", Label("LEVEL0", "backup-restore", "migration"), MigrationParams{
+		StorageType: "s3",
+		BackupOptions: charts.BackupOptions{
+			Name:                       namegen.AppendRandomString("backup"),
+			RetentionCount:             10,
+			EncryptionConfigSecretName: "encryptionconfig",
+		},
+		BackupFileExtension: ".tar.gz.enc",
+		ProvisioningInput: charts.ProvisioningConfig{
+			RKE2KubernetesVersions: []string{utils.GetEnvOrDefault("RKE2_VERSION", "v1.31.5+rke2r1")},
+			Providers:              []string{"aws"},
+			NodeProviders:          []string{"ec2"},
+			CNIs:                   []string{"calico"},
+		},
+		Prune:                    false,
+		CreateCluster:            true,
+		EncryptionConfigFilePath: charts.EncryptionConfigFilePath,
+	}),
+)
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"os/exec"
 	"path/filepath"
 	"strings"
 	"time"

@@ -50,7 +51,7 @@ var (
 	}
 	BackupRestoreConfigurationFileKey = utils.GetYamlPath("tests/helper/yamls/inputBackupRestoreConfig.yaml")
 	localStorageClass                 = utils.GetYamlPath("tests/helper/yamls/localStorageClass.yaml")
-	EncryptionConfigFilePath          = utils.GetYamlPath("tests/helper/yamls/encrptionConfig.yaml")
+	EncryptionConfigFilePath          = utils.GetYamlPath("tests/helper/yamls/encryption-provider-config.yaml")
 	EncryptionConfigAsteriskFilePath  = utils.GetYamlPath("tests/helper/yamls/encrptionConfigwithAsterisk.yaml")
 )

@@ -77,6 +78,14 @@ type BackupParams struct {
 	SecretsExists bool
 }

+type MigrationYamlData struct {
+	BackupFilename string
+	BucketName     string
+	Folder         string
+	Region         string
+	Endpoint       string
+}
+
 // InstallRancherBackupRestoreChart installs the Rancher backup/restore chart with optional storage configuration.
 func InstallRancherBackupRestoreChart(client *rancher.Client, installOpts *InstallOptions, chartOpts *RancherBackupRestoreOpts, withStorage bool, storageType string) error {
 	serverSetting, err := client.Management.Setting.ByID(serverURLSettingID)

@@ -567,7 +576,7 @@ func SelectResourceSetName(clientWithSession *rancher.Client, params *BackupOpti
 		return err
 	}
 	if ok {
-		params.ResourceSetName = "rancher-resource-set-basic"
+		params.ResourceSetName = "rancher-resource-set-full"
 	} else {
 		params.ResourceSetName = "rancher-resource-set"
 	}

@@ -619,3 +628,43 @@ func WaitForDeploymentsCleanup(client *rancher.Client, clusterID string, namespa
 		}
 	}
 }
+
+// DownloadAndExtractRancherCharts downloads and extracts Rancher charts from the given branch.
+// It always extracts to a fixed directory, replacing any previous contents.
+func DownloadAndExtractRancherCharts(branch string) (string, error) {
+	// Define a fixed extraction directory
+	baseDir := os.TempDir() // works cross-platform
+	extractDir := filepath.Join(baseDir, "rancher-charts-extracted")
+
+	// If the directory exists, delete it first
+	if _, err := os.Stat(extractDir); err == nil {
+		if err := os.RemoveAll(extractDir); err != nil {
+			return "", fmt.Errorf("failed to remove previous charts dir: %w", err)
+		}
+	}
+
+	if err := os.MkdirAll(extractDir, 0755); err != nil {
+		return "", fmt.Errorf("failed to create extract dir: %w", err)
+	}
+
+	// GitHub archive URL
+	url := fmt.Sprintf("https://github.com/rancher/charts/tarball/%s", branch)
+
+	// Download and extract
+	cmd := exec.Command("sh", "-c", fmt.Sprintf("curl -Ls %s | tar -xz -C %s", url, extractDir))
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return "", fmt.Errorf("failed to download/extract charts: %v\n%s", err, output)
+	}
+
+	e2e.Logf("✅ Rancher charts extracted to: %s\n", extractDir)
+
+	// Find the actual extracted directory (the GitHub tarball contains a single top-level folder)
+	files, err := os.ReadDir(extractDir)
+	if err != nil || len(files) == 0 {
+		return "", fmt.Errorf("extracted directory is empty or unreadable: %w", err)
+	}
+
+	extractedPath := filepath.Join(extractDir, files[0].Name())
+	return extractedPath, nil
+}
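A hypothetical call, mirroring how the migration test later derives the charts branch from the Rancher version (for example, v2.11.2 maps to dev-v2.11):

    rancherVersion := "v2.11.2" // placeholder
    branch := "dev-" + strings.Join(strings.Split(rancherVersion, ".")[:2], ".") // "dev-v2.11"
    chartDir, err := charts.DownloadAndExtractRancherCharts(branch)
    if err != nil {
        e2e.Failf("charts download failed: %v", err)
    }
    e2e.Logf("charts extracted to %s", chartDir)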
@@ -0,0 +1,49 @@
+package helm
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"os/user"
+	"path/filepath"
+)
+
+// Execute runs a helm command with the given kubeconfig path.
+// If kubeconfig is empty, it defaults to ~/.kube/config.
+func Execute(kubeconfig string, args ...string) (string, error) {
+	if kubeconfig == "" {
+		usr, err := user.Current()
+		if err != nil {
+			return "", fmt.Errorf("could not get current user: %w", err)
+		}
+		kubeconfig = filepath.Join(usr.HomeDir, ".kube", "config")
+	}
+
+	cmd := exec.Command("helm", args...)
+	cmd.Env = append(os.Environ(), "KUBECONFIG="+kubeconfig)
+	output, err := cmd.CombinedOutput() // get stdout and stderr
+	return string(output), err
+}
+
+// InstallChartFromPath installs a Helm chart from a local directory using the Rancher kubeconfig.
+func InstallChartFromPath(chartName, chartPath, chartVersion, namespace string) error {
+	fullChartPath := filepath.Join(chartPath, chartVersion, "/.")
+
+	if _, err := os.Stat(fullChartPath); os.IsNotExist(err) {
+		return fmt.Errorf("chart path does not exist: %s", fullChartPath)
+	}
+	// Helm install arguments
+	args := []string{
+		"install", chartName, fullChartPath,
+		"-n", namespace, "--create-namespace",
+	}
+
+	// Run the Helm command
+	if _, err := Execute("", args...); err != nil {
+		return fmt.Errorf("helm install failed for %s: %w", chartName, err)
+	}
+
+	log.Printf("Successfully installed chart '%s' from '%s' into namespace '%s'\n", chartName, fullChartPath, namespace)
+	return nil
+}
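Hypothetical usage of the two helpers above (the chart path and version are placeholders):

    // List releases against an explicit kubeconfig.
    out, err := helm.Execute("/tmp/rke2.yaml", "list", "-A")
    if err != nil {
        e2e.Logf("helm list failed: %v\n%s", err, out)
    }

    // Install a local chart; the empty kubeconfig falls back to ~/.kube/config.
    err = helm.InstallChartFromPath("rancher-backup", "/tmp/charts/rancher-backup",
        "106.0.0", "cattle-resources-system") // placeholder path and version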
@@ -0,0 +1,24 @@
+package kubectl
+
+import (
+	"bytes"
+	"errors"
+	"os/exec"
+)
+
+// Execute runs a kubectl command with the given arguments and returns its
+// stdout; on failure, kubectl's stderr is returned as the error.
+func Execute(args ...string) (string, error) {
+	cmd := exec.Command("kubectl", args...)
+
+	var stdout, stderr bytes.Buffer
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+
+	err := cmd.Run()
+	if err != nil {
+		return "", errors.New(stderr.String())
+	}
+
+	return stdout.String(), nil
+}
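Hypothetical usage, matching how the migration test calls this helper:

    out, err := localkubectl.Execute("get", "restore", "-o", "wide")
    if err != nil {
        // err carries kubectl's stderr verbatim (see Execute above).
        e2e.Logf("kubectl failed: %v", err)
    }
    e2e.Logf("restores:\n%s", out)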
@@ -9,6 +9,13 @@ type TerraformContext struct {
 	Options *terraform.Options
 }

+// TerraformOptions is your input struct to control dir, vars, env vars
+type TerraformOptions struct {
+	TerraformDir string
+	Vars         map[string]interface{}
+	EnvVars      map[string]string
+}
+
 // NewTerraformContext initializes a new Terraform context with options.
 func NewTerraformContext(opts TerraformOptions) (*TerraformContext, error) {
 	tfOpts := &terraform.Options{

@@ -34,23 +41,33 @@ func NewTerraformContext(opts TerraformOptions) (*TerraformContext, error) {
 	}, nil
 }

-func (ctx *TerraformContext) InitAndApply() error {
-	terraform.InitAndApply(nil, ctx.Options)
-	return nil
+func (ctx *TerraformContext) InitAndApply() (string, error) {
+	return terraform.InitAndApplyE(ginkgo.GinkgoT(), ctx.Options)
 }

 func (ctx *TerraformContext) OutputAll() map[string]interface{} {
 	return terraform.OutputAll(ginkgo.GinkgoT(), ctx.Options)
 }

-func (ctx *TerraformContext) Destroy() error {
-	terraform.Destroy(nil, ctx.Options)
-	return nil
+// Destroy tears down the infrastructure.
+func (ctx *TerraformContext) Destroy() (string, error) {
+	return terraform.DestroyE(ginkgo.GinkgoT(), ctx.Options)
 }

-// TerraformOptions is your input struct to control dir, vars, env vars
-type TerraformOptions struct {
-	TerraformDir string
-	Vars         map[string]interface{}
-	EnvVars      map[string]string
-}
+// DestroyTarget destroys specific Terraform resources using the -target flag,
+// and resets the Targets field so the context can be reused safely.
+func (ctx *TerraformContext) DestroyTarget(targets ...string) (string, error) {
+	// Back up the original targets
+	originalTargets := ctx.Options.Targets
+
+	// Set the target(s) temporarily
+	ctx.Options.Targets = targets
+
+	// Perform the targeted destroy
+	output, err := terraform.DestroyE(ginkgo.GinkgoT(), ctx.Options)
+
+	// Reset targets to their original state
+	ctx.Options.Targets = originalTargets
+
+	return output, err
+}
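This targeted destroy is what lets the migration test replace only the EC2 node while the VPC, subnet, and S3 bucket survive. A hypothetical sketch:

    // Destroy only the RKE2 node; shared infrastructure survives.
    _, err := tfCtx.DestroyTarget("module.ec2.aws_instance.rke2_node")
    if err != nil {
        e2e.Failf("targeted destroy failed: %v", err)
    }

    // Re-apply to provision a fresh node, this time without Rancher installed.
    tfCtx.Options.Vars["install_rancher"] = false
    _, err = tfCtx.InitAndApply()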
@@ -0,0 +1,35 @@
+package terraform
+
+import (
+	"fmt"
+	"os"
+)
+
+// LoadVarsFromEnv builds a map of Terraform variables by checking env vars.
+// Only adds the variable if the corresponding environment variable is set.
+func LoadVarsFromEnv(envToTfVarMap map[string]string) map[string]interface{} {
+	vars := make(map[string]interface{})
+	for envKey, tfVarName := range envToTfVarMap {
+		val := os.Getenv(envKey)
+		if val != "" {
+			vars[tfVarName] = val
+		}
+	}
+	return vars
+}
+
+// SetTerraformEnvVarsFromMap sets secret TF_VAR_* environment variables from a map.
+func SetTerraformEnvVarsFromMap(envToTfVarMap map[string]string) error {
+	for envKey, tfVarName := range envToTfVarMap {
+		val := os.Getenv(envKey)
+		if val == "" {
+			continue // skip unset values
+		}
+		tfEnvKey := fmt.Sprintf("TF_VAR_%s", tfVarName)
+		err := os.Setenv(tfEnvKey, val)
+		if err != nil {
+			return fmt.Errorf("failed to set %s: %w", tfEnvKey, err)
+		}
+	}
+	return nil
+}
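For reference, Terraform reads a variable named foo from the environment variable TF_VAR_foo, which is why routing secrets through this helper keeps them off the terraform command line. A minimal sketch of what SetTerraformEnvVarsFromMap effectively does for one entry:

    // RANCHER_PASSWORD is re-exported as TF_VAR_rancher_password, so the secret
    // never appears in the terraform plan/apply command line.
    os.Setenv("TF_VAR_rancher_password", os.Getenv("RANCHER_PASSWORD"))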
@@ -5,26 +5,31 @@ import (
 	"fmt"
 )

+type TerraformValue struct {
+	Sensitive bool        `json:"sensitive"`
+	Type      string      `json:"type"`
+	Value     interface{} `json:"value"`
+}
+
 type TerraformOutputs struct {
-	PublicIP     string `json:"public_ip"`
+	PublicIP     string `json:"ec2_public_ip"`
 	S3BucketName string `json:"s3_bucket_name"`
-	NetworkInfo  struct {
-		SubnetID string `json:"subnet_id"`
-		VPCID    string `json:"vpc_id"`
-	} `json:"network_info"`
+	SubnetID     string `json:"subnet_id"`
+	VPCID        string `json:"vpc_id"`
 }

 func ParseTerraformOutputs(tfCtx *TerraformContext) (*TerraformOutputs, error) {
 	rawOutputs := tfCtx.OutputAll()

 	// Marshal to JSON
 	buf, err := json.Marshal(rawOutputs)
 	if err != nil {
 		return nil, fmt.Errorf("failed to marshal terraform outputs: %w", err)
 	}

 	// Unmarshal into correct structure
 	var outputs TerraformOutputs
-	err = json.Unmarshal(buf, &outputs)
-	if err != nil {
+	if err := json.Unmarshal(buf, &outputs); err != nil {
 		return nil, fmt.Errorf("failed to unmarshal terraform outputs: %w", err)
 	}
@@ -13,6 +13,7 @@ import (
 	"path/filepath"
 	"strconv"
 	"strings"
+	"text/template"

 	"github.com/creasty/defaults"
 	rancher "github.com/rancher/shepherd/clients/rancher"

@@ -386,3 +387,18 @@ func GetYamlPath(relativeYamlPath string) string {

 	return absPath
 }
+
+// GenerateYAMLFromTemplate renders templateFile with data and writes the result to outputFile.
+func GenerateYAMLFromTemplate(templateFile, outputFile string, data any) error {
+	tmpl, err := template.ParseFiles(templateFile)
+	if err != nil {
+		return err
+	}
+
+	output, err := os.Create(outputFile)
+	if err != nil {
+		return err
+	}
+	defer output.Close()
+
+	return tmpl.Execute(output, data)
+}
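Hypothetical usage, mirroring how the migration test renders the restore manifest from the template added at the end of this diff (all values are placeholders):

    data := charts.MigrationYamlData{
        BackupFilename: "backup-abc123.tar.gz.enc", // placeholder
        BucketName:     "my-backup-bucket",         // placeholder
        Folder:         "rancher",
        Region:         "us-east-2",
        Endpoint:       "s3.us-east-2.amazonaws.com",
    }
    err := utils.GenerateYAMLFromTemplate(
        utils.GetYamlPath("tests/helper/yamls/restore-migration.template.yaml"),
        "restore-migration.yaml",
        data,
    )
    // restore-migration.yaml can now be applied with the kubectl helper.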
@@ -0,0 +1,16 @@
+apiVersion: resources.cattle.io/v1
+kind: Restore
+metadata:
+  name: restore-migration
+spec:
+  backupFilename: {{ .BackupFilename }}
+  prune: false
+  encryptionConfigSecretName: encryptionconfig
+  storageLocation:
+    s3:
+      credentialSecretName: s3-creds
+      credentialSecretNamespace: default
+      bucketName: {{ .BucketName }}
+      folder: {{ .Folder }}
+      region: {{ .Region }}
+      endpoint: {{ .Endpoint }}