Add Harvester infra modules (#13)

Signed-off-by: Silvio Moioli <silvio@moioli.net>
Co-authored-by: Silvio Moioli <silvio@moioli.net>
This commit is contained in:
Iramis Valentin 2025-04-14 15:46:52 -04:00 committed by GitHub
parent eeacca101c
commit f47e010e16
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
25 changed files with 859 additions and 5 deletions

9
.gitignore vendored
View File

@ -2,12 +2,13 @@
.DS_Store
# tofu related
tofu/**/*/.terraform
tofu/**/*/.terraform.lock.hcl
tofu/**/*/terraform.tfstate
tofu/**/*/terraform.tfstate.*
**/.terraform/*
*.tfstate
*.tfstate.*
.terraform.lock.hcl
tofu/main/*/*config
*.tfvars
*.tfvars.json
# IDE related
.idea

View File

@ -124,3 +124,15 @@ ssh remotehost -L 2375:localhost:2375 -L 8443:localhost:8443 $(for KUBEPORT in $
When using `k3d`, change `RANCHER_IMAGE_TAG` and if an image with the same tag is found it will be added to relevant clusters.
This is useful during Rancher development to test Rancher changes on k3d clusters.
## Harvester: bypassing TLS verification
If you get the following error:
```
Error: Get "https://$ADDRESS/k8s/clusters/local/apis/harvesterhci.io/v1beta1/settings/server-version": tls: failed to verify certificate
```
Then your Harvester installation's TLS certificate is not set up correctly or is not trusted by your system. Ideally, address those issues; otherwise, communication with Harvester will not be secure.
If you want to bypass TLS checks, edit your kubeconfig file to remove the `certificate-authority-data` entry and add an `insecure-skip-tls-verify: true` entry instead.

152
darts/harvester.yaml Normal file
View File

@ -0,0 +1,152 @@
# Deploys Rancher and prepares it for performance tests
tofu_main_directory: ./tofu/main/harvester
tofu_parallelism: 100 # Harvester tolerates high values well
tofu_variables:
project_name: st
namespace: st
# Uncomment and add the path to your Harvester kubeconfig. Get one with these instructions:
# https://docs.harvesterhci.io/v1.3/faq/#how-can-i-access-the-kubeconfig-file-of-the-harvester-cluster
# Leaving commented uses the file pointed by the KUBECONFIG environment variable
# kubeconfig:
ssh_public_key_path: ~/.ssh/id_ed25519.pub
ssh_private_key_path: ~/.ssh/id_ed25519
ssh_user: opensuse
# Must set the following bastion vars appropriately if there is a bastion between you and Harvester
# ssh_bastion_user: root
# ssh_bastion_host: # Must set this appropriately if there is a bastion between you and Harvester
# ssh_bastion_key_path: ~/.ssh/bastion_id_ed25519 # Must set this appropriately if there is a bastion between you and Harvester
# Set to false to skip the creation of the openSUSE Leap 15.6 image
create_image: true
network:
create: false
name: vlan2179-public
clusternetwork_name: mgmt
vlan_id: 2179
namespace: harvester-public
interface_type: bridge
interface_model: virtio
public: true
wait_for_lease: true
upstream_cluster:
server_count: 3
agent_count: 0
distro_version: v1.26.9+k3s1
public_ip: false
reserve_node_for_monitoring: false
enable_audit_log: false
node_module_variables:
cpu: 2
memory: 8
disks:
- name: "disk-0"
size: 35
type: "disk"
bus: "virtio"
password: linux # Non-SSH password
# Uncomment to override the image created by the create_image flag above
# image_name: openSUSE-leap-micro-6.0
# image_namespace: harvester-public
ssh_shared_public_keys: [
# Uncomment to add shared keys that exist in Harvester
# { name: bullseye-qa, namespace: bullseye },
]
tags: {}
upstream_cluster_distro_module: generic/k3s
tester_cluster:
server_count: 1
agent_count: 0
distro_version: v1.26.9+k3s1
public_ip: false
reserve_node_for_monitoring: false
enable_audit_log: false
node_module_variables:
cpu: 2
memory: 4
disks:
- name: "disk-0"
size: 35
type: "disk"
bus: "virtio"
password: linux # Non-SSH password
# Uncomment to override the image created by the create_image flag above
# image_name: openSUSE-leap-micro-6.0
# image_namespace: harvester-public
ssh_shared_public_keys: [
# Uncomment to add shared keys that exist in Harvester
# { name: bullseye-qa, namespace: bullseye },
]
tags: {}
tester_cluster_distro_module: generic/k3s
downstream_cluster_templates:
- cluster_count: 0
server_count: 1
agent_count: 0
distro_version: v1.26.9+k3s1
public_ip: false
reserve_node_for_monitoring: false
enable_audit_log: false
node_module_variables:
cpu: 2
memory: 8
disks:
- name: "disk-0"
size: 35
type: "disk"
bus: "virtio"
password: linux # Non-SSH password
# Uncomment to override the image created by the create_image flag above
# image_name: openSUSE-leap-micro-6.0
# image_namespace: harvester-public
ssh_shared_public_keys: [
# Uncomment to add shared keys that exist in Harvester
# { name: bullseye-qa, namespace: bullseye },
]
tags: {}
downstream_cluster_distro_module: generic/k3s
first_kubernetes_api_port: 7445
first_app_http_port: 9080
first_app_https_port: 9443
chart_variables:
rancher_replicas: 1
downstream_rancher_monitoring: true
admin_password: adminadminadmin
rancher_monitoring_version: 104.1.0+up57.0.3 # see https://github.com/rancher/charts/tree/release-v2.9/assets/rancher-monitoring-crd
cert_manager_version: 1.8.0
tester_grafana_version: 6.56.5
rancher_version: 2.9.1
force_prime_registry: false
# Use the following for 2.8.6:
# rancher_version: 2.8.6
# rancher_monitoring_version: 103.1.1+up45.31.1 # see https://github.com/rancher/charts/tree/release-v2.8/assets/rancher-monitoring-crd
# Add the following to set a custom image:
# rancher_image_override: rancher/rancher
# rancher_image_tag_override: v2.8.6-debug-1
test_variables:
test_config_maps: 2000
test_secrets: 2000
test_roles: 20
test_users: 10
test_projects: 20

View File

@ -0,0 +1,42 @@
provider "harvester" {
kubeconfig = var.kubeconfig
}
module "network" {
source = "../../modules/harvester/network"
project_name = var.project_name
namespace = var.namespace
network_details = var.network
ssh_public_key_path = var.ssh_public_key_path
ssh_bastion_host = var.ssh_bastion_host
ssh_bastion_user = var.ssh_bastion_user
ssh_bastion_key_path = var.ssh_bastion_key_path
}
# Optionally downloads an openSUSE Leap 15.6 NoCloud image into Harvester,
# used as the base OS disk for all VMs when no explicit image is configured.
# Disabled by setting var.create_image = false.
resource "harvester_image" "created" {
count = var.create_image ? 1 : 0
name = "${var.project_name}-opensuse156"
namespace = var.namespace
display_name = "${var.project_name}-opensuse156"
source_type = "download"
url = "https://download.opensuse.org/repositories/Cloud:/Images:/Leap_15.6/images/openSUSE-Leap-15.6.x86_64-NoCloud.qcow2"
}
module "test_environment" {
source = "../../modules/generic/test_environment"
project_name = var.project_name
upstream_cluster = var.upstream_cluster
upstream_cluster_distro_module = var.upstream_cluster_distro_module
downstream_cluster_templates = var.downstream_cluster_templates
downstream_cluster_distro_module = var.downstream_cluster_distro_module
tester_cluster = var.tester_cluster
tester_cluster_distro_module = var.tester_cluster_distro_module
node_module = "harvester/node"
ssh_user = var.ssh_user
ssh_private_key_path = var.ssh_private_key_path
network_config = module.network.config
image_id = var.create_image ? harvester_image.created[0].id : null
first_kubernetes_api_port = var.first_kubernetes_api_port
first_app_http_port = var.first_app_http_port
first_app_https_port = var.first_app_https_port
}

View File

@ -0,0 +1,3 @@
output "clusters" {
value = module.test_environment.clusters
}

View File

@ -0,0 +1,21 @@
# Tool and provider version pins for the Harvester main module.
terraform {
# NOTE(review): this pins one exact tofu/terraform version; a range
# (e.g. ">= 1.8.2") may be intended — confirm against project tooling.
required_version = "1.8.2"
required_providers {
# Harvester HCI provider: VMs, images, networks, SSH keys.
harvester = {
source = "harvester/harvester"
version = "0.6.6"
}
tls = {
source = "hashicorp/tls"
version = "4.0.3"
}
helm = {
source = "hashicorp/helm"
version = "2.7.1"
}
# Used by generic modules to run provisioning commands over SSH.
ssh = {
source = "loafoe/ssh"
version = "2.7.0"
}
}
}

View File

@ -0,0 +1,147 @@
variable "ssh_public_key_path" {
description = "Path to SSH public key file (can be generated with `ssh-keygen -t ed25519`)"
default = "~/.ssh/id_ed25519.pub"
}
variable "ssh_private_key_path" {
description = "Path to SSH private key file (can be generated with `ssh-keygen -t ed25519`)"
default = "~/.ssh/id_ed25519"
}
variable "ssh_user" {
description = "User name to use for the SSH connection to all nodes in all clusters"
default = "opensuse"
}
variable "ssh_bastion_host" {
description = "Public name of the SSH bastion host. Leave null for publicly accessible Harvester instances"
type = string
default = null
}
variable "ssh_bastion_user" {
description = "User name to connect to the SSH bastion host"
default = null
}
variable "ssh_bastion_key_path" {
description = "Path of private ssh key used to access the bastion host to access Harvester"
type = string
default = null
}
variable "upstream_cluster" {
description = "Upstream cluster configuration. See tofu/modules/generic/test_environment/variables.tf for details"
type = any
}
variable "upstream_cluster_distro_module" {
description = "Name of the module to use for the upstream cluster"
default = "generic/k3s"
}
variable "downstream_cluster_templates" {
description = "List of downstream cluster configurations. See tofu/modules/generic/test_environment/variables.tf for details"
type = list(any)
}
variable "downstream_cluster_distro_module" {
description = "Name of the module to use for the downstream clusters"
default = "generic/k3s"
}
variable "tester_cluster" {
description = "Tester cluster configuration. See tofu/modules/generic/test_environment/variables.tf for details"
type = any
default = null
}
# Selects which distro module provisions the tester cluster.
variable "tester_cluster_distro_module" {
  description = "Name of the module to use for the tester cluster"
  default     = "generic/k3s"
}
# "Multi-tenancy" variables
variable "project_name" {
description = "Name of this project, used as prefix for resources it creates"
default = "st"
}
variable "first_kubernetes_api_port" {
description = "Port number where the Kubernetes API of the first cluster is published locally. Other clusters' ports are published in successive ports"
type = number
default = 7445
}
variable "first_app_http_port" {
description = "Port number where the first server's port 80 is published locally. Other clusters' ports are published in successive ports"
type = number
default = 9080
}
variable "first_app_https_port" {
description = "Port number where the first server's port 443 is published locally. Other clusters' ports are published in successive ports"
type = number
default = 9443
}
# Harvester-specific variables
variable "namespace" {
description = "The namespace where the VMs should be created"
default = "default"
}
variable "kubeconfig" {
description = "Path to the Harvester kubeconfig file. Uses KUBECONFIG by default. See https://docs.harvesterhci.io/v1.3/faq/#how-can-i-access-the-kubeconfig-file-of-the-harvester-cluster"
type = string
nullable = false
}
# VM Network settings plus the VM network_interface type/model.
# Fixes the misspelled "interace_type" attribute in the default: it is not
# part of the declared object type, so the default value was invalid and the
# intended interface_type = "bridge" was never applied.
variable "network" {
  description = <<-EOT
    An object combining fields that define a pre-existing VM Network as well as the VM's network_interface type and model.
    The object includes a name, a "public" flag if the network will assign a public IP address, a "wait_for_lease" flag if the interface is expected to provision an IP address,
    and optionally a namespace, interface_type and interface_model to be assigned to the VM.
    If using a VM Network which will assign a public IP to the VM, ensure the "public" flag is set to true.
  EOT
  type = object({
    create              = bool
    name                = string
    vlan_id             = number
    clusternetwork_name = string
    namespace           = optional(string)
    interface_type      = optional(string)
    interface_model     = optional(string)
    public              = bool
    wait_for_lease      = bool
  })
  default = {
    create              = false
    clusternetwork_name = "vmnet"
    vlan_id             = 100
    name                = "vmnet-shared"
    namespace           = "default"
    interface_type      = "bridge"
    public              = true
    wait_for_lease      = true
  }
}
variable "password" {
description = "Password to use for VM access (via terminal, SSH access is exclusively via SSH public key)"
default = "linux"
}
variable "ssh_shared_public_keys" {
description = "A list of shared public ssh key names + namespaces (which already exists in Harvester) to load onto the Harvester VMs"
type = list(object({
name = string
namespace = string
}))
default = []
}
variable "create_image" {
description = "Whether to create a new image for the VMs"
default = true
}

View File

@ -21,6 +21,7 @@ module "server_nodes" {
node_module = var.node_module
node_module_variables = var.node_module_variables
network_config = var.network_config
image_id = var.image_id
}
module "agent_nodes" {
@ -33,6 +34,7 @@ module "agent_nodes" {
node_module = var.node_module
node_module_variables = var.node_module_variables
network_config = var.network_config
image_id = var.image_id
}
resource "ssh_sensitive_resource" "first_server_installation" {

View File

@ -102,3 +102,11 @@ variable "network_config" {
description = "Network module outputs, to be passed to node_module"
type = any
}
# Only used for Harvester module atm
variable "image_id" {
description = "ID of a Harvester image, if one was created. Otherwise null"
type = string
default = null
nullable = true
}

View File

@ -8,6 +8,7 @@ module "host" {
host_configuration_commands = var.host_configuration_commands
node_module_variables = var.node_module_variables
network_config = var.network_config
image_id = var.image_id
public = var.public
}

View File

@ -49,3 +49,11 @@ variable "network_config" {
description = "Network module outputs, to be passed to node_module"
type = any
}
# Only used for Harvester module atm
variable "image_id" {
description = "ID of a Harvester image, if one was created. Otherwise null"
type = string
default = null
nullable = true
}

View File

@ -22,6 +22,7 @@ module "server_nodes" {
node_module = var.node_module
node_module_variables = var.node_module_variables
network_config = var.network_config
image_id = var.image_id
}
module "agent_nodes" {
@ -34,6 +35,7 @@ module "agent_nodes" {
node_module = var.node_module
node_module_variables = var.node_module_variables
network_config = var.network_config
image_id = var.image_id
}
resource "ssh_sensitive_resource" "first_server_installation" {

View File

@ -97,3 +97,11 @@ variable "network_config" {
description = "Network module outputs, to be passed to node_module"
type = any
}
# Only used for Harvester module atm
variable "image_id" {
description = "ID of a Harvester image, if one was created. Otherwise null"
type = string
default = null
nullable = true
}

View File

@ -23,6 +23,7 @@ module "upstream_cluster" {
ssh_user = var.ssh_user
node_module = var.node_module
network_config = var.network_config
image_id = var.image_id
node_module_variables = var.upstream_cluster.node_module_variables
}
@ -45,6 +46,7 @@ module "tester_cluster" {
ssh_user = var.ssh_user
node_module = var.node_module
network_config = var.network_config
image_id = var.image_id
node_module_variables = var.tester_cluster.node_module_variables
}
@ -68,5 +70,6 @@ module "downstream_clusters" {
ssh_user = var.ssh_user
node_module = var.node_module
network_config = var.network_config
image_id = var.image_id
node_module_variables = local.downstream_clusters[count.index].node_module_variables
}

View File

@ -23,6 +23,14 @@ variable "network_config" {
type = any
}
# Only used for Harvester module atm
variable "image_id" {
description = "ID of a Harvester image, if one was created. Otherwise null"
type = string
default = null
nullable = true
}
# Upstream cluster specifics
variable "upstream_cluster" {
type = object({
@ -81,7 +89,7 @@ variable "tester_cluster" {
enable_audit_log = bool // Enable audit log for the cluster
node_module_variables = any // Node module-specific variables
})
}) # If null, no tester cluster will be created
nullable = true
}

View File

@ -0,0 +1,5 @@
data "harvester_network" "this" {
count = var.network_details.create ? 0 : 1
name = var.network_details.name
namespace = var.network_details.namespace
}

View File

@ -0,0 +1,45 @@
terraform {
required_providers {
harvester = {
source = "harvester/harvester"
}
}
}
resource "harvester_ssh_key" "public_key" {
name = "${var.project_name}-ssh-key"
namespace = var.namespace
public_key = file(var.ssh_public_key_path)
}
resource "harvester_clusternetwork" "cluster-vlan" {
count = var.network_details.create ? 1 : 0
name = var.network_details.clusternetwork_name
description = "Cluster VLAN managed by Dartboard's Harvester opentofu module"
}
# Maps the created cluster network onto physical NICs on the Harvester hosts.
# Only created when the caller asks for a new network (network_details.create).
# NOTE(review): var.vlan_uplink defaults to null, so creating a network without
# explicitly setting vlan_uplink will fail on these attribute lookups — confirm
# callers always supply it when create = true.
resource "harvester_vlanconfig" "cluster-vlan-config" {
count = var.network_details.create ? 1 : 0
name = "${var.network_details.clusternetwork_name}-vlan-config"
cluster_network_name = harvester_clusternetwork.cluster-vlan[0].name
uplink {
nics = var.vlan_uplink.nics
bond_mode = var.vlan_uplink.bond_mode
bond_miimon = var.vlan_uplink.bond_miimon
mtu = var.vlan_uplink.mtu
}
}
resource "harvester_network" "this" {
count = var.network_details.create ? 1 : 0
depends_on = [ harvester_vlanconfig.cluster-vlan-config ]
name = var.network_details.name
namespace = var.namespace
description = "Harvester network managed by Dartboard's Harvester opentofu module"
vlan_id = var.network_details.vlan_id
cluster_network_name = harvester_clusternetwork.cluster-vlan[0].name
}

View File

@ -0,0 +1,19 @@
# Aggregated network configuration, consumed by the harvester/node module as
# its "network_config" variable (see that module's variables.tf for the shape).
output "config" {
value = {
namespace : var.namespace,
ssh_public_key_id : harvester_ssh_key.public_key.id,
ssh_public_key : harvester_ssh_key.public_key.public_key,
# Resolves to the created network when create = true, otherwise to the
# pre-existing network looked up via the data source.
id : var.network_details.create ? harvester_network.this[0].id : data.harvester_network.this[0].id
name : var.network_details.name
clusternetwork_name : var.network_details.clusternetwork_name
interface_type : var.network_details.interface_type
interface_model : var.network_details.interface_model
public : var.network_details.public
wait_for_lease : var.network_details.wait_for_lease
# Bastion settings are forwarded so node modules can open SSH connections.
ssh_bastion_host : var.ssh_bastion_host
ssh_bastion_user : var.ssh_bastion_user
ssh_bastion_key_path : var.ssh_bastion_key_path
}
}

View File

@ -0,0 +1,73 @@
variable "project_name" {
description = "A prefix for names of objects created by this module"
default = "st"
}
variable "namespace" {
description = "The namespace for objects created by this module"
default = "default"
}
# VM Network settings plus the VM network_interface type/model.
# Fixes the misspelled "interace_type" attribute in the default: it is not
# part of the declared object type, so the default value was invalid and the
# intended interface_type = "bridge" was never applied.
variable "network_details" {
  description = <<-EOT
    An object combining fields that define a VM Network as well as the VM's network_interface type and model.
    The object includes a name, a "public" flag if the network will assign a public IP address, a "wait_for_lease" flag if the interface is expected to provision an IP address,
    and optionally a namespace, interface_type and interface_model to be assigned to the VM.
    If using a VM Network which will assign a public IP to the VM, ensure the "public" flag is set to true.
  EOT
  type = object({
    create              = bool
    name                = string
    vlan_id             = number
    clusternetwork_name = string
    namespace           = optional(string)
    interface_type      = optional(string)
    interface_model     = optional(string)
    public              = bool
    wait_for_lease      = bool
  })
  default = {
    create              = false
    clusternetwork_name = "vmnet"
    vlan_id             = 100
    name                = "vmnet-shared"
    namespace           = "default"
    interface_type      = "bridge"
    public              = true
    wait_for_lease      = true
  }
}
variable "ssh_public_key_path" {
description = "Path of public ssh key for hosts created by this module"
type = string
}
variable "vlan_uplink" {
description = "Harvester ClusterNetwork uplink configuration"
type = object({
nics = list(string)
bond_miimon = optional(number)
bond_mode = optional(string)
mtu = optional(number)
})
default = null
}
variable "ssh_bastion_host" {
description = "Public name of the SSH bastion host. Leave null for publicly accessible Harvester instances"
type = string
default = null
}
variable "ssh_bastion_user" {
description = "User name to connect to the SSH bastion host"
default = null
}
variable "ssh_bastion_key_path" {
description = "Path of private ssh key used to access the bastion host to access Harvester"
type = string
default = null
}

View File

@ -0,0 +1,14 @@
# Looks up an explicitly configured VM image. Only instantiated when BOTH
# image_name and image_namespace are set; otherwise the node falls back to
# var.image_id (the image created by the main module), see main.tf.
data "harvester_image" "this" {
count = var.node_module_variables.image_name != null && var.node_module_variables.image_namespace != null ? 1 : 0
display_name = var.node_module_variables.image_name
namespace = local.image_namespace
}
# Looks up pre-existing shared SSH keys by name+namespace. for_each produces a
# MAP keyed by key name (not a list), so instances must be iterated, never
# indexed with [0].
data "harvester_ssh_key" "shared" {
for_each = var.node_module_variables.ssh_shared_public_keys != null ? {
for i, key in var.node_module_variables.ssh_shared_public_keys :
key.name => key
} : {}
name = each.value.name
namespace = each.value.namespace
}

View File

@ -0,0 +1,22 @@
locals {
  # All public keys injected into the VM: the project key plus every shared
  # Harvester key. data.harvester_ssh_key.shared uses for_each, so it is a
  # map and must be iterated — the previous shared[0] index never resolved,
  # and try() silently dropped all shared keys.
  public_keys = compact(concat(
    [var.network_config.ssh_public_key],
    [for key in data.harvester_ssh_key.shared : key.public_key],
  ))

  # Rendered cloud-init user data for the VM (see user_data.yaml).
  template_user_data = templatefile("${path.module}/user_data.yaml", {
    ssh_user = var.ssh_user
    password = var.node_module_variables.password
    ssh_keys = local.public_keys
  })

  wait_for_lease = var.network_config.wait_for_lease

  # Disks keyed by name, for use in the dynamic "disk" block.
  disks_map = { for disk in var.node_module_variables.disks : disk.name => disk }

  # Interfaces partitioned by whether the configured network hands out public IPs.
  private_network_interfaces = [for network in harvester_virtualmachine.this.network_interface[*] : {
    interface_name = network.interface_name
    ip_address     = network.ip_address
  } if !var.network_config.public]
  public_network_interfaces = [for network in harvester_virtualmachine.this.network_interface[*] : {
    interface_name = network.interface_name
    ip_address     = network.ip_address
  } if var.network_config.public]

  # Convert to a valid Kubernetes name; guarded so lower(null) cannot error
  # when no explicit image namespace is configured.
  image_namespace = var.node_module_variables.image_namespace != null ? replace(lower(var.node_module_variables.image_namespace), "/[^a-z0-9-]/", "-") : null
}

View File

@ -0,0 +1,82 @@
terraform {
required_providers {
harvester = {
source = "harvester/harvester"
}
}
}
# Harvester VM backing a single cluster node.
resource "harvester_virtualmachine" "this" {
  name      = "${var.project_name}-${var.name}"
  namespace = var.network_config.namespace
  hostname  = var.name

  # ssh-user tag lets tooling discover how to connect; extra tags come from config.
  tags = merge({
    ssh-user = var.ssh_user
    Project  = var.project_name
  }, var.node_module_variables.tags)

  cpu    = var.node_module_variables.cpu
  memory = "${var.node_module_variables.memory}Gi"

  # secure_boot requires EFI, so it is forced off whenever EFI is disabled.
  efi         = coalesce(var.node_module_variables.efi, false)
  secure_boot = coalesce(var.node_module_variables.efi, false) ? coalesce(var.node_module_variables.secure_boot, false) : false

  network_interface {
    name           = var.network_config.name
    network_name   = var.network_config.id
    type           = var.network_config.interface_type
    model          = var.network_config.interface_model
    wait_for_lease = var.network_config.wait_for_lease
  }

  dynamic "disk" {
    for_each = local.disks_map
    content {
      name = disk.value.name
      type = disk.value.type
      size = "${disk.value.size}Gi"
      bus  = disk.value.bus
      # Only the first disk carries the OS image: an explicitly configured
      # image wins over the one optionally created by the main module.
      image = index(var.node_module_variables.disks, disk.value) == 0 ? (
        var.node_module_variables.image_name != null && var.node_module_variables.image_namespace != null ? data.harvester_image.this[0].id : var.image_id
      ) : null
      boot_order  = index(var.node_module_variables.disks, disk.value) + 1 //boot_order starts at 1, while the index() function is 0-based
      auto_delete = true
    }
  }

  # Project key plus ALL shared keys. data.harvester_ssh_key.shared uses
  # for_each (a map), so it must be iterated; the previous shared[0] index
  # never resolved, and try() silently dropped every shared key.
  ssh_keys = compact(concat(
    [var.network_config.ssh_public_key_id],
    [for key in data.harvester_ssh_key.shared : key.id],
  ))

  # Default "USB Tablet" config for VNC usage
  input {
    name = "tablet"
    type = "tablet"
    bus  = "usb"
  }

  cloudinit {
    user_data = local.template_user_data
  }

  // Allow for more than the default time for VM destruction
  timeouts {
    delete = "15m"
  }
}
# Runs the host configuration commands over SSH once the VM is reachable.
resource "null_resource" "host_configuration" {
  connection {
    # Use the public interface when the network assigns public IPs, otherwise
    # the private one. Previously this always indexed public_network_interfaces,
    # which is an empty list — and errors — for private-only networks.
    host        = var.network_config.public ? local.public_network_interfaces[0].ip_address : local.private_network_interfaces[0].ip_address
    private_key = var.ssh_private_key_path != null ? file(var.ssh_private_key_path) : null
    user        = var.ssh_user

    # Optional jump host; all three settings are null when no bastion is used.
    bastion_host        = var.network_config.ssh_bastion_host
    bastion_user        = var.network_config.ssh_bastion_user
    bastion_private_key = var.network_config.ssh_bastion_key_path != null ? file(var.network_config.ssh_bastion_key_path) : null
    bastion_port        = 22

    timeout = "5m"
  }
  provisioner "remote-exec" {
    inline = var.host_configuration_commands
  }
}

View File

@ -0,0 +1,23 @@
# Symbolic node name as passed in by the caller.
output "name" {
value = var.name
}
# Harvester VM resource ID.
output "id" {
value = harvester_virtualmachine.this.id
}
# Resolvable hostname via sslip.io wildcard DNS (maps <ip>.sslip.io to <ip>).
# NOTE(review): private_* and public_* outputs all resolve to the same
# address — only one interface list is non-empty depending on
# network_config.public. Presumably intentional so generic modules can use
# either; confirm.
output "private_name" {
value = "${var.network_config.public ? local.public_network_interfaces[0].ip_address : local.private_network_interfaces[0].ip_address}.sslip.io"
}
output "private_ip" {
value = var.network_config.public ? local.public_network_interfaces[0].ip_address : local.private_network_interfaces[0].ip_address
}
output "public_address" {
value = var.network_config.public ? local.public_network_interfaces[0].ip_address : local.private_network_interfaces[0].ip_address
}
output "public_name" {
value = "${var.network_config.public ? local.public_network_interfaces[0].ip_address : local.private_network_interfaces[0].ip_address}.sslip.io"
}

View File

@ -0,0 +1,16 @@
#cloud-config
users:
- default
- ${ssh_user}
password: ${password}
disable_root: false
chpasswd:
expire: false
users:
- {name: ${ssh_user}, password: ${password}, type: text}
package_update: true
ssh_authorized_keys:
%{ for public_key in ssh_keys ~}
- >-
${public_key}
%{ endfor ~}

View File

@ -0,0 +1,137 @@
variable "project_name" {
description = "A prefix for names of objects created by this module"
default = "st"
}
variable "name" {
description = "Symbolic name of this host"
type = string
}
variable "ssh_private_key_path" {
description = "Path of private ssh key used to access the host"
type = string
}
variable "ssh_user" {
description = "User name to use for the SSH connection to the host"
default = "root"
}
variable "ssh_tunnels" {
description = "Opens SSH tunnels to this host via the bastion"
type = list(list(number))
default = []
}
variable "host_configuration_commands" {
description = "Commands to run when the host is deployed"
default = [
"echo 'Waiting for cloud-init to complete...'",
"cloud-init status --wait > /dev/null",
"echo 'Completed cloud-init!'",
"cat /etc/os-release"
]
}
variable "public" {
description = "Whether the node is publicly accessible"
default = false
}
variable "node_module_variables" {
description = <<EOT
Harvester-specific VM configuration variables.
image_name: Image name for this VM. If null, the image created by the network module will be used
image_namespace: Namespace for image_name. If null, the image created by the network module will be used
cpu: Number of CPUs to allocate for the VM(s)
memory: Number of GB of Memory to allocate for the VM(s)
tags: A map of strings to add as VM tags
password: Password to use for VM access (via terminal, SSH access is exclusively via SSH public key)
ssh_shared_public_keys: A list of shared public ssh key names + namespaces (which already exists in Harvester) to load onto the Harvester VMs
disks: List of objects representing the disks to be provisioned for the VM. NOTE: boot_order will be set to the index of each disk in the list.
efi: Flag that determines if the VM will boot in EFI mode
secure_boot: Flag that determines if the VM will be provisioned with secure_boot enabled. EFI must be enabled to use this
EOT
type = object({
image_name = optional(string)
image_namespace = optional(string)
cpu = number
memory = number
tags = optional(map(string))
password = string
ssh_shared_public_keys = optional(list(object({
name = string
namespace = string
})))
disks = optional(list(object({
name = string
type = string
size = number
bus = string
})))
efi = optional(bool)
secure_boot = optional(bool)
})
default = {
image_name = null
image_namespace = null
cpu = 2
memory = 8
namespace = "default"
tags = {}
password = "linux"
ssh_shared_public_keys = []
disks = [{
name = "disk-0"
type = "disk"
size = 35
bus = "virtio"
}]
efi = false
secure_boot = false
}
}
variable "network_config" {
description = <<EOT
Harvester-specific network configuration variables.
namespace: The namespace for nodes created by this module
ssh_public_key_id: ID of the public ssh key used to access the instance
ssh_public_key: Contents of the public ssh key used to access the instance
id: ID of the network
name: Name of the network
clusternetwork_name: Name of the cluster network
interface_type: Type of network interface to use
interface_model: Model of network interface to use
public: Whether the network will assign a public IP address
wait_for_lease: Whether the interface is expected to provision an IP address
created_image_id: ID of the image if one was created
ssh_bastion_host: Public name of the SSH bastion host. Leave null for publicly accessible instances
ssh_bastion_user: User name for the SSH bastion host's OS
ssh_bastion_key_path: Path of private ssh key used to access the bastion host
EOT
type = object({
namespace = string
ssh_public_key_id = string
ssh_public_key = string
id = string
name = string
clusternetwork_name = string
interface_type = string
interface_model = string
public = bool
wait_for_lease = bool
created_image_id = optional(string)
ssh_bastion_host = optional(string)
ssh_bastion_user = optional(string)
ssh_bastion_key_path = optional(string)
})
}
variable "image_id" {
description = "ID of a Harvester image, if one was created. Otherwise null"
type = string
default = null
nullable = true
}