# k3s-over-ssh Terraform module (HCL, ~271 lines / 7.7 KiB)
terraform {
  # Tested against Terraform 1.x; older CLIs are rejected up front.
  required_version = ">= 1.0.0"

  required_providers {
    # Runs the SSH / shell provisioners that drive the K3s install.
    null = {
      source  = "hashicorp/null"
      version = "~> 3.2.0"
    }

    # Used for files written on the machine running Terraform.
    local = {
      source  = "hashicorp/local"
      version = "~> 2.4.0"
    }
  }
}
locals {
  # Number of agent nodes to provision, derived from the worker IP list.
  worker_count = length(var.worker_ips)

  # Throwaway SSH client config written next to the module; lets scp/ssh
  # run non-interactively against fresh hosts.
  ssh_config_path = "${path.module}/ssh_config"

  # Scratch directory created on every remote node.
  temporary_dir = "/tmp/k3s-terraform"

  # Remote staging paths for artifacts pulled back to the local machine.
  server_kubeconfig = "${local.temporary_dir}/k3s.yaml"
  node_token_path   = "${local.temporary_dir}/node-token"
}
# Create a temporary SSH config file for secure connections.
# Writes one Host stanza for the server and one per worker, all with host-key
# checking disabled (fresh, untrusted-host bootstrap).
resource "null_resource" "setup_ssh_config" {
  triggers = {
    server_ip       = var.server_ip
    worker_ips      = join(",", var.worker_ips)
    ssh_user        = var.ssh_user
    ssh_private_key = var.ssh_private_key
    # BUGFIX: the destroy-time provisioner below reads
    # self.triggers.ssh_config_path, but this key was never stored in
    # triggers, so every `terraform destroy` failed on a missing map key.
    # Destroy provisioners may only reference `self`, never locals, so the
    # path must be captured here.
    ssh_config_path = local.ssh_config_path
  }

  provisioner "local-exec" {
    command     = <<-EOT
      mkdir -p ${dirname(local.ssh_config_path)}
      cat > ${local.ssh_config_path} << 'EOF'
      Host ${var.server_ip}
      User ${var.ssh_user}
      IdentityFile ${var.ssh_private_key}
      StrictHostKeyChecking no
      UserKnownHostsFile /dev/null

      ${join("\n", [
      for ip in var.worker_ips : <<-WORKER
        Host ${ip}
        User ${var.ssh_user}
        IdentityFile ${var.ssh_private_key}
        StrictHostKeyChecking no
        UserKnownHostsFile /dev/null
      WORKER
      ])}
      EOF
    EOT
    interpreter = ["bash", "-c"]
  }

  # Clean up SSH config on destroy (best effort).
  provisioner "local-exec" {
    when        = destroy
    command     = "rm -f ${self.triggers.ssh_config_path}"
    interpreter = ["bash", "-c"]
    on_failure  = continue
  }
}
# Install K3s on the server node via the official get.k3s.io installer,
# then block until the systemd unit reports active.
resource "null_resource" "install_k3s_server" {
  depends_on = [null_resource.setup_ssh_config]

  triggers = {
    server_ip       = var.server_ip
    ssh_user        = var.ssh_user
    ssh_private_key = var.ssh_private_key
    ssh_config_path = local.ssh_config_path
    k3s_version     = var.k3s_version
    k3s_extra_args  = var.k3s_extra_server_args
    # BUGFIX: destroy-time provisioners may only reference `self` — the
    # previous `${local.temporary_dir}` in the destroy inline block is a
    # hard error in Terraform >= 0.13. Capture the path here instead.
    temporary_dir   = local.temporary_dir
  }

  # Install K3s server
  provisioner "remote-exec" {
    connection {
      host        = var.server_ip
      user        = var.ssh_user
      private_key = file(var.ssh_private_key)
      agent       = false
    }

    inline = [
      "mkdir -p ${local.temporary_dir}",
      # INSTALL_K3S_CHANNEL is only consulted when INSTALL_K3S_VERSION is empty.
      "curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${var.k3s_version} INSTALL_K3S_CHANNEL=${var.k3s_channel} sh -s - server ${var.k3s_extra_server_args}",
      "until systemctl is-active --quiet k3s; do echo 'Waiting for k3s to start...'; sleep 5; done",
      "echo 'K3s server installation complete'"
    ]
  }

  # Uninstall K3s server on destroy (best effort — host may already be gone).
  provisioner "remote-exec" {
    when = destroy

    connection {
      host        = self.triggers.server_ip
      user        = self.triggers.ssh_user
      private_key = file(self.triggers.ssh_private_key)
      agent       = false
    }

    inline = [
      "/usr/local/bin/k3s-uninstall.sh || true",
      "rm -rf ${self.triggers.temporary_dir} || true"
    ]

    on_failure = continue
  }
}
# Retrieve the K3s kubeconfig from the server and place it at
# var.kubeconfig_path on the machine running Terraform.
resource "null_resource" "get_k3s_config" {
  depends_on = [null_resource.install_k3s_server, null_resource.setup_ssh_config]

  triggers = {
    server_ip       = var.server_ip
    ssh_user        = var.ssh_user
    ssh_private_key = var.ssh_private_key
    ssh_config_path = local.ssh_config_path
  }

  # Stage a copy of the root-owned kubeconfig where the SSH user can read it.
  # The 644 staging mode is what lets the non-root scp below succeed.
  provisioner "remote-exec" {
    connection {
      host        = var.server_ip
      user        = var.ssh_user
      private_key = file(var.ssh_private_key)
      agent       = false
    }

    inline = [
      "mkdir -p ${local.temporary_dir}",
      "sudo cp /etc/rancher/k3s/k3s.yaml ${local.server_kubeconfig}",
      "sudo chmod 644 ${local.server_kubeconfig}"
    ]
  }

  # Download kubeconfig to the local machine.
  # SECURITY FIX: scp preserves the remote 644 staging mode, which left
  # cluster-admin credentials world-readable locally — tighten to 600.
  provisioner "local-exec" {
    command = "mkdir -p ${dirname(var.kubeconfig_path)} && scp -F ${local.ssh_config_path} ${var.ssh_user}@${var.server_ip}:${local.server_kubeconfig} ${var.kubeconfig_path} && chmod 600 ${var.kubeconfig_path}"
  }

  # Rewrite the API server address when the cluster is reached via a
  # different IP/hostname than the loopback default K3s writes.
  # NOTE(review): `sed -i` with no suffix argument is GNU-only; BSD/macOS
  # sed would need `sed -i ''` — confirm the expected runner OS.
  provisioner "local-exec" {
    command     = <<-EOT
      if [ -n "${var.replace_url}" ]; then
        sed -i 's|https://127.0.0.1:6443|https://${var.replace_url}:6443|g' ${var.kubeconfig_path}
      fi
    EOT
    interpreter = ["bash", "-c"]
    on_failure  = continue
  }
}
# Retrieve the K3s node (cluster join) token from the server and save it
# to var.node_token_path on the machine running Terraform.
resource "null_resource" "get_k3s_token" {
  depends_on = [null_resource.install_k3s_server, null_resource.setup_ssh_config]

  triggers = {
    server_ip       = var.server_ip
    ssh_user        = var.ssh_user
    ssh_private_key = var.ssh_private_key
    ssh_config_path = local.ssh_config_path
  }

  # Stage the root-only token where the SSH user can read it; the `sudo cat`
  # redirection runs as the SSH user into the user-owned temp dir.
  provisioner "remote-exec" {
    connection {
      host        = var.server_ip
      user        = var.ssh_user
      private_key = file(var.ssh_private_key)
      agent       = false
    }

    inline = [
      "mkdir -p ${local.temporary_dir}",
      "sudo cat /var/lib/rancher/k3s/server/node-token > ${local.node_token_path}",
      "sudo chmod 644 ${local.node_token_path}"
    ]
  }

  # Download the token to the local machine.
  # SECURITY FIX: scp preserves the remote 644 mode, leaving the cluster
  # join secret world-readable locally — tighten to 600.
  provisioner "local-exec" {
    command = "mkdir -p ${dirname(var.node_token_path)} && scp -F ${local.ssh_config_path} ${var.ssh_user}@${var.server_ip}:${local.node_token_path} ${var.node_token_path} && chmod 600 ${var.node_token_path}"
  }
}
# Push the cluster join token out to every worker node. One resource
# instance per worker (count), so a single unreachable host only taints
# its own instance.
resource "null_resource" "copy_token_to_workers" {
  count      = local.worker_count
  depends_on = [null_resource.get_k3s_token]

  triggers = {
    worker_ip       = var.worker_ips[count.index]
    ssh_user        = var.ssh_user
    ssh_private_key = var.ssh_private_key
    ssh_config_path = local.ssh_config_path
    node_token_path = var.node_token_path
  }

  # Make sure the staging directory exists on the worker before scp runs.
  provisioner "remote-exec" {
    connection {
      host        = var.worker_ips[count.index]
      user        = var.ssh_user
      private_key = file(var.ssh_private_key)
      agent       = false
    }

    inline = ["mkdir -p ${local.temporary_dir}"]
  }

  # Upload the locally-downloaded token file to this worker.
  provisioner "local-exec" {
    command = "scp -F ${local.ssh_config_path} ${var.node_token_path} ${var.ssh_user}@${var.worker_ips[count.index]}:${local.node_token_path}"
  }
}
# Install the K3s agent on each worker node, joining it to the server via
# the token staged by copy_token_to_workers, then wait for the unit.
resource "null_resource" "install_k3s_worker" {
  count      = local.worker_count
  depends_on = [null_resource.copy_token_to_workers]

  triggers = {
    worker_ip       = var.worker_ips[count.index]
    server_ip       = var.server_ip
    ssh_user        = var.ssh_user
    ssh_private_key = var.ssh_private_key
    ssh_config_path = local.ssh_config_path
    k3s_version     = var.k3s_version
    k3s_extra_args  = var.k3s_extra_agent_args
    # BUGFIX: destroy-time provisioners may only reference `self` — the
    # previous `${local.temporary_dir}` in the destroy inline block is a
    # hard error in Terraform >= 0.13. Capture the path here instead.
    temporary_dir   = local.temporary_dir
  }

  # Install K3s agent on worker
  provisioner "remote-exec" {
    connection {
      host        = var.worker_ips[count.index]
      user        = var.ssh_user
      private_key = file(var.ssh_private_key)
      agent       = false
    }

    inline = [
      # K3S_URL puts the installer in agent mode; the token read happens
      # on the worker itself, so it never transits the command line here.
      "curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=${var.k3s_version} INSTALL_K3S_CHANNEL=${var.k3s_channel} K3S_URL=https://${var.server_ip}:6443 K3S_TOKEN=$(cat ${local.node_token_path}) sh -s - agent ${var.k3s_extra_agent_args}",
      "until systemctl is-active --quiet k3s-agent; do echo 'Waiting for k3s-agent to start...'; sleep 5; done",
      "echo 'K3s agent installation complete'"
    ]
  }

  # Uninstall K3s agent on destroy (best effort — host may already be gone).
  provisioner "remote-exec" {
    when = destroy

    connection {
      host        = self.triggers.worker_ip
      user        = self.triggers.ssh_user
      private_key = file(self.triggers.ssh_private_key)
      agent       = false
    }

    inline = [
      "/usr/local/bin/k3s-agent-uninstall.sh || true",
      "rm -rf ${self.triggers.temporary_dir} || true"
    ]

    on_failure = continue
  }
}