Terraform example for custom multiple instances
In this example we will create 3 instances with different settings on each one. This is useful because, in practice, the instances to create are not always identical.
Terraform runs 3 base instances (two Ubuntu flavours and one Rocky Linux) with apache2 and SSH. The orchestrator creates the instances, and each one serves a main page that shows its external IP.
Step 1: Create the variables provider file (provider.tf)
# OpenStack provider credentials for the Colossus cloud (Keystone v3).
# NOTE(review): tenant_name reuses the <OPENSTACK_USERNAME> placeholder —
# presumably the project name equals the username on this cloud; confirm,
# or replace it with the actual tenant/project name.
provider "openstack" {
user_name = "<OPENSTACK_USERNAME>"
tenant_name = "<OPENSTACK_USERNAME>"
password = "<OPENSTACK_PASSWORD>"
auth_url = "https://colossus.cesar.unizar.es:5000/v3"
}
Step 2: Create the variables file (variables.tf)
# Params file for variables
# Create an array of heterogeneous instances with configuration.
# Each map entry describes one instance: display name, image, flavor,
# data-volume size in GB, and tags. The map key (e.g. "instance-01")
# is reused as the for_each key across ports, volumes and floating IPs.
variable "instances" {
description = "The Instances to be deployed"
type = map(object({
name = string
image = string
flavor = string
volume_size = number
tags = list(string)
}))
default = {
"instance-01" = {
name = "Webserver01"
image = "Ubuntu server 24.04 (Noble Numbat)"
flavor = "m1.small"
volume_size = 1
tags = ["general", "webserver"]
},
"instance-02" = {
name = "Webserver02"
image = "Ubuntu server 24.10 (Oracular Oriole)"
flavor = "m1.small"
volume_size = 3
tags = ["general", "webserver"]
},
"instance-03" = {
name = "Webserver03"
image = "Rocky Linux 9.4 (Blue Onyx)"
flavor = "m1.medium"
volume_size = 2
tags = ["general", "webserver"]
}
}
}
# Local path of the private SSH key used by the provisioner connection
variable "private_ssh_key" {
  description = "Private ssh key path"
  type        = string
  default     = "<PRIVATE_SSH_KEY_LOCAL_PATH>"
}
# OpenStack keypair injected into the instances for SSH access
variable "keypair_name" {
  description = "Keypair name for instance access"
  type        = string
  default     = "<OPENSTACK_ID_KEYPAIR>"
}
# UUID of external gateway
variable "external_gateway" {
  # Added description for consistency with the other variables
  description = "UUID of the external gateway network (VLAN)"
  type        = string
  default     = "<OPENSTACK_VLAN_UUID>"
}
variable "external_network" {
  # Added description for consistency with the other variables;
  # this is the pool floating IPs are allocated from
  description = "Name of the external network (floating IP pool)"
  type        = string
  default     = "vlanXXXX"
}
variable "dns_ip" {
  # Added description for consistency with the other variables
  description = "DNS nameservers assigned to the cluster subnet"
  type        = list(string)
  default     = ["155.210.12.9", "155.210.3.12"]
}
variable "network_http" {
  # Added description for consistency with the other variables
  description = "Cluster network parameters: network name, subnet name and CIDR"
  type        = map(string)
  default = {
    net_name    = "cluster-network"
    subnet_name = "cluster-subnetwork"
    cidr        = "192.168.1.0/24"
  }
}
# Where the formatted data volume is mounted inside each instance
variable "mounting_point_data" {
  description = "Mounting point for data volume"
  type        = string
  default     = "/mnt/data-volume"
}
Step 3: Security groups file (security_groups.tf)
With these security groups, we open ports 80 (HTTP) and 22 (SSH), and allow ICMP.
# Access group: allow inbound HTTP (port 80) from anywhere
resource "openstack_compute_secgroup_v2" "http" {
  name        = "http"
  description = "Open input http port"

  rule {
    ip_protocol = "tcp"
    from_port   = 80
    to_port     = 80
    cidr        = "0.0.0.0/0"
  }
}
# Access group: allow inbound SSH (port 22) from anywhere
resource "openstack_compute_secgroup_v2" "ssh" {
  name        = "ssh"
  description = "Open input ssh port"

  rule {
    ip_protocol = "tcp"
    from_port   = 22
    to_port     = 22
    cidr        = "0.0.0.0/0"
  }
}
resource "openstack_compute_secgroup_v2" "icmp" {
name = "icmp"
description = "Security group for ICMP"
# Allow all inbound ICMP traffic (from_port/to_port = -1 means every
# ICMP type/code). The previous comment wrongly referred to port 139/NetBIOS.
rule {
from_port = -1
to_port = -1
ip_protocol = "icmp"
cidr = "0.0.0.0/0"
}
}
Step 4: Networking file (network.tf)
We create a router, a network and a subnetwork.
# Router creation
# The router uplinks the cluster network to the external gateway (VLAN UUID).
resource "openstack_networking_router_v2" "router_cluster" {
name = "cluster-router"
external_network_id = var.external_gateway
}
# Network creation
# NOTE(review): this network resource is (confusingly) labelled
# "router_cluster"; renaming it would break the references in the subnet
# and port resources, so the label is kept as-is.
resource "openstack_networking_network_v2" "router_cluster" {
name = var.network_http["net_name"]
}
#### HTTP SUBNET ####
# Subnet on the cluster network, with the configured CIDR and DNS servers
resource "openstack_networking_subnet_v2" "http" {
  name            = var.network_http.subnet_name
  network_id      = openstack_networking_network_v2.router_cluster.id
  cidr            = var.network_http.cidr
  dns_nameservers = var.dns_ip
}
# Attach the http subnet to the cluster router
resource "openstack_networking_router_interface_v2" "http" {
  router_id = openstack_networking_router_v2.router_cluster.id
  subnet_id = openstack_networking_subnet_v2.http.id
}
Step 5: Main file (main.tf)
We create the instance, attach the network to the instance and a data volume, as well as the floating IP.
# Configure OpenStack Provider
terraform {
  # strcontains() (used in locals and the provisioner connection) was
  # introduced in Terraform v1.5, so ">= 0.14.0" was too permissive:
  # older versions satisfying it would fail at plan time.
  required_version = ">= 1.5.0"
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.53.0"
    }
  }
}
# Create multiple instances
# One compute instance per entry in var.instances; each instance is wired
# to its pre-created network port (openstack_networking_port_v2.http below)
# keyed by the same map key.
resource "openstack_compute_instance_v2" "instances" {
for_each = var.instances
name = each.value.name
flavor_name = each.value.flavor
key_pair = var.keypair_name
image_name = each.value.image
network {
port = openstack_networking_port_v2.http[each.key].id
}
tags = each.value.tags
}
# Create network ports
# One port per instance on the cluster network, carrying the ssh/http/icmp
# security groups, with its fixed IP allocated from the http subnet.
resource "openstack_networking_port_v2" "http" {
for_each = var.instances
name = "port-instance-http-${each.key}"
network_id = openstack_networking_network_v2.router_cluster.id
security_group_ids = [
openstack_compute_secgroup_v2.ssh.id,
openstack_compute_secgroup_v2.http.id,
openstack_compute_secgroup_v2.icmp.id,
]
fixed_ip {
subnet_id = openstack_networking_subnet_v2.http.id
}
}
# One data volume per instance, sized from the instance definition (GB)
resource "openstack_blockstorage_volume_v3" "data_volume" {
  for_each    = var.instances
  name        = "data-volume-${each.key}"
  description = "Data volume for demo instance ${each.key}"
  size        = each.value.volume_size
}
# Attach volume to instance
# Volumes and instances are paired by their shared var.instances map key.
resource "openstack_compute_volume_attach_v2" "attached" {
for_each = var.instances
instance_id = openstack_compute_instance_v2.instances[each.key].id
volume_id = openstack_blockstorage_volume_v3.data_volume[each.key].id
}
# Create floating ips
# One floating IP per instance, allocated from the external network pool.
resource "openstack_networking_floatingip_v2" "http" {
for_each = var.instances
pool = var.external_network
}
# Attach floating ips to instances
# for_each iterates the floating-IP resource map, so each.key is the same
# instance key and each.value the floating-IP object.
# NOTE(review): openstack_compute_floatingip_associate_v2 is deprecated in
# newer provider releases in favour of the networking equivalent — fine
# with the pinned ~> 1.53 provider; verify before upgrading.
resource "openstack_compute_floatingip_associate_v2" "http" {
for_each = openstack_networking_floatingip_v2.http
instance_id = openstack_compute_instance_v2.instances[each.key].id
floating_ip = each.value.address
}
locals {
# Pick the post-deployment script per instance by distro: images whose
# name contains "ubuntu" (case-insensitive) get the Ubuntu script,
# everything else (Rocky) gets the Rocky one.
# NOTE: strcontains() requires Terraform >= 1.5.
script_exec = { for k, v in var.instances : k => strcontains(lower(v.image), "ubuntu") ? "post-deployment-ubuntu.sh" : "post-deployment-rocky.sh" }
}
# Post-deployment actions, format and mount data volume
# Connects over SSH through each instance's floating IP, uploads the
# distro-specific script and runs it with the volume size (GB) and the
# mount point as arguments.
resource "null_resource" "post_deployment" {
for_each = var.instances
connection {
type = "ssh"
host = openstack_networking_floatingip_v2.http[each.key].address
# Default cloud-image login user differs per distro: ubuntu vs rocky
user = strcontains(lower(each.value.image), "ubuntu") ? "ubuntu" : "rocky"
private_key = file(var.private_ssh_key)
}
# Copies file from local directory to remote directory
provisioner "file" {
source = "${local.script_exec[each.key]}"
destination = "/tmp/${local.script_exec[each.key]}"
}
# Make the uploaded script executable, then run: <script> <size_gb> <mount_point>
provisioner "remote-exec" {
inline = [
"chmod +x /tmp/${local.script_exec[each.key]}",
"/tmp/${local.script_exec[each.key]} ${each.value.volume_size} ${var.mounting_point_data}",
]
}
}
# Outputs for the whole instance set (values() returns the resource
# instances sorted by map key, matching the original for-expressions)
output "instance_names" {
  value = values(openstack_compute_instance_v2.instances)[*].name
}
output "instance_ips" {
  value = values(openstack_compute_instance_v2.instances)[*].access_ip_v4
}
output "floating_ips" {
  value = values(openstack_networking_floatingip_v2.http)[*].address
}
File post-deployment-ubuntu.sh:
#!/bin/bash
# Post-deployment script (Ubuntu): format and mount the data volume,
# install Apache and publish a page showing the external IP.
# Usage: post-deployment-ubuntu.sh <volume_size_gb> <mount_point>

# Look for the first disk whose size matches the requested size.
# Capture the bare device name first: the original "/dev/$(...)" form
# yields the non-empty string "/dev/" when no disk matches, so the
# emptiness check below could never fire.
DISK_NAME=$(lsblk | awk -v size="${1}G" '$4 == size {print $1; exit}')
if [ -z "$DISK_NAME" ]; then
  echo "No disk found with size ${1}G"
  exit 1
fi
DISK="/dev/$DISK_NAME"
echo "Disk found: $DISK"

# Unmount the disk if it is already mounted
if mount | grep -q "$DISK"; then
  echo "Unmounting $DISK"
  sudo umount "$DISK"
fi

DATA_VOLUME=$2

# Format data volume and mount it
sudo mkfs.ext4 -qF "$DISK"
sudo mkdir -p "$DATA_VOLUME" && \
sudo mount "$DISK" "$DATA_VOLUME" && \
sudo chown -R "$USER:$USER" "$DATA_VOLUME"

# Persist the mount in fstab via the disk UUID (nofail: boot survives a missing disk)
UUID=$(sudo blkid -s UUID -o value "$DISK")
if [ -z "$UUID" ]; then
  echo "Failed to get UUID for $DISK"
  exit 1
fi
if grep -q "$UUID" /etc/fstab; then
  echo "Disk $DISK already in fstab"
else
  echo "Adding disk $DISK to fstab"
  echo "UUID=$UUID $DATA_VOLUME ext4 defaults,nofail 0 2" | sudo tee -a /etc/fstab
fi

# Install packages
sudo apt update && sudo apt install -y apache2

# Open firewall ports BEFORE enabling ufw so the live SSH session (the
# Terraform provisioner connection) is not cut off; --force skips ufw's
# interactive confirmation prompt, which would hang remote-exec.
sudo ufw allow OpenSSH
sudo ufw allow 80/tcp
sudo ufw --force enable

# Create HTML file and display the external IP
# (sudo belongs on tee, which performs the privileged write — not on echo)
echo "<html><body>" | sudo tee /var/www/html/index.html
echo "External IP: $(curl -s ident.me)<br/>" | sudo tee -a /var/www/html/index.html
echo "hello user! I am this machine:<br/>" | sudo tee -a /var/www/html/index.html
echo "$(lsb_release -a)<br/>" | sudo tee -a /var/www/html/index.html
echo "</body></html>" | sudo tee -a /var/www/html/index.html
File post-deployment-rocky.sh:
#!/bin/bash
# Post-deployment script (Rocky Linux): format and mount the data volume,
# install Apache (httpd) and publish a page showing the external IP.
# Usage: post-deployment-rocky.sh <volume_size_gb> <mount_point>

# Look for the first disk whose size matches the requested size.
# Capture the bare device name first: the original "/dev/$(...)" form
# yields the non-empty string "/dev/" when no disk matches, so the
# emptiness check below could never fire.
DISK_NAME=$(lsblk | awk -v size="${1}G" '$4 == size {print $1; exit}')
if [ -z "$DISK_NAME" ]; then
  echo "No disk found with size ${1}G"
  exit 1
fi
DISK="/dev/$DISK_NAME"
echo "Disk found: $DISK"

# Unmount the disk if it is already mounted
if mount | grep -q "$DISK"; then
  echo "Unmounting $DISK"
  sudo umount "$DISK"
fi

DATA_VOLUME=$2

# Format data volume and mount it
sudo mkfs.ext4 -qF "$DISK"
sudo mkdir -p "$DATA_VOLUME" && \
sudo mount "$DISK" "$DATA_VOLUME" && \
sudo chown -R "$USER:$USER" "$DATA_VOLUME"

# Persist the mount in fstab via the disk UUID (nofail: boot survives a missing disk)
UUID=$(sudo blkid -s UUID -o value "$DISK")
if [ -z "$UUID" ]; then
  echo "Failed to get UUID for $DISK"
  exit 1
fi
if grep -q "$UUID" /etc/fstab; then
  echo "Disk $DISK already in fstab"
else
  echo "Adding disk $DISK to fstab"
  echo "UUID=$UUID $DATA_VOLUME ext4 defaults,nofail 0 2" | sudo tee -a /etc/fstab
fi

# Install packages
# (dnf check-update exits 100 when updates are available; "|| true" keeps
# that from reading as a failure)
sudo dnf check-update || true
sudo dnf install -y httpd firewalld
sudo systemctl start httpd && sudo systemctl enable httpd
sudo systemctl enable --now firewalld

# Open HTTP and SSH in the firewall, then apply the permanent rules
sudo firewall-cmd --permanent --zone=public --add-service=http
sudo firewall-cmd --permanent --zone=public --add-service=ssh
sudo firewall-cmd --reload

# Create HTML file and display the external IP
# (sudo belongs on tee, which performs the privileged write — not on echo)
echo "<html><body>" | sudo tee /var/www/html/index.html
echo "External IP: $(curl -s ident.me)<br/>" | sudo tee -a /var/www/html/index.html
echo "hello user! I am this machine:<br/>" | sudo tee -a /var/www/html/index.html
echo "$(hostnamectl)<br/>" | sudo tee -a /var/www/html/index.html
echo "</body></html>" | sudo tee -a /var/www/html/index.html
Step 6: Launch Terraform orchestrator
Now, you can launch the commands to create the orchestration in colossus cloud:
terraform init
source <file>-rc.sh
terraform fmt -recursive
terraform plan -out plan.out
terraform apply plan.out
You can debug the terraform deployment by setting the OS_DEBUG=1
variable:
OS_DEBUG=1 TF_LOG=DEBUG terraform apply plan.out
Rolling back changes (optional)
You can rollback the deployment and destroy the instance with the command:
terraform destroy
IMPORTANT: This will destroy the instances and their volumes, and you will lose all data stored on them.
For more information check the terraform documentation: https://developer.hashicorp.com/terraform/docs