Terraform example

It is possible to replicate, with the Terraform orchestrator, the OpenStack client example created with the OpenStack YAML file. Create a folder containing the following files:

Terraform launches three base instances with apache2 and SSH access; each instance serves a web page that shows its external IP.

Step 1: Create the provider file (provider.tf)

provider "openstack" {
    user_name     = "<OPENSTACK_USERNAME>"
    tenant_name   = "<OPENSTACK_USERNAME>"
    password      = "<OPENSTACK_PASSWORD>"
    auth_url      = "https://colossus.cesar.unizar.es:5000/v3"
}
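
Alternatively, the provider can read credentials from the OS_* environment variables exported by the OpenStack RC file (the one sourced in Step 6), so they need not be hard-coded. A minimal sketch, assuming the RC file exports OS_USERNAME, OS_PASSWORD, OS_PROJECT_NAME and OS_AUTH_URL:

provider "openstack" {
  # All settings are read from the OS_* environment variables,
  # e.g. OS_USERNAME, OS_PASSWORD, OS_PROJECT_NAME, OS_AUTH_URL.
}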

Step 2: Create the variables file (variables.tf)

# Params file for variables

# Number of instances to deploy
variable "num_instances" {
  type    = number
  default = 3
}

# Local SSH private key
variable "private_ssh_key" {
  type        = string
  default     = "<PRIVATE_SSH_KEY_LOCAL_PATH>"
  description = "Private ssh key path"
}

# Keypair associated with the local SSH private key
variable "keypair_name" {
  type        = string
  default     = "<OPENSTACK_ID_KEYPAIR>"
  description = "Keypair name for instance access"
}

# UUID of external gateway
variable "external_gateway" {
  type    = string
  default = "<OPENSTACK_VLAN_UUID>"
}

variable "external_network" {
  type    = string
  default = "vlanXXXX"
}

variable "dns_ip" {
  type    = list(string)
  default = ["155.210.12.9", "155.210.3.12"]
}

variable "network_http" {
  type = map(string)
  default = {
    net_name    = "test-network"
    subnet_name = "test-subnetwork"
    cidr        = "192.168.0.0/24"
  }
}

# Mounting point for data volume
variable "mounting_point_data" {
  type        = string
  default     = "/mnt/data-volume"
  description = "Mounting point for data volume"
}

variable "volume_size" {
  type        = number
  default     = 2
  description = "Size of the volume in GB"
}
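
These defaults can be overridden without editing variables.tf, either with -var flags or with a terraform.tfvars file that Terraform loads automatically. A sketch with hypothetical values:

# terraform.tfvars (hypothetical example values)
num_instances   = 2
volume_size     = 5
keypair_name    = "my-keypair"
private_ssh_key = "/home/<user>/.ssh/id_rsa"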

Step 3: Security groups file (security_groups.tf)

With these security groups, we open ports 80 (HTTP) and 22 (SSH) and allow ICMP traffic.

resource "openstack_compute_secgroup_v2" "http" {
  name        = "http"
  description = "Open input http port"
  rule {
    from_port   = 80
    to_port     = 80
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }
}

resource "openstack_compute_secgroup_v2" "ssh" {
  name        = "ssh"
  description = "Open input ssh port"
  rule {
    from_port   = 22
    to_port     = 22
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }
}

resource "openstack_compute_secgroup_v2" "icmp" {
  name        = "icmp"
  description = "Security group for ICMP"
  rule {
    from_port   = -1
    to_port     = -1
    ip_protocol = "icmp"
    cidr        = "0.0.0.0/0"
  }
}
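
Note that openstack_compute_secgroup_v2 is the legacy Nova-style resource; recent versions of the provider recommend the Neutron resources openstack_networking_secgroup_v2 and openstack_networking_secgroup_rule_v2 instead. A sketch of the HTTP group in that style (resource names here are illustrative):

resource "openstack_networking_secgroup_v2" "http_neutron" {
  name        = "http-neutron"
  description = "Open input http port (Neutron style)"
}

resource "openstack_networking_secgroup_rule_v2" "http_neutron_80" {
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = 80
  port_range_max    = 80
  remote_ip_prefix  = "0.0.0.0/0"
  security_group_id = openstack_networking_secgroup_v2.http_neutron.id
}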

Step 4: Networking file (network.tf)

We create a router, a network and a subnetwork.

# Router creation
resource "openstack_networking_router_v2" "router_test" {
  name                = "test-router"
  external_network_id = var.external_gateway
}

# Network creation
resource "openstack_networking_network_v2" "router_test" {
  name = var.network_http["net_name"]
}

#### HTTP SUBNET ####

# Subnet http configuration
resource "openstack_networking_subnet_v2" "http" {
  name            = var.network_http["subnet_name"]
  network_id      = openstack_networking_network_v2.router_test.id
  cidr            = var.network_http["cidr"]
  dns_nameservers = var.dns_ip
}

# Router interface configuration
resource "openstack_networking_router_interface_v2" "http" {
  router_id = openstack_networking_router_v2.router_test.id
  subnet_id = openstack_networking_subnet_v2.http.id
}
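
If you prefer not to copy the external network UUID into variables.tf by hand, it can usually be looked up with a data source and used in place of var.external_gateway. A sketch, assuming the external network is visible to your project:

# Look up the external network by name instead of hard-coding its UUID
data "openstack_networking_network_v2" "external" {
  name = var.external_network
}

# The router could then reference:
#   external_network_id = data.openstack_networking_network_v2.external.id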

Step 5: Main file (main.tf)

We create the instances and, for each one, attach a network port and a data volume, associate a floating IP, and run a post-deployment script over SSH.

# Configure OpenStack Provider
terraform {
  required_version = ">= 0.14.0"
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.53.0"
    }
  }
}

# Create instance
resource "openstack_compute_instance_v2" "demo_instance" {
  count       = var.num_instances
  name        = "demo-instance-${count.index + 1}"
  image_name  = "Ubuntu server 24.04 (Noble Numbat)"
  flavor_name = "m1.small"
  key_pair    = var.keypair_name
  network {
    port = openstack_networking_port_v2.http[count.index].id
  }
}

# Create network port
resource "openstack_networking_port_v2" "http" {
  count      = var.num_instances
  name       = "port-instance-http-${count.index + 1}"
  network_id = openstack_networking_network_v2.router_test.id
  security_group_ids = [
    openstack_compute_secgroup_v2.ssh.id,
    openstack_compute_secgroup_v2.http.id,
    openstack_compute_secgroup_v2.icmp.id,
  ]
  fixed_ip {
    subnet_id = openstack_networking_subnet_v2.http.id
  }
}

# Create floating ip
resource "openstack_networking_floatingip_v2" "http" {
  count = var.num_instances
  pool  = var.external_network
}

# Attach floating ip to instance
resource "openstack_compute_floatingip_associate_v2" "http" {
  count       = var.num_instances
  floating_ip = openstack_networking_floatingip_v2.http[count.index].address
  instance_id = openstack_compute_instance_v2.demo_instance[count.index].id
}

# Create volume
resource "openstack_blockstorage_volume_v3" "data_volume" {
  count       = var.num_instances
  name        = "data-volume-${count.index + 1}"
  description = "Data volume for demo instance ${count.index + 1}"
  size        = var.volume_size
}

# Attach volume to instance
resource "openstack_compute_volume_attach_v2" "attached" {
  count       = var.num_instances
  instance_id = openstack_compute_instance_v2.demo_instance[count.index].id
  volume_id   = openstack_blockstorage_volume_v3.data_volume[count.index].id
}

# Post-deployment actions: format and mount the data volume
resource "null_resource" "post_deployment" {
  count = var.num_instances

  # Wait until the floating IP is associated and the volume is attached
  depends_on = [
    openstack_compute_floatingip_associate_v2.http,
    openstack_compute_volume_attach_v2.attached,
  ]

  connection {
    type        = "ssh"
    host        = openstack_networking_floatingip_v2.http[count.index].address
    user        = "ubuntu"
    private_key = file(var.private_ssh_key)
  }

  # Copies file from local directory to remote directory
  provisioner "file" {
    source      = "post-deployment.sh"
    destination = "/tmp/post-deployment.sh"
  }

  provisioner "remote-exec" {
    inline = [
      "chmod +x /tmp/post-deployment.sh",
      "/tmp/post-deployment.sh ${var.volume_size} ${var.mounting_point_data}",
    ]
  }
}

# Outputs
output "instance_names" {
  value = openstack_compute_instance_v2.demo_instance[*].name
}

output "instance_ips" {
  value = openstack_compute_instance_v2.demo_instance[*].access_ip_v4
}

output "floating_ips" {
  value = openstack_networking_floatingip_v2.http[*].address
}

output "volume_ids" {
  value = openstack_blockstorage_volume_v3.data_volume[*].id
}
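
A convenience output like the following (not part of the original files) builds the URL of each instance's web page directly:

output "instance_urls" {
  value = [for ip in openstack_networking_floatingip_v2.http[*].address : "http://${ip}"]
}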

File post-deployment.sh:

#!/bin/bash

# Look for a disk with the specified size (take the first match)
DISK_NAME=$(lsblk | awk -v size="${1}G" '$4 == size {print $1}' | head -n 1)

# Check if a matching disk was found; without this guard, DISK would
# default to "/dev/" and the emptiness test would never trigger
if [ -z "$DISK_NAME" ]; then
  echo "No disk found with size ${1}G"
  exit 1
fi

DISK=/dev/$DISK_NAME
echo "Disk found: $DISK"

# Check if the disk is already mounted and unmount it
if mount | grep -q "$DISK"; then
  echo "Unmounting $DISK"
  sudo umount $DISK
fi

DATA_VOLUME=$2

# Format data volume and mount it
sudo mkfs.ext4 -qF $DISK
sudo mkdir -p $DATA_VOLUME && \
sudo mount $DISK $DATA_VOLUME && \
sudo chown -R $USER:$USER $DATA_VOLUME

# Add the disk to fstab use the UUID of the disk
UUID=$(sudo blkid -s UUID -o value $DISK)
if [ -z "$UUID" ]; then
  echo "Failed to get UUID for $DISK"
  exit 1
fi

# Check if the disk is already in fstab
if grep -q "$UUID" /etc/fstab; then
  echo "Disk $DISK already in fstab"
else
  echo "Adding disk $DISK to fstab"
  echo "UUID=$UUID $DATA_VOLUME ext4 defaults,nofail 0 2" | sudo tee -a /etc/fstab
fi

# Install packages
sudo apt update && sudo apt install -y apache2

# Create HTML file and display the external IP
echo "<html><body>External IP: $(curl -s ident.me)</body></html>" | sudo tee /var/www/html/index.html

# Open ports in the firewall: add the allow rules first, then enable
# non-interactively so the SSH session is not locked out
sudo ufw allow OpenSSH
sudo ufw allow 80/tcp
sudo ufw --force enable
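
As an alternative to the SSH provisioners, much of this bootstrap could be passed to the instance as user_data (cloud-init) at creation time, avoiding the need for the private key in Terraform. A minimal sketch, assuming the image runs cloud-init (the resource name and shortened script are illustrative):

resource "openstack_compute_instance_v2" "demo_cloudinit" {
  name        = "demo-cloudinit"
  image_name  = "Ubuntu server 24.04 (Noble Numbat)"
  flavor_name = "m1.small"
  key_pair    = var.keypair_name

  # Executed by cloud-init on first boot; no SSH connection required
  user_data = <<-EOT
    #!/bin/bash
    apt update && apt install -y apache2
  EOT

  network {
    name = var.network_http["net_name"]
  }
}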

Step 6: Launch the Terraform orchestrator

Now you can run the following commands to create the deployment on the Colossus cloud:

terraform init
source <file>-rc.sh
terraform fmt -recursive
terraform plan -out plan.out
terraform apply plan.out
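
Once the apply finishes, you can inspect the outputs and test a web page, for example:

terraform output floating_ips
curl http://<FLOATING_IP>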

You can debug the Terraform deployment by setting the OS_DEBUG and TF_LOG environment variables:

OS_DEBUG=1 TF_LOG=DEBUG terraform apply plan.out

Rolling back changes (optional)

You can roll back the deployment and destroy the instances with the command:

terraform destroy

IMPORTANT: This will destroy the instances and their volumes, and you will lose all your data.

For more information, check the Terraform documentation: https://developer.hashicorp.com/terraform/docs