Terraform example

It is possible to replicate the OpenStack client example (created with the OpenStack YML file) using the Terraform orchestrator. To do so, create a folder containing the following files.

The orchestrator creates a base instance with apache2 and SSH access; the main web page served by the instance shows its external IP.

Step 1: Create the provider file (provider.tf)

# OpenStack provider credentials and Keystone v3 authentication endpoint.
provider "openstack" {
    user_name     = "<OPENSTACK_USERNAME>"
    # NOTE(review): the tenant (project) placeholder reuses the username;
    # on this cloud the project name may match the username — confirm.
    tenant_name   = "<OPENSTACK_USERNAME>"
    password      = "<OPENSTACK_PASSWORD>"
    auth_url      = "https://colossus.cesar.unizar.es:5000/v3"
}

Step 2: Create the variables file (variables.tf)

# Params file for variables

# Local SSH private key used by the post-deployment provisioner
variable "private_ssh_key" {
  type        = string
  default     = "<PRIVATE_SSH_KEY_LOCAL_PATH>"
  description = "Private ssh key path"
}

# Keypair associated with the local SSH private key
variable "keypair_name" {
  type        = string
  default     = "<OPENSTACK_ID_KEYPAIR>"
  description = "Keypair name for instance access"
}

# UUID of external gateway
variable "external_gateway" {
  type        = string
  default     = "<OPENSTACK_VLAN_UUID>"
  description = "UUID of the external gateway network used by the router"
}

# Name of the external network pool used to allocate floating IPs
variable "external_network" {
  type        = string
  default     = "vlanXXXX"
  description = "Name of the external (floating IP) network pool"
}

# DNS resolvers pushed to the subnet
variable "dns_ip" {
  type        = list(string)
  default     = ["155.210.12.9", "155.210.3.12"]
  description = "DNS nameservers configured on the subnet"
}

# Names and addressing of the HTTP network
variable "network_http" {
  type = map(string)
  default = {
    net_name    = "test-network"
    subnet_name = "test-subnetwork"
    cidr        = "192.168.0.0/24"
  }
  description = "Network name, subnetwork name and CIDR for the HTTP network"
}

# Mounting point for data volume
variable "mounting_point_data" {
  type        = string
  default     = "/mnt/data-volume"
  description = "Mounting point for data volume"
}

# Size of the attached block-storage volume
variable "volume_size" {
  type        = number
  default     = 2
  description = "Size of the volume in GB"
}

Step 3: Security groups file (security_groups.tf)

With these security groups, we open ports 80, 22 and 8888.

# NOTE(review): openstack_compute_secgroup_v2 (Nova API) is deprecated in
# the OpenStack provider in favour of openstack_networking_secgroup_v2 /
# openstack_networking_secgroup_rule_v2 (Neutron). Migrating would also
# require updating the references in the port resource, so the legacy
# resources are kept here.

# Allow inbound HTTP (TCP 80) from any address.
resource "openstack_compute_secgroup_v2" "http" {
  name        = "http"
  description = "Open input http port"
  rule {
    from_port   = 80
    to_port     = 80
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }
}

# Allow inbound SSH (TCP 22) from any address.
resource "openstack_compute_secgroup_v2" "ssh" {
  name        = "ssh"
  description = "Open input ssh port"
  rule {
    from_port   = 22
    to_port     = 22
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }
}

# Allow inbound Jupyter Notebook traffic (TCP 8888) from any address.
resource "openstack_compute_secgroup_v2" "jupyter" {
  name        = "jupyter"
  description = "Open input jupyter port"
  rule {
    from_port   = 8888
    to_port     = 8888
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }
}

Step 4: Networking file (network.tf)

We create a router, a network and a subnetwork.

# Router creation: uplinked to the external gateway network.
resource "openstack_networking_router_v2" "router_test" {
  name                = "test-router"
  external_network_id = var.external_gateway
}

# Network creation.
# NOTE(review): the Terraform label "router_test" is misleading for a
# network resource; it is kept because other resources reference it as
# openstack_networking_network_v2.router_test.
resource "openstack_networking_network_v2" "router_test" {
  name = var.network_http["net_name"]
}

#### HTTP SUBNET ####

# Subnet http configuration: name, CIDR and DNS servers come from variables.
resource "openstack_networking_subnet_v2" "http" {
  name            = var.network_http["subnet_name"]
  network_id      = openstack_networking_network_v2.router_test.id
  cidr            = var.network_http["cidr"]
  dns_nameservers = var.dns_ip
}

# Router interface configuration: plugs the subnet into the router so
# instances on it can reach the external network.
resource "openstack_networking_router_interface_v2" "http" {
  router_id = openstack_networking_router_v2.router_test.id
  subnet_id = openstack_networking_subnet_v2.http.id
}

Step 5: Main file (main.tf)

We create the instance, attach it to the network along with a 2 GB data volume, and associate a floating IP.

# Pin the Terraform version and the OpenStack provider version.
terraform {
  # required_version was mis-indented; aligned with the rest of the block.
  required_version = ">= 0.14.0"
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.53.0"
    }
  }
}

# Create the instance; connectivity and security groups come from the
# pre-built Neutron port referenced below.
resource "openstack_compute_instance_v2" "demo_instance" {
  name            = "demo-instance"
  # NOTE(review): image_name lookups break if the image is renamed or
  # removed from the cloud catalogue — confirm this image exists.
  image_name      = "Ubuntu server 24.04 (Noble Numbat)"
  flavor_name     = "m1.small"
  key_pair        = var.keypair_name
  network {
    port = openstack_networking_port_v2.http.id
  }
}

# Create the network port carrying the instance's fixed IP; the three
# security groups (ssh, http, jupyter) are applied on the port rather
# than on the instance.
resource "openstack_networking_port_v2" "http" {
  name           = "port-instance-http"
  network_id     = openstack_networking_network_v2.router_test.id
  security_group_ids = [
    openstack_compute_secgroup_v2.ssh.id,
    openstack_compute_secgroup_v2.http.id,
    openstack_compute_secgroup_v2.jupyter.id,
  ]
  fixed_ip {
    subnet_id = openstack_networking_subnet_v2.http.id
  }
}

# Allocate a floating IP from the external network pool.
resource "openstack_networking_floatingip_v2" "http" {
  pool = var.external_network
}

# Associate the floating IP with the instance so it is reachable from
# outside the tenant network.
resource "openstack_compute_floatingip_associate_v2" "http" {
  floating_ip = openstack_networking_floatingip_v2.http.address
  instance_id = openstack_compute_instance_v2.demo_instance.id
}

# Create the block-storage data volume (size taken from var.volume_size).
resource "openstack_blockstorage_volume_v3" "data_volume" {
  name        = "data-volume"
  description = "Data volume for demo instance"
  size        = var.volume_size  # Size in GB
}

# Attach the data volume to the instance.
# NOTE(review): no 'device' argument is set, so the guest kernel chooses
# the device name (step 7 assumes /dev/vdb) — confirm after boot.
resource "openstack_compute_volume_attach_v2" "attached" {
  instance_id = openstack_compute_instance_v2.demo_instance.id
  volume_id   = openstack_blockstorage_volume_v3.data_volume.id
}

# Post-deployment actions: format and mount the data volume, then install
# apache2 and publish the external IP (optional — for the manual
# post-deployment procedure see step 7).
resource "null_resource" "post_deployment" {
  # The provisioner must not run before the volume is attached (the disk
  # lookup below would find nothing) and before the floating IP is
  # associated (SSH to the floating address would fail). Neither resource
  # is referenced in this block, so the ordering is made explicit.
  depends_on = [
    openstack_compute_volume_attach_v2.attached,
    openstack_compute_floatingip_associate_v2.http,
  ]

  connection {
    type        = "ssh"
    host        = openstack_networking_floatingip_v2.http.address
    user        = "ubuntu"
    private_key = file(var.private_ssh_key)
  }

  provisioner "remote-exec" {
    inline = [
      # NOTE(review): grepping lsblk for "<size>G" is fragile — it also
      # matches e.g. "12G" when volume_size is 2. Confirm the device name
      # if other disks of similar size are attached.
      "DISK=\"/dev/$(lsblk | grep ${var.volume_size}G | awk '{print $1}')\"",
      "sudo mkfs.ext4 -qF $DISK",
      "sudo mkdir -p ${var.mounting_point_data}",
      "sudo mount $DISK ${var.mounting_point_data}",
      "sudo chown -R $USER:$USER ${var.mounting_point_data}",
      "UUID=$(sudo blkid -s UUID -o value $DISK)",
      "echo \"UUID=$UUID ${var.mounting_point_data} ext4 defaults,nofail 0 2\" | sudo tee -a /etc/fstab",
      "sudo apt update && sudo apt install -y apache2",
      # 'sudo echo' was redundant: the privilege is needed by tee, not echo.
      "echo \"<html><body>External IP: $(curl -s ident.me)</body></html>\" | sudo tee /var/www/html/index.html"
    ]
  }
}

# ID of the created data volume
output "volume_id" {
  value = openstack_blockstorage_volume_v3.data_volume.id
}

# Name of the created instance
output "instance_name" {
  value = openstack_compute_instance_v2.demo_instance.name
}

# Fixed (tenant network) IPv4 address of the instance
output "instance_ip" {
  value = openstack_compute_instance_v2.demo_instance.access_ip_v4
}

# Public floating IP used for SSH / HTTP access
output "floating_ip" {
  value = openstack_networking_floatingip_v2.http.address
}

Step 6: Launch Terraform orchestrator

Now, you can launch the commands to create the orchestration in colossus cloud:

# Download provider plugins and initialise the working directory
terraform init
# Load the OpenStack credentials (RC file downloaded from the dashboard)
source <file>-rc.sh
# Normalise the formatting of all .tf files
terraform fmt -recursive
# Build and save an execution plan, then apply it
terraform plan -out plan.out
terraform apply plan.out

You can debug the terraform deployment by setting the OS_DEBUG=1 variable:

OS_DEBUG=1 TF_LOG=DEBUG terraform apply plan.out

Step 7: Create volume in image and mount external volume (optional)

We can format and mount the attached volume; in this case it appears on the /dev/vdb device:

# Connect to the instance through its floating IP
ssh -i ~/.ssh/id_key ubuntu@FLOATING_IP
# Identify the attached data disk (here /dev/vdb)
sudo lsblk
# Create an ext4 filesystem and mount it
sudo mkfs.ext4 -qF /dev/vdb
sudo mkdir -p /mnt/data-jupyter
sudo mount /dev/vdb /mnt/data-jupyter/
sudo chown -R ubuntu:ubuntu /mnt/data-jupyter
# The original 'sudo UUID=...' would not set the variable in the current
# shell (sudo treats VAR=value as an environment assignment); run the
# assignment without sudo — blkid itself is already invoked with sudo.
UUID=$(sudo blkid -s UUID -o value /dev/vdb)
# Persist the mount across reboots
echo "UUID=$UUID /mnt/data-jupyter ext4 defaults,nofail 0 2" | sudo tee -a /etc/fstab

Step 8: Install jupyter notebook

As in a normal Ubuntu installation, we can use apt to install Jupyter Notebook; we also make use of venv (virtual environment):

sudo apt update
# Install the Python 3 virtual-environment module
sudo apt install python3-venv
# Create and activate an isolated virtual environment
python3 -m venv jupyter-env
source jupyter-env/bin/activate
pip install notebook
# Listen on all interfaces on port 8888 (opened by the jupyter secgroup)
jupyter notebook --no-browser --ip=0.0.0.0 --port=8888
deactivate

Rolling back changes (optional)

You can rollback the deployment and destroy the instance with the command:

terraform destroy

IMPORTANT: This will destroy the instance and you will lose all your data.

For more information check the terraform documentation: https://developer.hashicorp.com/terraform/docs