Terraform Samples
Terraform Infrastructure as Code examples for cloud resource provisioning and management
Key Facts
- Category
- DevOps
- Items
- 3
- Format Families
- sample
Sample Overview
Terraform Infrastructure as Code examples for cloud resource provisioning and management. This sample set belongs to the DevOps category and can be used to test related workflows inside Elysia Tools.
💻 AWS EC2 Instance Deployment hcl
🟢 simple
Basic Terraform configuration for deploying AWS EC2 instances with security groups
🏷️ aws, ec2, infrastructure, terraform
# AWS Provider configuration
# Pin the AWS provider to the 5.x major series so `terraform init`
# resolves a compatible plugin version.
terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
}
}
# Region is supplied via var.aws_region (defaults to us-east-1 below),
# keeping the configuration region-portable.
provider "aws" {
region = var.aws_region
}
# Variables
variable "aws_region" {
description = "AWS region for resources"
type = string
default = "us-east-1"
}
variable "instance_type" {
description = "EC2 instance type"
type = string
default = "t3.micro"
}
variable "ami_id" {
description = "Amazon Machine Image ID"
type = string
# NOTE(review): AMI IDs are region-specific — this default only resolves
# in us-east-1; override it when deploying to another region.
default = "ami-0c02fb55956c7d316"
}
# Security group for the web server: HTTP/HTTPS/SSH in, everything out.
# NOTE(review): SSH (port 22) is open to 0.0.0.0/0 below — restrict the
# CIDR to a bastion/VPN range before using this in production.
resource "aws_security_group" "web_sg" {
  name        = "web-security-group"
  description = "Allow HTTP/HTTPS and SSH traffic"

  ingress {
    description = "HTTP from anywhere"
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "HTTPS from anywhere"
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    description = "SSH from anywhere"
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Allow all outbound traffic.
  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  # Tag key/value aligned with aws_instance.web_server ("Environment" =
  # "Production"); the original used "Env" = "production" here, so the
  # two resources did not group together in tag-based reports.
  tags = {
    Name        = "web-sg"
    Environment = "Production"
  }
}
# EC2 Instance
# Web server bootstrapped on first boot via cloud-init user data.
resource "aws_instance" "web_server" {
ami = var.ami_id
instance_type = var.instance_type
vpc_security_group_ids = [aws_security_group.web_sg.id]
# Installs and starts Apache, then publishes a test page.
# NOTE(review): the yum-based script assumes an Amazon Linux/RHEL-family
# AMI — confirm var.ami_id matches.
user_data = base64encode(<<-EOF
#!/bin/bash
yum update -y
yum install -y httpd
systemctl start httpd
systemctl enable httpd
echo "<h1>Hello from Terraform!</h1>" > /var/www/html/index.html
EOF
)
tags = {
Name = "WebServer"
Environment = "Production"
ManagedBy = "Terraform"
}
}
# Output
# Values surfaced after `terraform apply` for quick access to the instance.
output "instance_public_ip" {
description = "Public IP address of the EC2 instance"
value = aws_instance.web_server.public_ip
}
output "instance_id" {
description = "ID of the EC2 instance"
value = aws_instance.web_server.id
}
💻 Multi-tier Web Application Architecture hcl
🟡 intermediate
Complete multi-tier architecture with VPC, application load balancer, target group, and RDS database
🏷️ aws, vpc, rds, autoscaling, load-balancer, terraform
# Multi-tier Web Application Architecture
# Pins Terraform core to >= 1.0 and the AWS provider to the 5.x series.
terraform {
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
}
}
# Region comes from var.aws_region so the stack is region-portable.
provider "aws" {
region = var.aws_region
}
# Variables
variable "aws_region" {
description = "AWS region"
type = string
default = "us-east-1"
}
variable "environment" {
description = "Environment name"
type = string
default = "production"
}
# No default on purpose: supply via TF_VAR_db_password or -var so the
# secret never lands in version control; `sensitive` keeps the value out
# of plan/apply output.
variable "db_password" {
description = "Database password"
type = string
sensitive = true
}
# VPC Configuration
# /16 network; DNS support + hostnames are enabled so DNS-based AWS
# endpoints (e.g. the RDS endpoint) resolve inside the VPC.
resource "aws_vpc" "main" {
cidr_block = "10.0.0.0/16"
enable_dns_support = true
enable_dns_hostnames = true
tags = {
Name = "main-vpc"
Environment = var.environment
}
}
# Internet Gateway — gives the public subnets a route to the internet.
resource "aws_internet_gateway" "main" {
vpc_id = aws_vpc.main.id
tags = {
Name = "main-igw"
}
}
# Public Subnets
# Two /24s carved from the VPC CIDR, one per availability zone; instances
# launched here receive public IPs.
resource "aws_subnet" "public" {
  count                   = 2
  vpc_id                  = aws_vpc.main.id
  cidr_block              = cidrsubnet(aws_vpc.main.cidr_block, 8, count.index)
  availability_zone       = data.aws_availability_zones.available.names[count.index]
  map_public_ip_on_launch = true

  tags = {
    # Unique per-instance name — the original tagged both subnets
    # "public-subnet", making them indistinguishable in the console.
    Name = "public-subnet-${count.index + 1}"
  }
}
# Private Subnets
# Two /24s (offset by 10 to avoid the public ranges), one per AZ; used
# by the RDS subnet group. No public IPs are assigned here.
resource "aws_subnet" "private" {
  count             = 2
  vpc_id            = aws_vpc.main.id
  cidr_block        = cidrsubnet(aws_vpc.main.cidr_block, 8, count.index + 10)
  availability_zone = data.aws_availability_zones.available.names[count.index]

  tags = {
    # Unique per-instance name — the original tagged both subnets
    # "private-subnet", making them indistinguishable in the console.
    Name = "private-subnet-${count.index + 1}"
  }
}
# Data source for availability zones
# Looks up the AZs currently available in the configured region; the
# subnet resources index into .names to spread across distinct AZs.
data "aws_availability_zones" "available" {
state = "available"
}
# Route Tables
# Public route table: default route (0.0.0.0/0) via the internet gateway.
resource "aws_route_table" "public" {
vpc_id = aws_vpc.main.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.main.id
}
tags = {
Name = "public-rt"
}
}
# Associate every public subnet with the public route table.
# NOTE(review): the private subnets get no explicit route table here and
# fall back to the VPC's main table (no internet route) — fine for an
# RDS-only tier, but confirm if private instances ever need NAT egress.
resource "aws_route_table_association" "public" {
count = length(aws_subnet.public)
subnet_id = aws_subnet.public[count.index].id
route_table_id = aws_route_table.public.id
}
# Security Groups
# Web-tier security group: HTTP in from anywhere, all traffic out.
# name_prefix lets Terraform generate a unique suffix, avoiding name
# collisions during create-before-destroy replacement.
resource "aws_security_group" "web_sg" {
name_prefix = "web-sg-"
vpc_id = aws_vpc.main.id
ingress {
description = "HTTP"
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
# Allow all outbound traffic.
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "web-sg"
}
}
# Load Balancer
# Internet-facing application load balancer across the public subnets.
resource "aws_lb" "web" {
  name               = "web-lb"
  internal           = false
  load_balancer_type = "application"
  security_groups    = [aws_security_group.web_sg.id]
  subnets            = aws_subnet.public[*].id

  tags = {
    Environment = var.environment
  }
}

# Target group the web tier registers into; health-checked on GET /.
resource "aws_lb_target_group" "web" {
  name     = "web-tg"
  port     = 80
  protocol = "HTTP"
  vpc_id   = aws_vpc.main.id

  health_check {
    enabled             = true
    healthy_threshold   = 2
    interval            = 30
    matcher             = "200"
    path                = "/"
    port                = "traffic-port"
    protocol            = "HTTP"
    timeout             = 5
    unhealthy_threshold = 2
  }
}

# HTTP listener forwarding to the target group. Without a listener the
# ALB accepts no traffic at all — the original defined the LB and target
# group but never wired them together.
resource "aws_lb_listener" "web" {
  load_balancer_arn = aws_lb.web.arn
  port              = 80
  protocol          = "HTTP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.web.arn
  }
}
# RDS Database
# Subnet group confining the database to the private subnets.
resource "aws_db_subnet_group" "main" {
name = "main-db-subnet-group"
subnet_ids = aws_subnet.private[*].id
tags = {
Name = "DB subnet group"
}
}
# MySQL 8.0 on a db.t3.micro with encrypted gp2 storage that can
# autoscale from 20 GiB up to 100 GiB; daily backups retained 7 days.
# NOTE(review): no vpc_security_group_ids is set, so the instance uses
# the VPC's default security group — attach an explicit SG allowing 3306
# only from the web tier.
# NOTE(review): skip_final_snapshot = true means `terraform destroy`
# discards the data with no snapshot; acceptable for a sample only.
resource "aws_db_instance" "main" {
identifier = "web-app-db"
engine = "mysql"
engine_version = "8.0"
instance_class = "db.t3.micro"
allocated_storage = 20
max_allocated_storage = 100
storage_type = "gp2"
storage_encrypted = true
db_name = "webapp"
username = "admin"
password = var.db_password
db_subnet_group_name = aws_db_subnet_group.main.name
backup_retention_period = 7
backup_window = "03:00-04:00"
maintenance_window = "sun:04:00-sun:05:00"
skip_final_snapshot = true
tags = {
Environment = var.environment
}
}
# Outputs
output "load_balancer_dns" {
description = "DNS name of the load balancer"
value = aws_lb.web.dns_name
}
# Marked sensitive so the endpoint is redacted from CLI output.
output "database_endpoint" {
description = "RDS database endpoint"
value = aws_db_instance.main.endpoint
sensitive = true
}
💻 EKS Kubernetes Cluster Deployment hcl
🔴 complex
Complete EKS Kubernetes cluster with VPC, IAM roles, node groups, and applications
🏷️ kubernetes, eks, aws, containers, orchestration, terraform
# EKS Kubernetes Cluster Deployment
# Three providers: aws for the infrastructure, kubernetes for in-cluster
# resources once the control plane exists, helm for chart installs.
# NOTE(review): the helm provider is declared but never configured or
# used in this file — confirm it is actually needed.
terraform {
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "~> 2.20"
}
helm = {
source = "hashicorp/helm"
version = "~> 2.10"
}
}
}
provider "aws" {
region = var.aws_region
}
# Variables
variable "aws_region" {
description = "AWS region"
type = string
default = "us-east-1"
}
variable "cluster_name" {
description = "EKS cluster name"
type = string
default = "production-cluster"
}
# Control-plane Kubernetes version; must be a version EKS still supports.
variable "cluster_version" {
description = "Kubernetes version"
type = string
default = "1.28"
}
# Scaling bounds for the managed node group (min <= desired <= max).
variable "node_group_desired_size" {
description = "Desired number of worker nodes"
type = number
default = 3
}
variable "node_group_max_size" {
description = "Maximum number of worker nodes"
type = number
default = 6
}
variable "node_group_min_size" {
description = "Minimum number of worker nodes"
type = number
default = 1
}
# Data sources
# AZs currently available in var.aws_region; the subnet resources index
# into .names to place one subnet per AZ.
data "aws_availability_zones" "available" {
state = "available"
}
# VPC for EKS
# DNS hostnames + support are enabled — EKS expects both on the cluster
# VPC for node registration and service discovery.
resource "aws_vpc" "eks" {
cidr_block = "10.0.0.0/16"
enable_dns_hostnames = true
enable_dns_support = true
tags = {
Name = "eks-vpc"
}
}
# Internet Gateway — internet access for the public subnets.
resource "aws_internet_gateway" "eks" {
vpc_id = aws_vpc.eks.id
tags = {
Name = "eks-igw"
}
}
# Public Subnets
# One per AZ (3 total). The kubernetes.io/cluster/<name> = "shared" tag
# associates the subnet with this cluster; kubernetes.io/role/elb marks
# it as a candidate for internet-facing load balancers.
resource "aws_subnet" "public" {
count = 3
vpc_id = aws_vpc.eks.id
cidr_block = cidrsubnet(aws_vpc.eks.cidr_block, 8, count.index)
availability_zone = data.aws_availability_zones.available.names[count.index]
map_public_ip_on_launch = true
tags = {
Name = "eks-public-subnet"
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
}
}
# Private Subnets
# Worker nodes run here; kubernetes.io/role/internal-elb marks these
# subnets as candidates for internal load balancers.
resource "aws_subnet" "private" {
count = 3
vpc_id = aws_vpc.eks.id
cidr_block = cidrsubnet(aws_vpc.eks.cidr_block, 8, count.index + 10)
availability_zone = data.aws_availability_zones.available.names[count.index]
tags = {
Name = "eks-private-subnet"
"kubernetes.io/cluster/${var.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}
}
# NAT Gateway
# One Elastic IP + NAT gateway per AZ so private-subnet egress survives
# a single-AZ outage.
resource "aws_eip" "nat" {
  count = 3
  # Fix: the `vpc = true` argument was removed in AWS provider v5
  # (this configuration pins "~> 5.0"); `domain = "vpc"` is the v5
  # replacement for allocating a VPC-scoped EIP.
  domain = "vpc"
  tags = {
    Name = "eks-nat-eip"
  }
  # EIPs in a VPC can only be used once the internet gateway exists.
  depends_on = [aws_internet_gateway.eks]
}

resource "aws_nat_gateway" "eks" {
  count         = 3
  allocation_id = aws_eip.nat[count.index].id
  subnet_id     = aws_subnet.public[count.index].id
  tags = {
    Name = "eks-nat-gw"
  }
  depends_on = [aws_internet_gateway.eks]
}
# Route Tables
# Public: a single table, default route via the internet gateway.
resource "aws_route_table" "public" {
vpc_id = aws_vpc.eks.id
route {
cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.eks.id
}
tags = {
Name = "eks-public-rt"
}
}
# Private: one table per AZ, each egressing through that AZ's NAT
# gateway, so an AZ outage doesn't break egress everywhere.
resource "aws_route_table" "private" {
count = 3
vpc_id = aws_vpc.eks.id
route {
cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.eks[count.index].id
}
tags = {
Name = "eks-private-rt"
}
}
resource "aws_route_table_association" "public" {
count = 3
subnet_id = aws_subnet.public[count.index].id
route_table_id = aws_route_table.public.id
}
# Each private subnet pairs with the private route table of its own AZ.
resource "aws_route_table_association" "private" {
count = 3
subnet_id = aws_subnet.private[count.index].id
route_table_id = aws_route_table.private[count.index].id
}
# IAM Role for EKS Cluster
# Trust policy allows the EKS service (eks.amazonaws.com) to assume this
# role when managing the control plane.
resource "aws_iam_role" "eks_cluster_role" {
name = "eks-cluster-role"
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "eks.amazonaws.com"
}
}
]
})
tags = {
Name = "eks-cluster-role"
}
}
# AWS-managed policy with the permissions the control plane requires.
resource "aws_iam_role_policy_attachment" "eks_cluster_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.eks_cluster_role.name
}
# EKS Cluster
# Control plane attached to all six subnets (public + private).
resource "aws_eks_cluster" "main" {
name = var.cluster_name
role_arn = aws_iam_role.eks_cluster_role.arn
version = var.cluster_version
vpc_config {
subnet_ids = concat(aws_subnet.public[*].id, aws_subnet.private[*].id)
}
# Ensure the cluster role's policy is attached before cluster creation
# (and stays attached until after deletion).
depends_on = [
aws_iam_role_policy_attachment.eks_cluster_policy
]
tags = {
Name = var.cluster_name
}
}
# IAM Role for EKS Node Group
# Worker nodes are EC2 instances, so the trust policy allows
# ec2.amazonaws.com to assume this role.
resource "aws_iam_role" "eks_node_role" {
name = "eks-node-role"
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Principal = {
Service = "ec2.amazonaws.com"
}
}
]
})
tags = {
Name = "eks-node-role"
}
}
# Managed policies worker nodes need: join the cluster, ...
resource "aws_iam_role_policy_attachment" "eks_worker_node_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = aws_iam_role.eks_node_role.name
}
# ... operate the VPC CNI networking plugin, ...
resource "aws_iam_role_policy_attachment" "eks_cni_policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = aws_iam_role.eks_node_role.name
}
# ... and pull container images from ECR.
resource "aws_iam_role_policy_attachment" "ec2_container_registry_readonly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = aws_iam_role.eks_node_role.name
}
# EKS Node Group
# Managed node group of t3.medium instances in the private subnets,
# scaling between var.node_group_min_size and var.node_group_max_size.
resource "aws_eks_node_group" "main" {
cluster_name = aws_eks_cluster.main.name
node_group_name = "main-node-group"
node_role_arn = aws_iam_role.eks_node_role.arn
subnet_ids = aws_subnet.private[*].id
scaling_config {
desired_size = var.node_group_desired_size
max_size = var.node_group_max_size
min_size = var.node_group_min_size
}
instance_types = ["t3.medium"]
# All node policies must be attached before nodes attempt to join the
# cluster, otherwise registration fails.
depends_on = [
aws_iam_role_policy_attachment.eks_worker_node_policy,
aws_iam_role_policy_attachment.eks_cni_policy,
aws_iam_role_policy_attachment.ec2_container_registry_readonly
]
tags = {
Name = "main-node-group"
}
}
# Kubernetes Provider
# Authenticates against the freshly created cluster using exec-based
# auth (`aws eks get-token`), so no static token is stored in state.
# NOTE(review): configuring a kubernetes provider from an
# aws_eks_cluster resource in the same configuration is fragile on
# destroy/replace — consider managing in-cluster resources from a
# separate root module.
provider "kubernetes" {
host = aws_eks_cluster.main.endpoint
cluster_ca_certificate = base64decode(aws_eks_cluster.main.certificate_authority[0].data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
args = ["eks", "get-token", "--cluster-name", aws_eks_cluster.main.name]
}
}
# Deploy sample application
# Dedicated namespace for the sample workload.
resource "kubernetes_namespace" "app" {
metadata {
name = "sample-app"
}
}
# Three-replica nginx Deployment with explicit CPU/memory requests and
# limits so the scheduler can place pods predictably.
resource "kubernetes_deployment" "sample_app" {
metadata {
name = "sample-app"
namespace = kubernetes_namespace.app.metadata[0].name
}
spec {
replicas = 3
# Selector labels must match the pod template labels below.
selector {
match_labels = {
app = "sample-app"
}
}
template {
metadata {
labels = {
app = "sample-app"
}
}
spec {
container {
image = "nginx:1.21"
name = "nginx"
port {
container_port = 80
}
resources {
limits = {
cpu = "200m"
memory = "256Mi"
}
requests = {
cpu = "100m"
memory = "128Mi"
}
}
}
}
}
}
}
# Outputs
output "cluster_endpoint" {
  description = "Endpoint for EKS control plane"
  value       = aws_eks_cluster.main.endpoint
}

output "cluster_name" {
  description = "Kubernetes Cluster Name"
  value       = aws_eks_cluster.main.name
}

output "configure_kubectl" {
  description = "Configure kubectl command"
  # Interpolate the live region and cluster name instead of the original
  # hard-coded "us-east-1"/"production-cluster" literals, which silently
  # went stale whenever var.aws_region or var.cluster_name was overridden.
  value = "aws eks update-kubeconfig --region ${var.aws_region} --name ${aws_eks_cluster.main.name}"
}