Terraform CDK Samples

Infrastructure as Code examples using Terraform Cloud Development Kit with TypeScript and Python

Key Facts

Category
Infrastructure as Code
Items
4
Format Families
sample

Sample Overview

Infrastructure as Code examples using the Terraform Cloud Development Kit with TypeScript and Python. This sample set belongs to Infrastructure as Code and can be used to test related workflows inside Elysia Tools.

💻 CDKTF TypeScript Setup and Basic Usage typescript

🟢 simple ⭐⭐⭐

Setting up Terraform CDK with TypeScript and creating basic AWS resources

⏱️ 20 min 🏷️ cdktf, typescript, aws, iac
Prerequisites: Terraform knowledge, TypeScript, AWS account
// CDKTF TypeScript Setup and Basic Usage
import { Construct } from 'constructs';
import { App, TerraformStack, RemoteBackend, TerraformOutput } from 'cdktf';
import { AwsProvider } from '@cdktf/provider-aws/lib/provider';
import { S3Bucket } from '@cdktf/provider-aws/lib/s3-bucket';
import { Vpc } from '@cdktf/provider-aws/lib/vpc';
import { Subnet } from '@cdktf/provider-aws/lib/subnet';
import { Instance } from '@cdktf/provider-aws/lib/instance';
import { SecurityGroup } from '@cdktf/provider-aws/lib/security-group';

// Demo stack: one S3 bucket, a VPC with two public subnets, a security group
// allowing SSH/HTTP from anywhere, and a single Apache web server instance.
class MyTerraformStack extends TerraformStack {
  constructor(scope: Construct, id: string) {
    super(scope, id);

    // Configure AWS provider
    // defaultTags are merged onto every taggable resource in this stack.
    new AwsProvider(this, 'aws', {
      region: 'us-west-2',
      defaultTags: {
        tags: {
          Environment: 'development',
          Project: 'cdktf-demo',
          ManagedBy: 'cdktf'
        }
      }
    });

    // Create S3 bucket
    // NOTE(review): Math.random() yields a different bucket name on every
    // synth, so each deploy plans a destroy/recreate of the bucket. Prefer a
    // deterministic suffix (account id, stack name, ...) — TODO confirm intent.
    const bucket = new S3Bucket(this, 'my-app-bucket', {
      bucket: `my-unique-app-bucket-${Math.floor(Math.random() * 1000000)}`,
      // NOTE(review): inline `acl`/`versioning` arguments were deprecated in
      // AWS provider v4+ in favor of separate aws_s3_bucket_* resources —
      // verify against the provider version pinned by this project.
      acl: 'private',
      versioning: {
        enabled: true
      },
      tags: {
        Name: 'my-app-bucket',
        Purpose: 'application-storage'
      }
    });

    // Create VPC
    // DNS hostnames/support enabled so instances get resolvable public names.
    const vpc = new Vpc(this, 'my-vpc', {
      cidrBlock: '10.0.0.0/16',
      enableDnsHostnames: true,
      enableDnsSupport: true,
      tags: {
        Name: 'my-app-vpc'
      }
    });

    // Create public subnets
    // Two subnets in distinct AZs; mapPublicIpOnLaunch gives instances public
    // IPs automatically (no NAT/IGW is created in this sample — TODO confirm
    // an internet gateway and route table exist elsewhere for these to be
    // truly "public").
    const publicSubnet1 = new Subnet(this, 'public-subnet-1', {
      vpcId: vpc.id,
      cidrBlock: '10.0.1.0/24',
      availabilityZone: 'us-west-2a',
      mapPublicIpOnLaunch: true,
      tags: {
        Name: 'public-subnet-1',
        Type: 'public'
      }
    });

    const publicSubnet2 = new Subnet(this, 'public-subnet-2', {
      vpcId: vpc.id,
      cidrBlock: '10.0.2.0/24',
      availabilityZone: 'us-west-2b',
      mapPublicIpOnLaunch: true,
      tags: {
        Name: 'public-subnet-2',
        Type: 'public'
      }
    });

    // Create security group
    // SSH (22) and HTTP (80) are open to 0.0.0.0/0 — acceptable for a demo
    // only; restrict the SSH CIDR for anything real.
    const securityGroup = new SecurityGroup(this, 'app-sg', {
      name: 'app-security-group',
      vpcId: vpc.id,
      ingress: [
        {
          description: 'Allow SSH',
          fromPort: 22,
          toPort: 22,
          protocol: 'tcp',
          cidrBlocks: ['0.0.0.0/0']
        },
        {
          description: 'Allow HTTP',
          fromPort: 80,
          toPort: 80,
          protocol: 'tcp',
          cidrBlocks: ['0.0.0.0/0']
        }
      ],
      // Unrestricted egress (protocol '-1' = all).
      egress: [
        {
          fromPort: 0,
          toPort: 0,
          protocol: '-1',
          cidrBlocks: ['0.0.0.0/0']
        }
      ],
      tags: {
        Name: 'app-security-group'
      }
    });

    // Create EC2 instance
    // NOTE(review): the AMI id is region-specific and hard-coded; it is only
    // valid in the provider's region (us-west-2) and may age out — consider a
    // DataAwsAmi lookup instead.
    const instance = new Instance(this, 'app-instance', {
      ami: 'ami-0c55b159cbfafe1f0', // Amazon Linux 2
      instanceType: 't3.micro',
      subnetId: publicSubnet1.id,
      vpcSecurityGroupIds: [securityGroup.id],
      associatePublicIpAddress: true,
      tags: {
        Name: 'app-instance'
      },
      // Bootstrap script: install and start Apache, serve a hello page.
      userData: `#!/bin/bash
yum update -y
yum install -y httpd
systemctl start httpd
systemctl enable httpd
echo "<h1>Hello from CDKTF!</h1>" > /var/www/html/index.html
`
    });

    // Output values
    new TerraformOutput(this, 'bucket_name', {
      value: bucket.id,
      description: 'The name of the S3 bucket'
    });

    new TerraformOutput(this, 'vpc_id', {
      value: vpc.id,
      description: 'The ID of the VPC'
    });

    new TerraformOutput(this, 'instance_public_ip', {
      value: instance.publicIp,
      description: 'The public IP of the EC2 instance'
    });
  }
}

// Initialize the app.
// BUG FIX: the original assigned `app.outdir = 'cdktf.out'` after
// construction, but `outdir` is initialized from the constructor's AppConfig
// and is not a writable property — the late assignment is either rejected or
// silently ineffective depending on the cdktf version. Pass it as an option.
const app = new App({ outdir: 'cdktf.out' });

// Create the stack
new MyTerraformStack(app, 'cdktf-demo-stack');

// Synthesize Terraform configuration
app.synth();

💻 CDKTF with Python for Azure Resources python

🟡 intermediate ⭐⭐⭐⭐

Using Terraform CDK with Python to create Azure resources

⏱️ 25 min 🏷️ cdktf, python, azure, iac
Prerequisites: Python, Terraform CDK, Azure subscription
# CDKTF Python for Azure Resources
from cdktf import App, TerraformStack, TerraformOutput
from cdktf_cdktf_provider_azure import provider as azurerm_provider
from cdktf_cdktf_provider_azure import resource_group, storage_account, virtual_network, subnet, network_interface, linux_virtual_machine

class AzureWebAppStack(TerraformStack):
    """Provision a minimal Azure web server.

    Creates a resource group, virtual network + subnet, public IP, network
    security group (SSH/HTTP inbound), NIC, storage account, and an Ubuntu
    VM bootstrapped with Apache via custom data.
    """

    def __init__(self, scope, id):
        super().__init__(scope, id)

        # BUG FIX: the original rebound imported provider modules to resource
        # instances (e.g. `resource_group = resource_group.ResourceGroup(...)`),
        # shadowing the module names and scattering imports mid-method. The
        # extra modules are imported once here, and instances use short,
        # distinct local names. Construct ids and configuration are unchanged.
        from cdktf_cdktf_provider_azure import (
            network_interface_security_group_association,
            network_security_group,
            public_ip,
        )

        # Azure provider; azurerm requires a `features` block even when empty.
        azurerm_provider.AzurermProvider(self, "azure", features={})

        # Resource Group — container for everything below.
        rg = resource_group.ResourceGroup(self, "app-rg",
            name="cdktf-webapp-rg",
            location="East US",
            tags={
                "Environment": "development",
                "Project": "cdktf-demo"
            }
        )

        # Virtual Network covering 10.0.0.0/16.
        vnet = virtual_network.VirtualNetwork(self, "app-vnet",
            name="cdktf-vnet",
            address_space=["10.0.0.0/16"],
            location=rg.location,
            resource_group_name=rg.name,
            tags={"Name": "cdktf-vnet"}
        )

        # Subnet. The depends_on is redundant given the vnet.name reference,
        # but kept from the original to preserve the explicit ordering.
        snet = subnet.Subnet(self, "app-subnet",
            name="cdktf-subnet",
            resource_group_name=rg.name,
            virtual_network_name=vnet.name,
            address_prefixes=["10.0.1.0/24"],
            depends_on=[vnet]
        )

        # Public IP.
        # NOTE(review): with Dynamic allocation the address is assigned only
        # once attached to a running NIC, so the vm_public_ip output may be
        # empty on the first apply — confirm whether Static is intended.
        pip = public_ip.PublicIp(self, "app-pip",
            name="cdktf-pip",
            location=rg.location,
            resource_group_name=rg.name,
            allocation_method="Dynamic",
            tags={"Name": "cdktf-pip"}
        )

        # Network Security Group: inbound SSH (22) and HTTP (80).
        # Source '*' is wide open — acceptable for a demo only.
        nsg = network_security_group.NetworkSecurityGroup(self, "app-nsg",
            name="cdktf-nsg",
            location=rg.location,
            resource_group_name=rg.name,
            security_rule=[
                {
                    "name": "SSH",
                    "priority": 1001,
                    "direction": "Inbound",
                    "access": "Allow",
                    "protocol": "Tcp",
                    "source_port_range": "*",
                    "destination_port_range": "22",
                    "source_address_prefix": "*",
                    "destination_address_prefix": "*"
                },
                {
                    "name": "HTTP",
                    "priority": 1002,
                    "direction": "Inbound",
                    "access": "Allow",
                    "protocol": "Tcp",
                    "source_port_range": "*",
                    "destination_port_range": "80",
                    "source_address_prefix": "*",
                    "destination_address_prefix": "*"
                }
            ],
            tags={"Name": "cdktf-nsg"}
        )

        # Network Interface in the subnet, bound to the public IP.
        nic = network_interface.NetworkInterface(self, "app-nic",
            name="cdktf-nic",
            location=rg.location,
            resource_group_name=rg.name,
            ip_configuration=[{
                "name": "internal",
                "subnet_id": snet.id,
                "private_ip_address_allocation": "Dynamic",
                "public_ip_address_id": pip.id
            }],
            depends_on=[snet, pip]
        )

        # Associate the NSG with the NIC so the rules above apply.
        network_interface_security_group_association.NetworkInterfaceSecurityGroupAssociation(
            self, "app-nsg-assoc",
            network_interface_id=nic.id,
            network_security_group_id=nsg.id
        )

        # Storage Account.
        # NOTE(review): the random suffix changes on every synth, which forces
        # replacement of the account on each deploy — consider a deterministic
        # name or Terraform's random provider so the name lives in state.
        sa = storage_account.StorageAccount(self, "app-storage",
            name=f"cdktfstorage{self.generate_random_string(8)}",
            resource_group_name=rg.name,
            location=rg.location,
            account_tier="Standard",
            account_replication_type="LRS",
            tags={"Name": "cdktf-storage"}
        )

        # Linux Virtual Machine running the Apache bootstrap script.
        # NOTE(review): azurerm expects `custom_data` to be base64-encoded;
        # the raw script is passed here — verify against the provider docs.
        linux_virtual_machine.LinuxVirtualMachine(self, "app-vm",
            name="cdktf-linux-vm",
            location=rg.location,
            resource_group_name=rg.name,
            network_interface_ids=[nic.id],
            size="Standard_B1s",
            admin_username="adminuser",
            admin_ssh_key={
                "username": "adminuser",
                "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC..."  # Replace with your public key
            },
            os_disk={
                "caching": "ReadWrite",
                "storage_account_type": "Standard_LRS"
            },
            source_image_reference={
                "publisher": "Canonical",
                "offer": "UbuntuServer",
                "sku": "18.04-LTS",
                "version": "latest"
            },
            custom_data=self.get_user_data(),
            tags={"Name": "cdktf-linux-vm"},
            depends_on=[nic]
        )

        # Outputs
        TerraformOutput(self, "resource_group_name", value=rg.name)
        TerraformOutput(self, "vm_public_ip", value=pip.ip_address)
        TerraformOutput(self, "storage_account_name", value=sa.name)

    def generate_random_string(self, length):
        """Return a random lowercase-alphanumeric string of `length` characters."""
        import random
        import string
        return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length))

    def get_user_data(self):
        """Bootstrap shell script that installs and starts Apache."""
        return '''#!/bin/bash
apt-get update
apt-get install -y apache2
systemctl start apache2
systemctl enable apache2
echo "<h1>Hello from CDKTF on Azure!</h1>" > /var/www/html/index.html
'''

# Initialize the app
# Synthesized Terraform JSON is written under cdktf.out/.
app = App(outdir="cdktf.out")

# Create the stack
AzureWebAppStack(app, "azure-webapp-stack")

# Synthesize Terraform configuration
app.synth()

💻 CDKTF Component Pattern typescript

🟡 intermediate ⭐⭐⭐⭐

Creating reusable components with Terraform CDK

⏱️ 30 min 🏷️ cdktf, components, typescript, iac, architecture
Prerequisites: Advanced CDKTF knowledge, TypeScript, AWS
// CDKTF Component Pattern - Reusable Infrastructure Components
import { Construct } from 'constructs';
import { TerraformStack, TerraformOutput } from 'cdktf';
import { AwsProvider } from '@cdktf/provider-aws/lib/provider';
import { S3Bucket } from '@cdktf/provider-aws/lib/s3-bucket';
import { IamRole, IamRolePolicyAttachment } from '@cdktf/provider-aws/lib/iam';
import { DataAwsIamPolicyDocument } from '@cdktf/provider-aws/lib/data-aws-iam-policy-document';
import { LambdaFunction } from '@cdktf/provider-aws/lib/lambda';
import { ApiGatewayRestApi, ApiGatewayResource, ApiGatewayMethod, ApiGatewayIntegration } from '@cdktf/provider-aws/lib/api-gateway';
import { RdsInstance } from '@cdktf/provider-aws/lib/rds-instance';
import { RdsSubnetGroup } from '@cdktf/provider-aws/lib/rds-subnet-group';
import { SecurityGroup } from '@cdktf/provider-aws/lib/security-group';

// Reusable Web Application Component
// Bundles an S3 assets bucket, a Lambda-backed API, and an API Gateway proxy
// into a single construct parameterized by app name and environment.
class WebAppComponent extends Construct {
  public readonly bucket: S3Bucket;
  public readonly apiEndpoint: string;

  constructor(scope: Construct, id: string, props: {
    appName: string;
    environment: string;
    runtime?: string;   // Lambda runtime; defaults to 'nodejs18.x'
    handler?: string;   // Lambda handler; defaults to 'index.handler'
  }) {
    super(scope, id);

    // Create S3 bucket for static assets
    // NOTE(review): Math.random() makes the bucket name differ on every
    // synth, forcing bucket replacement each deploy — TODO confirm intent.
    this.bucket = new S3Bucket(this, `${id}-bucket`, {
      bucket: `${props.appName}-${props.environment}-assets-${Math.floor(Math.random() * 1000000)}`,
      acl: 'private',
      versioning: {
        enabled: true
      },
      website: {
        indexDocument: 'index.html',
        errorDocument: 'error.html'
      },
      tags: {
        Name: `${props.appName}-${props.environment}-assets`,
        Environment: props.environment
      }
    });

    // IAM trust policy letting the Lambda service assume the execution role.
    const assumeRolePolicy = new DataAwsIamPolicyDocument(this, 'lambda-assume-role', {
      statement: [{
        effect: 'Allow',
        principals: [{
          type: 'Service',
          identifiers: ['lambda.amazonaws.com']
        }],
        actions: ['sts:AssumeRole']
      }]
    });

    const lambdaRole = new IamRole(this, 'lambda-role', {
      name: `${props.appName}-${props.environment}-lambda-role`,
      assumeRolePolicy: assumeRolePolicy.json,
      tags: {
        Name: `${props.appName}-${props.environment}-lambda-role`,
        Environment: props.environment
      }
    });

    // Attach basic Lambda execution policy (CloudWatch Logs write access).
    new IamRolePolicyAttachment(this, 'lambda-basic-execution', {
      policyArn: 'arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole',
      role: lambdaRole.name
    });

    // Lambda function whose deployment package lives in the assets bucket.
    const lambdaFunction = new LambdaFunction(this, 'api-function', {
      functionName: `${props.appName}-${props.environment}-api`,
      runtime: props.runtime || 'nodejs18.x',
      handler: props.handler || 'index.handler',
      role: lambdaRole.arn,
      s3Bucket: this.bucket.id,
      s3Key: 'api/lambda.zip',
      tags: {
        Name: `${props.appName}-${props.environment}-api`,
        Environment: props.environment
      }
    });

    // Create API Gateway
    const api = new ApiGatewayRestApi(this, 'api-gateway', {
      name: `${props.appName}-${props.environment}-api`,
      description: `API Gateway for ${props.appName} in ${props.environment}`
    });

    // Greedy proxy resource: forwards every path to the Lambda.
    const apiResource = new ApiGatewayResource(this, 'api-resource', {
      restApiId: api.id,
      parentId: api.rootResourceId,
      pathPart: '{proxy+}'
    });

    // Accept any HTTP method, no authorizer.
    const apiMethod = new ApiGatewayMethod(this, 'api-method', {
      restApiId: api.id,
      resourceId: apiResource.id,
      httpMethod: 'ANY',
      authorization: 'NONE'
    });

    // AWS_PROXY integration: API Gateway always calls Lambda with POST.
    new ApiGatewayIntegration(this, 'api-integration', {
      restApiId: api.id,
      resourceId: apiResource.id,
      httpMethod: apiMethod.httpMethod,
      integrationHttpMethod: 'POST',
      type: 'AWS_PROXY',
      uri: lambdaFunction.invokeArn
    });

    // BUG FIX: the original added a second IamRolePolicyAttachment
    // ('lambda-api-gateway-permission') attaching the *same* policy to the
    // *same* role as 'lambda-basic-execution' above — two Terraform resources
    // managing one attachment fight each other, and a role policy does not
    // grant API Gateway permission to invoke the function anyway.
    // TODO(review): add an aws_lambda_permission (LambdaPermission) with
    // principal 'apigateway.amazonaws.com' so the integration can invoke the
    // Lambda; that resource type is not imported in this sample.

    // NOTE(review): a RestApi has no deployed stage until an
    // ApiGatewayDeployment/Stage is created; confirm this attribute exists and
    // resolves on the pinned provider version.
    this.apiEndpoint = api.deploymentStageInvokeUrl;

    // Output
    new TerraformOutput(this, 'bucket_name', {
      value: this.bucket.id,
      description: 'S3 bucket name for static assets'
    });

    new TerraformOutput(this, 'api_endpoint', {
      value: this.apiEndpoint,
      description: 'API Gateway endpoint URL'
    });
  }
}

// Database Component
// Provisions a MySQL RDS instance plus its subnet group and security group.
// Generalized: networking inputs that were hard-coded empty ('', []) are now
// optional props with the same defaults, so existing callers are unaffected
// while real callers can wire in their VPC.
class DatabaseComponent extends Construct {
  public readonly endpoint: string;

  constructor(scope: Construct, id: string, props: {
    appName: string;
    environment: string;
    instanceClass?: string;       // defaults to 'db.t3.micro'
    allocatedStorage?: number;    // GiB; defaults to 20
    subnetIds?: string[];         // subnets for the DB subnet group
    vpcId?: string;               // VPC for the DB security group
    allowedSecurityGroupIds?: string[]; // SGs allowed to reach MySQL on 3306
    masterPassword?: string;      // use secret management in production
  }) {
    super(scope, id);

    // Create subnet group for RDS
    const subnetGroup = new RdsSubnetGroup(this, 'db-subnet-group', {
      name: `${props.appName}-${props.environment}-db-subnet-group`,
      subnetIds: props.subnetIds ?? [],
      tags: {
        Name: `${props.appName}-${props.environment}-db-subnet-group`,
        Environment: props.environment
      }
    });

    // Security group admitting MySQL traffic only from the given app SGs.
    const dbSecurityGroup = new SecurityGroup(this, 'db-security-group', {
      name: `${props.appName}-${props.environment}-db-sg`,
      vpcId: props.vpcId ?? '',
      ingress: [{
        description: 'Allow MySQL traffic',
        fromPort: 3306,
        toPort: 3306,
        protocol: 'tcp',
        securityGroups: props.allowedSecurityGroupIds ?? []
      }],
      tags: {
        Name: `${props.appName}-${props.environment}-db-sg`,
        Environment: props.environment
      }
    });

    // Create RDS instance
    // skipFinalSnapshot is set for easy teardown in demos; disable for prod.
    const dbInstance = new RdsInstance(this, 'db-instance', {
      identifier: `${props.appName}-${props.environment}-db`,
      engine: 'mysql',
      instanceClass: props.instanceClass || 'db.t3.micro',
      allocatedStorage: props.allocatedStorage || 20,
      storageType: 'gp2',
      engineVersion: '8.0',
      username: 'admin',
      // Default kept for backward compatibility — inject a secret in real use.
      password: props.masterPassword ?? 'changeme123!',
      dbSubnetGroupName: subnetGroup.name,
      vpcSecurityGroupIds: [dbSecurityGroup.id],
      skipFinalSnapshot: true,
      storageEncrypted: true,
      tags: {
        Name: `${props.appName}-${props.environment}-db`,
        Environment: props.environment
      }
    });

    this.endpoint = dbInstance.endpoint;

    new TerraformOutput(this, 'db_endpoint', {
      value: this.endpoint,
      description: 'Database endpoint'
    });
  }
}

// Main stack using components
// Wires the reusable WebAppComponent and DatabaseComponent into a single
// deployable production stack and surfaces their key attributes together.
class MyWebAppStack extends TerraformStack {
  constructor(scope: Construct, id: string) {
    super(scope, id);

    // Provider: everything lands in us-west-2 and inherits the default tags.
    new AwsProvider(this, 'aws', {
      region: 'us-west-2',
      defaultTags: {
        tags: { Project: 'cdktf-component-demo', ManagedBy: 'cdktf' }
      }
    });

    // Frontend/API layer.
    const web = new WebAppComponent(this, 'webapp', {
      appName: 'my-app',
      environment: 'production',
      runtime: 'nodejs18.x',
      handler: 'api.handler'
    });

    // Persistence layer.
    const db = new DatabaseComponent(this, 'database', {
      appName: 'my-app',
      environment: 'production',
      instanceClass: 'db.t3.small',
      allocatedStorage: 50
    });

    // Aggregated output describing the deployed application.
    new TerraformOutput(this, 'app_info', {
      value: {
        staticAssetsBucket: web.bucket.id,
        apiEndpoint: web.apiEndpoint,
        databaseEndpoint: db.endpoint
      }
    });
  }
}

// Usage
// NOTE(review): ES module imports are hoisted, so importing App here (after
// the class definitions above) works, but convention is to place it at the
// top of the file alongside the other 'cdktf' imports.
import { App } from 'cdktf';

// Build the app, register the stack, and emit Terraform configuration.
const app = new App();
new MyWebAppStack(app, 'component-demo-stack');
app.synth();

💻 CDKTF Testing Strategy typescript

🔴 complex ⭐⭐⭐⭐⭐

Testing Terraform CDK stacks and components

⏱️ 35 min 🏷️ cdktf, testing, typescript, iac
Prerequisites: CDKTF experience, Testing knowledge, Jest
// CDKTF Testing Strategy
import { Testing } from 'cdktf';
import { SynthesizedStack, TerraformStack } from 'cdktf';
import { App } from 'cdktf';
import { AwsProvider } from '@cdktf/provider-aws/lib/provider';
import { S3Bucket } from '@cdktf/provider-aws/lib/s3-bucket';
import { expect, haveResource } from '@cdktf/assert';

// Example stack to test
class ExampleStack extends TerraformStack {
  public readonly bucket: S3Bucket;

  constructor(scope: Construct, id: string) {
    super(scope, id);

    new AwsProvider(this, 'aws', {
      region: 'us-east-1'
    });

    this.bucket = new S3Bucket(this, 'test-bucket', {
      bucket: 'my-test-bucket',
      versioning: {
        enabled: true
      }
    });
  }
}

// Basic synthesis test
// NOTE(review): in released cdktf versions Testing.synth(stack) returns a
// JSON *string*, and `SynthesizedStack.getResource` / `.toHcl()` are not part
// of the published API — confirm these helpers against the cdktf version
// pinned by this project before relying on them.
describe('ExampleStack Synthesis', () => {
  let app: App;
  let stack: ExampleStack;
  let synthesized: SynthesizedStack;

  // Fresh app/stack/synthesis for every test so cases stay independent.
  beforeEach(() => {
    app = new App();
    stack = new ExampleStack(app, 'test-stack');
    synthesized = Testing.synth(stack);
  });

  test('should have correct AWS provider configuration', () => {
    // NOTE(review): providers are not resources in Terraform JSON; verify
    // that 'aws_provider.aws' is a valid lookup key for this helper.
    const provider = synthesized.getResource('aws_provider.aws');
    expect(provider).toBeDefined();
    expect(provider.config.region).toBe('us-east-1');
  });

  test('should create S3 bucket with correct properties', () => {
    const bucket = synthesized.getResource('aws_s3_bucket.test-bucket');
    expect(bucket).toBeDefined();
    expect(bucket.config.bucket).toBe('my-test-bucket');
    expect(bucket.config.versioning).toEqual({
      enabled: true
    });
  });

  test('should generate valid Terraform JSON', () => {
    // NOTE(review): despite the test name, this asserts HCL-style syntax
    // ('enabled = true'); cdktf synthesizes JSON by default — confirm intent.
    const terraformJson = synthesized.toHcl();
    expect(terraformJson).toContain('resource "aws_s3_bucket" "test-bucket"');
    expect(terraformJson).toContain('versioning');
    expect(terraformJson).toContain('enabled = true');
  });
});

// Snapshot testing
describe('ExampleStack Snapshots', () => {
  test('should match snapshot', () => {
    const app = new App();
    const stack = new ExampleStack(app, 'test-stack');
    const synthesized = Testing.synth(stack);

    // Compare with snapshot
    expect(synthesized).toMatchSnapshot();
  });
});

// Component testing with fixtures
// NOTE(review): `Template` (with `fromStack` / `hasResourceProperties` /
// `resourceCountIs` / `getDependencies`) is the AWS CDK assertions-style API;
// the `cdktf` package does not export a `Template` class. Confirm against the
// pinned cdktf version — these tests may need cdktf's own Jest matchers
// (e.g. toHaveResourceWithProperties) instead.
import { Template } from 'cdktf';

describe('Infrastructure Template Testing', () => {
  test('should create infrastructure with expected resources', () => {
    const app = new App();
    const stack = new ExampleStack(app, 'test-stack');

    // Get the template
    const template = Template.fromStack(stack);

    // Test that specific resources exist
    template.hasResourceProperties('aws_s3_bucket', {
      bucket: 'my-test-bucket',
      versioning: {
        enabled: true
      }
    });

    // Test resource count
    // NOTE(review): 'aws_provider' is not a resource type in Terraform JSON;
    // verify that a count lookup on it can ever succeed.
    template.resourceCountIs('aws_s3_bucket', 1);
    template.resourceCountIs('aws_provider', 1);
  });

  test('should have correct resource dependencies', () => {
    const app = new App();
    const stack = new ExampleStack(app, 'test-stack');

    const template = Template.fromStack(stack);

    // Test that S3 bucket depends on AWS provider
    const dependencies = template.getDependencies('aws_s3_bucket.test-bucket');
    expect(dependencies).toContain('aws_provider.aws');
  });
});

// Validation tests
describe('Infrastructure Validation', () => {
  test('should validate naming conventions', () => {
    const app = new App();
    const stack = new ExampleStack(app, 'test-stack');
    const synthesized = Testing.synth(stack);

    // Get all resource names and validate patterns
    // NOTE(review): `synthesized.resources` as a typed array of
    // {type, config} objects is not a published SynthesizedStack API —
    // confirm against the cdktf version in use.
    const resources = synthesized.resources;
    const bucketResource = resources.find(r => r.type === 'aws_s3_bucket');

    expect(bucketResource).toBeDefined();
    // S3-style naming: lowercase alphanumerics and hyphens, no leading or
    // trailing hyphen.
    expect(bucketResource!.config.bucket).toMatch(/^[a-z0-9][a-z0-9-]*[a-z0-9]$/);
  });

  test('should validate required properties', () => {
    const app = new App();
    const stack = new ExampleStack(app, 'test-stack');

    // Test that required properties are set
    // The outer .not.toThrow() is largely redundant: failing inner expects
    // already throw and fail the test, but it documents intent.
    expect(() => {
      const bucket = stack.bucket;
      expect(bucket.id).toBeDefined();
      expect(bucket.bucket).toBeDefined();
    }).not.toThrow();
  });
});

// Integration tests with real Terraform
// Requires the `terraform` binary on PATH and (for the apply path) valid AWS
// credentials; the apply/output stage only runs when RUN_INTEGRATION_TESTS
// is set to 'true'.
import { execSync } from 'child_process';
import * as path from 'path';
import * as fs from 'fs';

describe('Integration Tests', () => {
  const workingDir = path.join(__dirname, 'terraform-working-dir');

  beforeEach(() => {
    // Clean up working directory
    if (fs.existsSync(workingDir)) {
      fs.rmSync(workingDir, { recursive: true, force: true });
    }
    fs.mkdirSync(workingDir, { recursive: true });
  });

  afterEach(() => {
    // Clean up after tests
    // Best-effort destroy using the state left in workingDir; errors are
    // deliberately swallowed so cleanup never fails the suite.
    if (fs.existsSync(workingDir)) {
      try {
        execSync('terraform destroy -auto-approve', {
          cwd: workingDir,
          stdio: 'pipe'
        });
      } catch (error) {
        // Ignore destroy errors
      }
      fs.rmSync(workingDir, { recursive: true, force: true });
    }
  });

  test('should successfully plan and apply with real Terraform', () => {
    const app = new App();
    const stack = new ExampleStack(app, 'integration-test-stack');

    // Synthesize to working directory
    // NOTE(review): `Testing.synthToDirectory` is not a published cdktf API
    // (cf. `Testing.fullSynth`, which writes to a temp dir) — confirm.
    Testing.synthToDirectory(stack, workingDir);

    // Initialize Terraform
    execSync('terraform init', {
      cwd: workingDir,
      stdio: 'pipe'
    });

    // Run Terraform plan
    // NOTE(review): `terraform plan -json` streams newline-delimited JSON
    // events, so JSON.parse over the whole output will throw; and
    // `planned_values` comes from `terraform show -json <planfile>`, not the
    // plan stream. This path likely needs plan -out + show -json — confirm.
    const planOutput = execSync('terraform plan -json', {
      cwd: workingDir,
      encoding: 'utf-8'
    });

    const plan = JSON.parse(planOutput);
    expect(plan.planned_values).toBeDefined();

    // Run Terraform apply (only in test environment with proper cleanup)
    if (process.env.RUN_INTEGRATION_TESTS === 'true') {
      const applyOutput = execSync('terraform apply -auto-approve', {
        cwd: workingDir,
        encoding: 'utf-8'
      });

      expect(applyOutput).toContain('Apply complete');

      // Verify resources were created
      const output = execSync('terraform output -json', {
        cwd: workingDir,
        encoding: 'utf-8'
      });

      const outputs = JSON.parse(output);
      expect(outputs).toBeDefined();
    }
  });
});

// Performance tests
// Wall-clock sanity checks on synthesis time; thresholds are generous so the
// suite stays stable on slow CI machines.
describe('Performance Tests', () => {
  test('should synthesize within reasonable time', () => {
    const begin = Date.now();

    const app = new App();
    Testing.synth(new ExampleStack(app, 'performance-test-stack'));

    const elapsed = Date.now() - begin;

    // Should complete synthesis in under 5 seconds for this simple stack
    expect(elapsed).toBeLessThan(5000);
  });

  test('should handle large stacks efficiently', () => {
    const begin = Date.now();

    const app = new App();
    const stack = new TerraformStack(app, 'large-test-stack');

    new AwsProvider(stack, 'aws', { region: 'us-east-1' });

    // Populate the stack with 100 versioned buckets.
    Array.from({ length: 100 }, (_, n) => n).forEach((n) => {
      new S3Bucket(stack, `bucket-${n}`, {
        bucket: `large-test-bucket-${n}`,
        versioning: { enabled: true }
      });
    });

    const result = Testing.synth(stack);
    const elapsed = Date.now() - begin;

    // 100 resources should synthesize within 30 seconds and all appear in
    // the synthesized output.
    expect(elapsed).toBeLessThan(30000);
    expect(result.resources.length).toBeGreaterThanOrEqual(100);
  });
});