🎯 Recommended Examples
Balanced sample collections from various categories for you to explore
AWS S3 Lambda Examples
AWS S3 and Lambda serverless architecture examples, covering S3 triggers, Lambda functions, and serverless application patterns
⚙️ Basic S3 Lambda Trigger
🟢 simple
⭐⭐
Basic S3 bucket configuration with Lambda triggers to process file uploads and modifications
⏱️ 20 min
🏷️ aws, s3, lambda, serverless, infrastructure
Prerequisites:
AWS account, AWS CLI, SAM CLI, Node.js
# AWS S3 and Lambda Basic Configuration
# Serverless architecture for file processing and automation
# 1. AWS SAM Template (template.yaml)
AWSTemplateFormatVersion: '2010-09-09'
Transform: AWS::Serverless-2016-10-31
Description: S3 Lambda file processing application
Globals:
Function:
Runtime: nodejs18.x
Timeout: 300
MemorySize: 512
Environment:
Variables:
TABLE_NAME: !Ref FileProcessingTable
Resources:
# S3 Bucket for file uploads
FileUploadBucket:
Type: AWS::S3::Bucket
Properties:
BucketName: !Sub 'file-upload-bucket-${AWS::StackName}'
PublicAccessBlockConfiguration:
BlockPublicAcls: true
BlockPublicPolicy: true
IgnorePublicAcls: true
RestrictPublicBuckets: true
BucketEncryption:
ServerSideEncryptionConfiguration:
- ServerSideEncryptionByDefault:
SSEAlgorithm: AES256
      # The S3 -> Lambda trigger is declared on the function below (Events)
      # rather than here: referencing the function ARN from the bucket would
      # create a circular dependency and omit the required invoke permission.
# Lambda function for processing uploaded files
FileProcessorFunction:
Type: AWS::Serverless::Function
Properties:
CodeUri: lambda/
Handler: index.handler
Runtime: nodejs18.x
      Environment:
        Variables:
          # Literal name (not !Ref) to avoid a circular dependency with the
          # notification configuration SAM injects into the bucket
          BUCKET_NAME: !Sub 'file-upload-bucket-${AWS::StackName}'
          TABLE_NAME: !Ref FileProcessingTable
      Events:
        FileUpload:
          Type: S3
          Properties:
            Bucket: !Ref FileUploadBucket
            Events: s3:ObjectCreated:*
      Policies:
        - S3ReadPolicy:
            BucketName: !Sub 'file-upload-bucket-${AWS::StackName}'
        - DynamoDBCrudPolicy:
            TableName: !Ref FileProcessingTable
# DynamoDB table for tracking file processing
FileProcessingTable:
Type: AWS::DynamoDB::Table
Properties:
TableName: !Sub 'file-processing-table-${AWS::StackName}'
AttributeDefinitions:
- AttributeName: fileId
AttributeType: S
KeySchema:
- AttributeName: fileId
KeyType: HASH
BillingMode: PAY_PER_REQUEST
StreamSpecification:
StreamViewType: NEW_AND_OLD_IMAGES
  # Standalone IAM role equivalent to the SAM policy templates above.
  # Not attached by default; to use it instead, set
  # "Role: !GetAtt LambdaExecutionRole.Arn" on the function and drop Policies.
LambdaExecutionRole:
Type: AWS::IAM::Role
Properties:
RoleName: !Sub 's3-lambda-execution-role-${AWS::StackName}'
AssumeRolePolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: Allow
Principal:
Service: lambda.amazonaws.com
Action: sts:AssumeRole
ManagedPolicyArns:
- arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
Policies:
- PolicyName: S3AccessPolicy
PolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: Allow
Action:
- s3:GetObject
- s3:PutObject
- s3:DeleteObject
Resource: !Sub 'arn:aws:s3:::${FileUploadBucket}/*'
- PolicyName: DynamoDBAccessPolicy
PolicyDocument:
Version: '2012-10-17'
Statement:
- Effect: Allow
Action:
- dynamodb:PutItem
- dynamodb:GetItem
- dynamodb:UpdateItem
- dynamodb:Query
- dynamodb:Scan
Resource:
- !GetAtt FileProcessingTable.Arn
- !Sub '${FileProcessingTable.Arn}/index/*'
Outputs:
BucketName:
Description: S3 bucket for file uploads
Value: !Ref FileUploadBucket
Export:
Name: !Sub '${AWS::StackName}-FileUploadBucket'
FunctionName:
Description: Lambda function ARN
Value: !GetAtt FileProcessorFunction.Arn
Export:
Name: !Sub '${AWS::StackName}-FileProcessorFunction'
TableName:
Description: DynamoDB table name
Value: !Ref FileProcessingTable
Export:
Name: !Sub '${AWS::StackName}-FileProcessingTable'
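Before deploying, the handler can be exercised locally with a synthetic S3 event. A minimal sketch, assuming the function code lives at lambda/index.js as declared above (real AWS credentials are still needed for the S3 and DynamoDB calls; the bucket name is hypothetical):
// local-test.js — invoke the handler with a synthetic S3 event (sketch)
const { handler } = require('./lambda/index');

const testEvent = {
  Records: [
    {
      eventName: 'ObjectCreated:Put',
      s3: {
        bucket: { name: 'file-upload-bucket-my-stack' }, // hypothetical bucket name
        object: { key: 'uploads/photo.jpg', size: 1024 }
      }
    }
  ]
};

handler(testEvent, {}).then(console.log).catch(console.error);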
💻 File Processing Lambda Function (JavaScript)
🟡 intermediate
⭐⭐⭐
Node.js Lambda function that processes S3 file uploads, with metadata extraction and thumbnail generation
⏱️ 35 min
🏷️ aws, lambda, s3, image-processing, serverless
Prerequisites:
Node.js, AWS SDK, Sharp library, DynamoDB
// Lambda Function for S3 File Processing
// File: lambda/index.js
const sharp = require('sharp');
const { S3Client, GetObjectCommand, PutObjectCommand } = require('@aws-sdk/client-s3');
const { DynamoDBClient, PutItemCommand, UpdateItemCommand } = require('@aws-sdk/client-dynamodb');
const { marshall, unmarshall } = require('@aws-sdk/util-dynamodb');
// Initialize AWS SDK clients
const s3Client = new S3Client({ region: process.env.AWS_REGION });
const dynamoDbClient = new DynamoDBClient({ region: process.env.AWS_REGION });
// Environment variables
const BUCKET_NAME = process.env.BUCKET_NAME;
const TABLE_NAME = process.env.TABLE_NAME;
const THUMBNAIL_FOLDER = 'thumbnails/';
// Supported image formats for processing
const SUPPORTED_FORMATS = ['jpg', 'jpeg', 'png', 'webp', 'gif'];
const THUMBNAIL_SIZES = [150, 300, 600];
/**
* Main Lambda handler for S3 events
*/
exports.handler = async (event, context) => {
console.log('Received S3 event:', JSON.stringify(event, null, 2));
try {
// Process each record in the S3 event
for (const record of event.Records) {
await processS3Record(record);
}
return {
statusCode: 200,
body: JSON.stringify({
message: 'Files processed successfully',
recordsProcessed: event.Records.length
})
};
  } catch (error) {
    console.error('Error processing files:', error);
    // Note: returning a 500 body instead of rethrowing suppresses the automatic
    // retries S3 performs for failed asynchronous invocations
return {
statusCode: 500,
body: JSON.stringify({
error: 'File processing failed',
details: error.message
})
};
}
};
/**
* Process individual S3 record
*/
async function processS3Record(record) {
const eventName = record.eventName;
const bucketName = record.s3.bucket.name;
  const objectKey = decodeURIComponent(record.s3.object.key.replace(/\+/g, ' '));
console.log(`Processing ${eventName} for object: ${objectKey} in bucket: ${bucketName}`);
// Skip processing for thumbnail files to prevent infinite loops
if (objectKey.startsWith(THUMBNAIL_FOLDER)) {
console.log('Skipping thumbnail file');
return;
}
// Extract file extension
const fileExtension = objectKey.split('.').pop().toLowerCase();
if (SUPPORTED_FORMATS.includes(fileExtension)) {
console.log(`Processing supported image format: ${fileExtension}`);
await processImageFile(bucketName, objectKey, fileExtension);
} else {
console.log(`File format ${fileExtension} not supported for image processing`);
await processGenericFile(bucketName, objectKey);
}
}
/**
* Process image files - generate thumbnails and extract metadata
*/
async function processImageFile(bucketName, objectKey, fileExtension) {
const startTime = Date.now();
const fileId = generateFileId(objectKey);
try {
// Get the original image from S3
const getObjectCommand = new GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const response = await s3Client.send(getObjectCommand);
const imageBuffer = await streamToBuffer(response.Body);
// Extract image metadata using sharp
const metadata = await sharp(imageBuffer).metadata();
console.log('Image metadata:', metadata);
// Generate thumbnails in different sizes
const thumbnails = await generateThumbnails(imageBuffer, objectKey, fileExtension);
// Store file information in DynamoDB
const fileRecord = {
fileId,
objectKey,
bucketName,
originalSize: imageBuffer.length,
contentType: response.ContentType,
width: metadata.width,
height: metadata.height,
format: metadata.format,
size: metadata.size,
thumbnails,
processingTime: Date.now() - startTime,
status: 'completed',
processedAt: new Date().toISOString(),
expiresAt: new Date(Date.now() + 30 * 24 * 60 * 60 * 1000).toISOString() // 30 days
};
await saveFileRecord(fileRecord);
console.log(`Successfully processed image: ${objectKey}`);
} catch (error) {
console.error(`Error processing image ${objectKey}:`, error);
// Save error record to DynamoDB
const errorRecord = {
fileId,
objectKey,
bucketName,
status: 'failed',
error: error.message,
processedAt: new Date().toISOString(),
expiresAt: new Date(Date.now() + 7 * 24 * 60 * 60 * 1000).toISOString() // 7 days for failed records
};
await saveFileRecord(errorRecord);
throw error;
}
}
/**
* Process non-image files
*/
async function processGenericFile(bucketName, objectKey) {
const fileId = generateFileId(objectKey);
const startTime = Date.now();
try {
// Get object metadata
const getObjectCommand = new GetObjectCommand({
Bucket: bucketName,
Key: objectKey
});
const response = await s3Client.send(getObjectCommand);
// Store file information in DynamoDB
const fileRecord = {
fileId,
objectKey,
bucketName,
originalSize: response.ContentLength,
contentType: response.ContentType,
processingTime: Date.now() - startTime,
status: 'completed',
processedAt: new Date().toISOString(),
expiresAt: new Date(Date.now() + 30 * 24 * 60 * 60 * 1000).toISOString()
};
await saveFileRecord(fileRecord);
console.log(`Successfully processed file: ${objectKey}`);
} catch (error) {
console.error(`Error processing file ${objectKey}:`, error);
throw error;
}
}
/**
* Generate thumbnails in multiple sizes
*/
async function generateThumbnails(imageBuffer, originalKey, fileExtension) {
const thumbnails = [];
const baseName = originalKey.split('.').slice(0, -1).join('.');
for (const size of THUMBNAIL_SIZES) {
try {
// Generate thumbnail
const thumbnailBuffer = await sharp(imageBuffer)
.resize(size, size, {
fit: 'inside',
withoutEnlargement: true
})
.jpeg({ quality: 80 })
.toBuffer();
      // Generate thumbnail key (always .jpg, since thumbnails are re-encoded as JPEG)
      const thumbnailKey = `${THUMBNAIL_FOLDER}${baseName}_${size}px.jpg`;
// Upload thumbnail to S3
const putObjectCommand = new PutObjectCommand({
Bucket: BUCKET_NAME,
Key: thumbnailKey,
Body: thumbnailBuffer,
ContentType: 'image/jpeg',
Metadata: {
originalKey,
size: size.toString()
}
});
await s3Client.send(putObjectCommand);
thumbnails.push({
size,
key: thumbnailKey,
        // Direct URL; with public access blocked, serve via presigned URLs instead (see below)
        url: `https://${BUCKET_NAME}.s3.${process.env.AWS_REGION}.amazonaws.com/${thumbnailKey}`,
sizeBytes: thumbnailBuffer.length
});
console.log(`Generated thumbnail: ${thumbnailKey} (${size}px)`);
} catch (error) {
console.error(`Error generating ${size}px thumbnail:`, error);
}
}
return thumbnails;
}
/**
* Save file record to DynamoDB
*/
async function saveFileRecord(fileRecord) {
const putItemCommand = new PutItemCommand({
TableName: TABLE_NAME,
Item: marshall(fileRecord)
});
await dynamoDbClient.send(putItemCommand);
}
/**
* Utility function to generate unique file ID
*/
function generateFileId(objectKey) {
const timestamp = Date.now();
const randomString = Math.random().toString(36).substring(2, 8);
const fileHash = Buffer.from(objectKey).toString('base64').substring(0, 8);
return `${timestamp}-${fileHash}-${randomString}`;
}
/**
* Convert stream to buffer
*/
async function streamToBuffer(stream) {
return new Promise((resolve, reject) => {
const chunks = [];
stream.on('data', (chunk) => chunks.push(chunk));
stream.on('error', reject);
stream.on('end', () => resolve(Buffer.concat(chunks)));
});
}
// Package.json for Lambda function
/*
{
"name": "s3-file-processor",
"version": "1.0.0",
"description": "Lambda function for processing S3 file uploads",
"main": "index.js",
"dependencies": {
"@aws-sdk/client-s3": "^3.400.0",
"@aws-sdk/client-dynamodb": "^3.400.0",
"@aws-sdk/util-dynamodb": "^3.400.0",
"sharp": "^0.32.6"
  }
}
*/
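Since the bucket blocks all public access, the direct object URLs stored above will return 403 to anonymous clients. A presigned GET URL is the usual answer; a minimal sketch using @aws-sdk/s3-request-presigner (add it to the dependencies above):
// presign.js — time-limited download URL for a stored thumbnail (sketch)
const { S3Client, GetObjectCommand } = require('@aws-sdk/client-s3');
const { getSignedUrl } = require('@aws-sdk/s3-request-presigner');

const s3Client = new S3Client({ region: process.env.AWS_REGION });

async function getThumbnailUrl(bucket, key) {
  const command = new GetObjectCommand({ Bucket: bucket, Key: key });
  // URL remains valid for one hour, then expires
  return getSignedUrl(s3Client, command, { expiresIn: 3600 });
}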
⚙️ Advanced Serverless Patterns
🔴 complex
⭐⭐⭐⭐
Advanced serverless architecture patterns, including API Gateway integration, event-driven workflows, and monitoring
⏱️ 60 min
🏷️ aws, serverless, advanced, architecture, patterns
Prerequisites:
AWS fundamentals, Serverless Framework, Step Functions, EventBridge
# Advanced Serverless Architecture Patterns
# 1. Complete Serverless Application with API Gateway, S3, and Lambda
# serverless.yml
service: file-processing-app
frameworkVersion: '3'
provider:
name: aws
runtime: nodejs18.x
region: us-east-1
stage: ${opt:stage, 'dev'}
environment:
STAGE: ${self:provider.stage}
REGION: ${self:provider.region}
FILE_TABLE: ${self:service}-files-${self:provider.stage}
UPLOAD_BUCKET: ${self:service}-uploads-${self:provider.stage}
THUMBNAIL_BUCKET: ${self:service}-thumbnails-${self:provider.stage}
    PROCESSING_QUEUE_URL: !Ref ProcessingQueue # for SQS, !Ref returns the queue URL
  iam:
    role:
      statements:
        - Effect: Allow
          Action:
            - s3:GetObject
            - s3:PutObject
            - s3:DeleteObject
          Resource:
            - !Sub "arn:aws:s3:::${self:provider.environment.UPLOAD_BUCKET}/*"
            - !Sub "arn:aws:s3:::${self:provider.environment.THUMBNAIL_BUCKET}/*"
        - Effect: Allow
          Action:
            - dynamodb:Query
            - dynamodb:Scan
            - dynamodb:GetItem
            - dynamodb:PutItem
            - dynamodb:UpdateItem
            - dynamodb:DeleteItem
          Resource:
            - !GetAtt FilesTable.Arn
            - !Sub "${FilesTable.Arn}/index/*"
        - Effect: Allow
          Action:
            - sqs:SendMessage
            - sqs:ReceiveMessage
            - sqs:DeleteMessage
            - sqs:GetQueueAttributes
          Resource:
            - !GetAtt ProcessingQueue.Arn
functions:
# API Gateway endpoints
uploadUrl:
handler: src/api/uploadUrl.handler
events:
- http:
path: upload/url
method: post
cors: true
fileStatus:
handler: src/api/fileStatus.handler
events:
- http:
path: files/{fileId}/status
method: get
cors: true
fileList:
handler: src/api/fileList.handler
events:
- http:
path: files
method: get
cors: true
# S3 trigger for file processing
processUpload:
handler: src/processors/processUpload.handler
events:
      - s3:
          bucket: ${self:provider.environment.UPLOAD_BUCKET}
          event: s3:ObjectCreated:*
          rules:
            - prefix: uploads/
          existing: true # attach to the bucket defined under resources below
# SQS-based async processing
thumbnailGenerator:
handler: src/processors/generateThumbnails.handler
events:
- sqs:
arn:
!GetAtt ProcessingQueue.Arn
batchSize: 5
maximumBatchingWindowInSeconds: 10
# Scheduled cleanup
cleanupOldFiles:
handler: src/jobs/cleanupOldFiles.handler
events:
- schedule:
rate: rate(1 day)
input:
cleanupDays: 30
resources:
Resources:
UploadBucket:
Type: AWS::S3::Bucket
Properties:
BucketName: ${self:provider.environment.UPLOAD_BUCKET}
PublicAccessBlockConfiguration:
BlockPublicAcls: true
BlockPublicPolicy: true
IgnorePublicAcls: true
RestrictPublicBuckets: true
        # No NotificationConfiguration here: processUpload above already subscribes
        # to this bucket via its s3 event and forwards work to ProcessingQueue.
        # Pointing S3 at the queue directly would both overlap that trigger and
        # require an AWS::SQS::QueuePolicy allowing s3.amazonaws.com to SendMessage.
ThumbnailBucket:
Type: AWS::S3::Bucket
Properties:
BucketName: ${self:provider.environment.THUMBNAIL_BUCKET}
PublicAccessBlockConfiguration:
BlockPublicAcls: true
BlockPublicPolicy: true
IgnorePublicAcls: true
RestrictPublicBuckets: true
CorsConfiguration:
CorsRules:
- AllowedOrigins: ['*']
AllowedHeaders: ['*']
AllowedMethods: [GET, HEAD]
MaxAge: 3600
ProcessingQueue:
Type: AWS::SQS::Queue
Properties:
QueueName: ${self:service}-processing-queue-${self:provider.stage}
VisibilityTimeout: 900
MessageRetentionPeriod: 1209600 # 14 days
RedrivePolicy:
deadLetterTargetArn: !GetAtt ProcessingDLQ.Arn
maxReceiveCount: 3
ProcessingDLQ:
Type: AWS::SQS::Queue
Properties:
QueueName: ${self:service}-processing-dlq-${self:provider.stage}
MessageRetentionPeriod: 1209600
FilesTable:
Type: AWS::DynamoDB::Table
Properties:
TableName: ${self:provider.environment.FILE_TABLE}
BillingMode: PAY_PER_REQUEST
AttributeDefinitions:
- AttributeName: fileId
AttributeType: S
- AttributeName: userId
AttributeType: S
- AttributeName: status
AttributeType: S
- AttributeName: createdAt
AttributeType: N
KeySchema:
- AttributeName: fileId
KeyType: HASH
GlobalSecondaryIndexes:
- IndexName: UserStatusIndex
KeySchema:
- AttributeName: userId
KeyType: HASH
- AttributeName: status
KeyType: RANGE
Projection:
ProjectionType: ALL
- IndexName: StatusCreatedIndex
KeySchema:
- AttributeName: status
KeyType: HASH
- AttributeName: createdAt
KeyType: RANGE
Projection:
ProjectionType: ALL
PointInTimeRecoverySpecification:
PointInTimeRecoveryEnabled: true
        TimeToLiveSpecification:
          AttributeName: expiresAt
          Enabled: true
# CloudWatch Alarms
ProcessingQueueDepthAlarm:
Type: AWS::CloudWatch::Alarm
Properties:
AlarmName: ${self:service}-queue-depth-alarm-${self:provider.stage}
AlarmDescription: 'Alarm when processing queue depth is too high'
Namespace: AWS/SQS
MetricName: ApproximateNumberOfMessagesVisible
Dimensions:
- Name: QueueName
Value: !GetAtt ProcessingQueue.QueueName
Threshold: 100
ComparisonOperator: GreaterThanThreshold
EvaluationPeriods: 2
Period: 300
        Statistic: Maximum # peak visible messages in the period, not a sum of samples
AlarmActions:
- !Ref SNSAlertTopic
# SNS Topic for alerts
SNSAlertTopic:
Type: AWS::SNS::Topic
Properties:
TopicName: ${self:service}-alerts-${self:provider.stage}
SNSAlertSubscription:
Type: AWS::SNS::Subscription
Properties:
TopicArn: !Ref SNSAlertTopic
Protocol: email
Endpoint: ${ssm:/my-app/alert-email}
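The uploadUrl function declared above is not shown in this template. A hypothetical implementation that hands the browser a presigned PUT URL might look like this (path src/api/uploadUrl.js to match the handler setting):
// src/api/uploadUrl.js — presigned PUT URL for direct browser uploads (sketch)
const crypto = require('crypto');
const { S3Client, PutObjectCommand } = require('@aws-sdk/client-s3');
const { getSignedUrl } = require('@aws-sdk/s3-request-presigner');

const s3Client = new S3Client({ region: process.env.REGION });

module.exports.handler = async (event) => {
  const { fileName, contentType } = JSON.parse(event.body || '{}');
  // Key under uploads/ so the S3 trigger's prefix rule matches
  const key = `uploads/${crypto.randomUUID()}-${fileName}`;
  const command = new PutObjectCommand({
    Bucket: process.env.UPLOAD_BUCKET,
    Key: key,
    ContentType: contentType
  });
  const uploadUrl = await getSignedUrl(s3Client, command, { expiresIn: 300 });
  return {
    statusCode: 200,
    headers: { 'Access-Control-Allow-Origin': '*' },
    body: JSON.stringify({ uploadUrl, key })
  };
};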
# 2. Custom Lambda Authorizer for API Security
# src/auth/customAuthorizer.js
const jwt = require('jsonwebtoken');
const { dynamoDbClient } = require('../lib/aws');
// REQUEST-type Lambda authorizer: the bearer token arrives in the request headers
module.exports.handler = async (event) => {
  const token = event.headers.authorization?.replace('Bearer ', '');
if (!token) {
return generatePolicy('user', 'Deny', event.methodArn);
}
try {
const decoded = jwt.verify(token, process.env.JWT_SECRET);
const user = await getUser(decoded.userId);
if (!user || !user.isActive) {
return generatePolicy('user', 'Deny', event.methodArn);
}
return generatePolicy(user.userId, 'Allow', event.methodArn, {
userId: user.userId,
role: user.role,
permissions: user.permissions
});
} catch (error) {
console.error('Authorization error:', error);
return generatePolicy('user', 'Deny', event.methodArn);
}
};
function generatePolicy(principalId, effect, resource, context = {}) {
return {
principalId,
policyDocument: {
Version: '2012-10-17',
Statement: [{
Action: 'execute-api:Invoke',
Effect: effect,
Resource: resource
}]
},
context
};
}
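The getUser lookup the authorizer relies on is assumed, not shown. A hypothetical sketch using the same DynamoDB client import, against an illustrative users table:
// Hypothetical getUser helper for the authorizer above (sketch)
const { GetItemCommand } = require('@aws-sdk/client-dynamodb');
const { unmarshall } = require('@aws-sdk/util-dynamodb');

async function getUser(userId) {
  const result = await dynamoDbClient.send(new GetItemCommand({
    TableName: process.env.USERS_TABLE, // illustrative table name
    Key: { userId: { S: userId } }
  }));
  return result.Item ? unmarshall(result.Item) : null;
}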
# 3. EventBridge Event Routing
# serverless.yml snippet (add to functions section)
eventBridgeHandler:
handler: src/events/eventBridge.handler
events:
- eventBridge:
        eventBus: !Ref CustomEventBus # CustomEventBus must be defined under resources as an AWS::Events::EventBus
pattern:
source:
- com.myapp.file.processing
detailType:
- FileProcessed
- ProcessingFailed
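For this rule to fire, producers have to put matching events on the custom bus. A minimal sketch with @aws-sdk/client-eventbridge; the bus name and detail payload are illustrative:
// Publish a FileProcessed event that matches the pattern above (sketch)
const { EventBridgeClient, PutEventsCommand } = require('@aws-sdk/client-eventbridge');

const eventBridge = new EventBridgeClient({ region: process.env.REGION });

async function publishFileProcessed(fileId, objectKey) {
  await eventBridge.send(new PutEventsCommand({
    Entries: [{
      EventBusName: 'file-processing-events', // must match the CustomEventBus name
      Source: 'com.myapp.file.processing',
      DetailType: 'FileProcessed',
      Detail: JSON.stringify({ fileId, objectKey })
    }]
  }));
}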
# 4. Step Functions Workflow for Complex Processing
# step-functions-workflow.json
{
"Comment": "Complex file processing workflow",
"StartAt": "ValidateFile",
"States": {
"ValidateFile": {
"Type": "Task",
"Resource": "arn:aws:lambda:REGION:ACCOUNT_ID:function:validateFile",
"Next": "CheckFileType",
"Retry": [
{
"ErrorEquals": ["States.ALL"],
"IntervalSeconds": 2,
"MaxAttempts": 3,
"BackoffRate": 2.0
}
]
},
"CheckFileType": {
"Type": "Choice",
"Choices": [
{
"Variable": "$.fileType",
"StringEquals": "image",
"Next": "GenerateThumbnails"
},
{
"Variable": "$.fileType",
"StringEquals": "video",
"Next": "ExtractVideoMetadata"
},
{
"Variable": "$.fileType",
"StringEquals": "document",
"Next": "ExtractText"
}
],
"Default": "ArchiveFile"
},
"GenerateThumbnails": {
"Type": "Parallel",
"Branches": [
{
"StartAt": "SmallThumbnail",
"States": {
"SmallThumbnail": {
"Type": "Task",
"Resource": "arn:aws:lambda:REGION:ACCOUNT_ID:function:generateThumbnail",
"Parameters": {
"fileKey.$": "$.fileKey",
"size": 150
},
"End": true
}
}
},
{
"StartAt": "MediumThumbnail",
"States": {
"MediumThumbnail": {
"Type": "Task",
"Resource": "arn:aws:lambda:REGION:ACCOUNT_ID:function:generateThumbnail",
"Parameters": {
"fileKey.$": "$.fileKey",
"size": 300
},
"End": true
}
}
},
{
"StartAt": "LargeThumbnail",
"States": {
"LargeThumbnail": {
"Type": "Task",
"Resource": "arn:aws:lambda:REGION:ACCOUNT_ID:function:generateThumbnail",
"Parameters": {
"fileKey.$": "$.fileKey",
"size": 600
},
"End": true
}
}
}
],
"Next": "UpdateDatabase"
},
"ExtractVideoMetadata": {
"Type": "Task",
"Resource": "arn:aws:lambda:REGION:ACCOUNT_ID:function:extractVideoMetadata",
"Next": "UpdateDatabase"
},
"ExtractText": {
"Type": "Task",
"Resource": "arn:aws:lambda:REGION:ACCOUNT_ID:function:extractText",
"Next": "UpdateDatabase"
},
"UpdateDatabase": {
"Type": "Task",
"Resource": "arn:aws:lambda:REGION:ACCOUNT_ID:function:updateDatabase",
"Next": "SendNotification"
},
"ArchiveFile": {
"Type": "Task",
"Resource": "arn:aws:lambda:REGION:ACCOUNT_ID:function:archiveFile",
"Next": "SendNotification"
},
"SendNotification": {
"Type": "Task",
"Resource": "arn:aws:lambda:REGION:ACCOUNT_ID:function:sendNotification",
"End": true
}
}
}
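A workflow like this is typically started from the upload processor once the file type is known. A sketch using @aws-sdk/client-sfn, passing the fileKey and fileType fields the Choice state inspects; the state machine ARN is a deploy-time placeholder:
// Kick off the Step Functions workflow for an uploaded file (sketch)
const { SFNClient, StartExecutionCommand } = require('@aws-sdk/client-sfn');

const sfnClient = new SFNClient({ region: process.env.REGION });

async function startProcessingWorkflow(fileKey, fileType) {
  const result = await sfnClient.send(new StartExecutionCommand({
    stateMachineArn: process.env.STATE_MACHINE_ARN, // placeholder, injected at deploy time
    input: JSON.stringify({ fileKey, fileType })
  }));
  return result.executionArn;
}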
# 5. Infrastructure as Code with CDK (TypeScript)
# lib/file-processing-stack.ts
import * as cdk from 'aws-cdk-lib';
import { Construct } from 'constructs';
import {
aws_s3 as s3,
aws_lambda as lambda,
aws_sqs as sqs,
aws_dynamodb as dynamodb,
aws_apigateway as apigateway,
aws_events as events,
  aws_events_targets as targets,
  aws_s3_notifications as s3n
} from 'aws-cdk-lib';
export class FileProcessingStack extends cdk.Stack {
constructor(scope: Construct, id: string, props?: cdk.StackProps) {
super(scope, id, props);
// S3 Buckets
    // S3 bucket names are globally unique; suffix with account and region
    // (or omit bucketName and let CDK generate one)
    const uploadBucket = new s3.Bucket(this, 'UploadBucket', {
      bucketName: `file-uploads-${this.account}-${this.region}`,
encryption: s3.BucketEncryption.S3_MANAGED,
blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL,
removalPolicy: cdk.RemovalPolicy.DESTROY,
});
    const thumbnailBucket = new s3.Bucket(this, 'ThumbnailBucket', {
      bucketName: `file-thumbnails-${this.account}-${this.region}`,
encryption: s3.BucketEncryption.S3_MANAGED,
blockPublicAccess: s3.BlockPublicAccess.BLOCK_ALL,
removalPolicy: cdk.RemovalPolicy.DESTROY,
});
// DynamoDB Table
const filesTable = new dynamodb.Table(this, 'FilesTable', {
tableName: 'files',
partitionKey: { name: 'fileId', type: dynamodb.AttributeType.STRING },
sortKey: { name: 'createdAt', type: dynamodb.AttributeType.NUMBER },
billingMode: dynamodb.BillingMode.PAY_PER_REQUEST,
removalPolicy: cdk.RemovalPolicy.DESTROY,
timeToLiveAttribute: 'expiresAt',
});
// Add GSI
filesTable.addGlobalSecondaryIndex({
indexName: 'UserIdIndex',
partitionKey: { name: 'userId', type: dynamodb.AttributeType.STRING },
sortKey: { name: 'createdAt', type: dynamodb.AttributeType.NUMBER },
});
// SQS Queue
const processingQueue = new sqs.Queue(this, 'ProcessingQueue', {
queueName: 'file-processing-queue',
visibilityTimeout: cdk.Duration.minutes(15),
retentionPeriod: cdk.Duration.days(14),
deadLetterQueue: {
maxReceiveCount: 3,
queue: new sqs.Queue(this, 'ProcessingDLQ', {
queueName: 'file-processing-dlq',
}),
},
});
// Lambda Functions
const fileProcessor = new lambda.Function(this, 'FileProcessor', {
runtime: lambda.Runtime.NODEJS_18_X,
handler: 'index.handler',
code: lambda.Code.fromAsset('lambda/processor'),
memorySize: 1024,
timeout: cdk.Duration.minutes(5),
environment: {
UPLOAD_BUCKET: uploadBucket.bucketName,
THUMBNAIL_BUCKET: thumbnailBucket.bucketName,
FILES_TABLE: filesTable.tableName,
PROCESSING_QUEUE_URL: processingQueue.queueUrl,
},
});
// Grant permissions
uploadBucket.grantRead(fileProcessor);
thumbnailBucket.grantReadWrite(fileProcessor);
filesTable.grantReadWriteData(fileProcessor);
processingQueue.grantSendMessages(fileProcessor);
// EventBridge for event routing
const eventBus = new events.EventBus(this, 'FileProcessingEventBus', {
eventBusName: 'file-processing-events',
});
// EventBridge rules
const fileProcessedRule = new events.Rule(this, 'FileProcessedRule', {
eventBus,
eventPattern: {
source: ['com.myapp.file.processor'],
detailType: ['FileProcessed'],
},
});
    fileProcessedRule.addTarget(new targets.LambdaFunction(fileProcessor));

    // Wire the upload bucket to the processor; without this notification the
    // function is never invoked on new uploads
    uploadBucket.addEventNotification(
      s3.EventType.OBJECT_CREATED,
      new s3n.LambdaDestination(fileProcessor)
    );
  }
}
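To synthesize and deploy the stack, a minimal app entrypoint is enough; a sketch in JavaScript (hypothetical bin/app.js, assuming the stack above is compiled or ported to JS):
// bin/app.js — instantiate the stack (sketch)
const cdk = require('aws-cdk-lib');
const { FileProcessingStack } = require('../lib/file-processing-stack');

const app = new cdk.App();
new FileProcessingStack(app, 'FileProcessingStack', {
  env: { account: process.env.CDK_DEFAULT_ACCOUNT, region: process.env.CDK_DEFAULT_REGION }
});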