DigitalOcean Samples

DigitalOcean cloud platform examples including Droplets, Kubernetes, Spaces, and App Platform

Key Facts

Category
Cloud Computing
Items
4
Format Families
yaml, text

Sample Overview

DigitalOcean cloud platform examples including Droplets, Kubernetes, Spaces, and App Platform. This sample set belongs to the Cloud Computing category and can be used to test related workflows inside Elysia Tools.

💻 Droplet Management API python

🟢 simple ⭐⭐

Create, manage, and monitor DigitalOcean Droplets using the API

⏱️ 15 min 🏷️ digitalocean, api, droplets, infrastructure
Prerequisites: DigitalOcean account, API token, Python knowledge
# DigitalOcean Droplet Management
# Python - droplet_manager.py + requirements.txt

import os
import time
import requests
from datetime import datetime
import json

class DigitalOceanManager:
    """Minimal client for the DigitalOcean v2 REST API (Droplet management).

    Methods print human-readable progress and return the parsed API payload,
    or None / False / [] on failure, instead of raising to the caller.
    """

    def __init__(self, api_token):
        """Store the token and prepare the shared auth headers."""
        self.api_token = api_token
        self.base_url = "https://api.digitalocean.com/v2"
        self.headers = {
            "Authorization": f"Bearer {api_token}",
            "Content-Type": "application/json"
        }

    def create_droplet(self, name, region, size, image, ssh_keys=None, tags=None):
        """Create a new DigitalOcean Droplet.

        Args:
            name: Hostname for the new droplet.
            region: Region slug, e.g. "nyc3".
            size: Size slug, e.g. "s-1vcpu-1gb".
            image: Image slug, e.g. "ubuntu-22-04-x64".
            ssh_keys: Optional list of SSH key IDs/fingerprints.
            tags: Optional list of tag names.

        Returns:
            The droplet dict from the API, or None on failure. The droplet is
            still provisioning when this returns, so it may not have a public
            IP address yet.
        """
        url = f"{self.base_url}/droplets"

        data = {
            "name": name,
            "region": region,
            "size": size,
            "image": image,
            "ssh_keys": ssh_keys or [],
            "backups": False,
            "ipv6": True,
            "user_data": None,
            "private_networking": False,
            "tags": tags or []
        }

        response = None  # so the except block can tell "no response" apart
        try:
            response = requests.post(url, headers=self.headers, json=data,
                                     timeout=30)
            response.raise_for_status()

            droplet_info = response.json()['droplet']

            # Immediately after creation the v4 network list is usually
            # empty; don't assume an IP exists (the old code raised
            # IndexError here).
            v4_networks = droplet_info.get('networks', {}).get('v4') or []
            ip_address = v4_networks[0]['ip_address'] if v4_networks else "pending"

            print(f"✅ Droplet '{name}' created successfully!")
            print(f"   ID: {droplet_info['id']}")
            print(f"   IP: {ip_address}")

            return droplet_info

        except requests.exceptions.RequestException as e:
            print(f"❌ Error creating droplet: {e}")
            # `response` stays None when the request itself failed (DNS
            # error, connection refused, timeout); only inspect a real body.
            if response is not None and response.text:
                try:
                    error_detail = json.loads(response.text)
                    print(f"   Details: {error_detail.get('message', 'Unknown error')}")
                except ValueError:
                    print(f"   Details: {response.text}")
            return None

    def list_droplets(self, tag_name=None):
        """List all droplets, optionally filtered by tag.

        Returns the list of droplet dicts ([] on failure).
        """
        url = f"{self.base_url}/droplets"
        if tag_name:
            url += f"?tag_name={tag_name}"

        try:
            response = requests.get(url, headers=self.headers, timeout=30)
            response.raise_for_status()

            droplets = response.json()['droplets']

            print(f"📋 Found {len(droplets)} droplet(s):")
            print("-" * 60)

            for droplet in droplets:
                status_emoji = "🟢" if droplet['status'] == 'active' else "🟡"
                ip = droplet['networks']['v4'][0]['ip_address'] if droplet['networks']['v4'] else "N/A"

                print(f"{status_emoji} {droplet['name']}")
                print(f"   ID: {droplet['id']}")
                print(f"   IP: {ip}")
                print(f"   Status: {droplet['status']}")
                print(f"   Region: {droplet['region']['slug']}")
                print(f"   Size: {droplet['size_slug']}")
                if droplet.get('tags'):
                    print(f"   Tags: {', '.join(droplet['tags'])}")
                print()

            return droplets

        except requests.exceptions.RequestException as e:
            print(f"❌ Error listing droplets: {e}")
            return []

    def get_droplet_info(self, droplet_id):
        """Print and return detailed info for one droplet (None on failure)."""
        url = f"{self.base_url}/droplets/{droplet_id}"

        try:
            response = requests.get(url, headers=self.headers, timeout=30)
            response.raise_for_status()

            droplet = response.json()['droplet']

            print(f"ℹ️  Droplet Information for '{droplet['name']}'")
            print("-" * 50)
            print(f"ID: {droplet['id']}")
            print(f"Status: {droplet['status']}")
            print(f"Region: {droplet['region']['name']} ({droplet['region']['slug']})")
            print(f"Size: {droplet['size_slug']}")
            print(f"CPUs: {droplet['vcpus']}")
            print(f"Memory: {droplet['memory']} MB")
            print(f"Disk: {droplet['disk']} GB")
            print(f"Image: {droplet['image']['distribution']} {droplet['image']['name']}")

            # Network information (public and private v4 addresses)
            if droplet['networks']['v4']:
                for network in droplet['networks']['v4']:
                    print(f"IP ({network['type']}): {network['ip_address']}")

            print(f"Created: {droplet['created_at']}")

            if droplet.get('tags'):
                print(f"Tags: {', '.join(droplet['tags'])}")

            return droplet

        except requests.exceptions.RequestException as e:
            print(f"❌ Error getting droplet info: {e}")
            return None

    def _droplet_action(self, droplet_id, data, headline, error_label):
        """POST an action to a droplet and report the resulting action.

        Shared helper for power_off / power_on / create_snapshot; returns
        the action dict, or None on failure.
        """
        url = f"{self.base_url}/droplets/{droplet_id}/actions"

        try:
            response = requests.post(url, headers=self.headers, json=data,
                                     timeout=30)
            response.raise_for_status()

            action = response.json()['action']
            print(headline)
            print(f"   Action ID: {action['id']}")
            print(f"   Status: {action['status']}")

            return action

        except requests.exceptions.RequestException as e:
            print(f"❌ Error {error_label}: {e}")
            return None

    def power_off_droplet(self, droplet_id):
        """Power off a droplet (a hard stop; the droplet is still billed)."""
        return self._droplet_action(
            droplet_id,
            {"type": "power_off"},
            f"🔌 Power off initiated for Droplet {droplet_id}",
            "powering off droplet",
        )

    def power_on_droplet(self, droplet_id):
        """Power a droplet back on."""
        return self._droplet_action(
            droplet_id,
            {"type": "power_on"},
            f"🔌 Power on initiated for Droplet {droplet_id}",
            "powering on droplet",
        )

    def delete_droplet(self, droplet_id):
        """Delete a droplet after an interactive confirmation (PERMANENT!).

        Returns True when the droplet was deleted, False otherwise.
        """
        url = f"{self.base_url}/droplets/{droplet_id}"

        # Interactive safety net: deletion cannot be undone.
        confirm = input(f"⚠️  Are you sure you want to delete Droplet {droplet_id}? This cannot be undone! (yes/no): ")
        if confirm.lower() != 'yes':
            print("❌ Deletion cancelled")
            return False

        try:
            response = requests.delete(url, headers=self.headers, timeout=30)
            response.raise_for_status()

            print(f"✅ Droplet {droplet_id} deleted successfully")
            return True

        except requests.exceptions.RequestException as e:
            print(f"❌ Error deleting droplet: {e}")
            return False

    def create_snapshot(self, droplet_id, snapshot_name):
        """Start an asynchronous snapshot of a droplet."""
        return self._droplet_action(
            droplet_id,
            {"type": "snapshot", "name": snapshot_name},
            f"📸 Snapshot '{snapshot_name}' creation initiated",
            "creating snapshot",
        )

    def get_action_status(self, action_id):
        """Fetch the current state of a previously started action.

        Returns the action dict, or None on failure.
        """
        url = f"{self.base_url}/actions/{action_id}"

        try:
            response = requests.get(url, headers=self.headers, timeout=30)
            response.raise_for_status()

            return response.json()['action']

        except requests.exceptions.RequestException as e:
            print(f"❌ Error getting action status: {e}")
            return None

# Example usage
if __name__ == "__main__":
    # Credentials come from the environment so they never live in source.
    token = os.environ.get('DIGITALOCEAN_API_TOKEN')

    if not token:
        print("❌ Please set DIGITALOCEAN_API_TOKEN environment variable")
        exit(1)

    do_manager = DigitalOceanManager(token)

    # Spin up a small demo droplet.
    print("🚀 Creating a new droplet...")
    created = do_manager.create_droplet(
        name="my-app-server",
        region="nyc3",  # New York 3 datacenter
        size="s-1vcpu-1gb",  # smallest slug ($5/month)
        image="ubuntu-22-04-x64",  # Ubuntu 22.04 LTS
        tags=["web", "production"]
    )

    if created:
        # Give the droplet time to finish provisioning before listing.
        print("⏳ Waiting for droplet to be active...")
        time.sleep(30)
        do_manager.list_droplets()

    # Further examples (substitute a real droplet ID):
    # do_manager.get_droplet_info("your-droplet-id")
    # do_manager.create_snapshot("your-droplet-id", "backup-before-update")

# requirements.txt
"""
requests>=2.31.0
python-dotenv>=1.0.0
"""

💻 Kubernetes Cluster Deployment text

🟡 intermediate ⭐⭐⭐

Deploy and manage applications on DigitalOcean Kubernetes Service (DOKS)

⏱️ 30 min 🏷️ digitalocean, kubernetes, doks, deployment, containers
Prerequisites: DigitalOcean account, Kubernetes knowledge, kubectl installed, doctl CLI
# DigitalOcean Kubernetes Deployment
# deployment.yaml - Complete application deployment on DOKS

# Dedicated namespace so every resource of the app can be managed
# (and deleted) as one unit.
apiVersion: v1
kind: Namespace
metadata:
  name: myapp
  labels:
    name: myapp
    environment: production
---
# ConfigMap for application configuration (non-secret values only;
# injected into the app pods via envFrom)
apiVersion: v1
kind: ConfigMap
metadata:
  name: myapp-config
  namespace: myapp
data:
  NODE_ENV: "production"
  PORT: "3000"
  REDIS_HOST: "redis-service"
  REDIS_PORT: "6379"
  # Must match the Service named "postgres-service" defined in this file;
  # the previous value "postgresql-service" pointed at a non-existent
  # DNS name, so database connections could never resolve.
  DB_HOST: "postgres-service"
  DB_PORT: "5432"

---
# Secret for sensitive data
apiVersion: v1
kind: Secret
metadata:
  name: myapp-secrets
  namespace: myapp
type: Opaque
data:
  # Base64 encoded values
  # echo -n 'your-database-password' | base64
  DB_PASSWORD: eW91ci1kYXRhYmFzZS1wYXNzd29yZA==
  # echo -n 'your-jwt-secret' | base64
  JWT_SECRET: eW91ci1qd3Qtc2VjcmV0
  # echo -n 'your-api-key' | base64
  API_KEY: eW91ci1hcGkta2V5

---
# Persistent Volume Claim for database storage
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: postgres-pvc
  namespace: myapp
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: do-block-storage
  resources:
    requests:
      storage: 10Gi

---
# PostgreSQL Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgres
  namespace: myapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      containers:
      - name: postgres
        image: postgres:15-alpine
        env:
        - name: POSTGRES_DB
          value: "myapp"
        - name: POSTGRES_USER
          value: "myappuser"
        - name: POSTGRES_PASSWORD
          valueFrom:
            secretKeyRef:
              name: myapp-secrets
              key: DB_PASSWORD
        ports:
        - containerPort: 5432
        volumeMounts:
        - name: postgres-storage
          mountPath: /var/lib/postgresql/data
        resources:
          requests:
            memory: "256Mi"
            cpu: "250m"
          limits:
            memory: "512Mi"
            cpu: "500m"
      volumes:
      - name: postgres-storage
        persistentVolumeClaim:
          claimName: postgres-pvc

---
# Redis Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: myapp
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      containers:
      - name: redis
        image: redis:7-alpine
        ports:
        - containerPort: 6379
        resources:
          requests:
            memory: "128Mi"
            cpu: "100m"
          limits:
            memory: "256Mi"
            cpu: "250m"

---
# Application Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
  namespace: myapp
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
    spec:
      containers:
      - name: myapp
        image: your-registry/myapp:latest
        imagePullPolicy: Always
        ports:
        - containerPort: 3000
        envFrom:
        - configMapRef:
            name: myapp-config
        - secretRef:
            name: myapp-secrets
        resources:
          requests:
            memory: "256Mi"
            cpu: "250m"
          limits:
            memory: "512Mi"
            cpu: "500m"
        livenessProbe:
          httpGet:
            path: /health
            port: 3000
          initialDelaySeconds: 30
          periodSeconds: 10
        readinessProbe:
          httpGet:
            path: /ready
            port: 3000
          initialDelaySeconds: 5
          periodSeconds: 5

---
# Services
apiVersion: v1
kind: Service
metadata:
  name: postgres-service
  namespace: myapp
spec:
  selector:
    app: postgres
  ports:
  - port: 5432
    targetPort: 5432
  type: ClusterIP

---
apiVersion: v1
kind: Service
metadata:
  name: redis-service
  namespace: myapp
spec:
  selector:
    app: redis
  ports:
  - port: 6379
    targetPort: 6379
  type: ClusterIP

---
apiVersion: v1
kind: Service
metadata:
  name: myapp-service
  namespace: myapp
spec:
  selector:
    app: myapp
  ports:
  - port: 80
    targetPort: 3000
  type: ClusterIP

---
# Ingress for external access
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp-ingress
  namespace: myapp
  annotations:
    kubernetes.io/ingress.class: "nginx"
    cert-manager.io/cluster-issuer: "letsencrypt-prod"
    nginx.ingress.kubernetes.io/rate-limit: "100"
    nginx.ingress.kubernetes.io/rate-limit-window: "1m"
spec:
  tls:
  - hosts:
    - yourdomain.com
    secretName: myapp-tls
  rules:
  - host: yourdomain.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: myapp-service
            port:
              number: 80

---
# Horizontal Pod Autoscaler
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: myapp-hpa
  namespace: myapp
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: myapp
  minReplicas: 3
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 80

---
# Network Policy for security
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: myapp-netpol
  namespace: myapp
spec:
  podSelector:
    matchLabels:
      app: myapp
  policyTypes:
  - Ingress
  - Egress
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: ingress-nginx
    ports:
    - protocol: TCP
      port: 3000
  egress:
  - to:
    - podSelector:
        matchLabels:
          app: postgres
    ports:
    - protocol: TCP
      port: 5432
  - to:
    - podSelector:
        matchLabels:
          app: redis
    ports:
    - protocol: TCP
      port: 6379

---
# PodDisruptionBudget for high availability
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: myapp-pdb
  namespace: myapp
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: myapp

# Additional Kubernetes manifests for DigitalOcean

# Load Balancer Service (if you need DO load balancer)
---
apiVersion: v1
kind: Service
metadata:
  name: myapp-lb
  namespace: myapp
  annotations:
    # The DigitalOcean cloud controller reads these annotations when it
    # provisions the external load balancer for this Service.
    service.beta.kubernetes.io/do-loadbalancer-name: "myapp-loadbalancer"
    service.beta.kubernetes.io/do-loadbalancer-protocol: "http"
    # NOTE(review): verify this annotation against current DO docs — TLS
    # termination is normally configured via do-loadbalancer-tls-ports /
    # do-loadbalancer-certificate-id.
    service.beta.kubernetes.io/do-loadbalancer-port-forwarding-rules: "443:http"
    service.beta.kubernetes.io/do-loadbalancer-http-ports: "80"
    service.beta.kubernetes.io/do-loadbalancer-algorithm: "least_connections"
spec:
  type: LoadBalancer
  selector:
    app: myapp
  ports:
  - name: http
    port: 80
    targetPort: 3000
  - name: https
    port: 443
    targetPort: 3000

# doctl commands for cluster management
"""
# Create DigitalOcean Kubernetes cluster
doctl kubernetes cluster create myapp-cluster \
    --region nyc3 \
    --version 1.28.2-do.1 \
    --node-pool "name=worker-pool;size=s-2vcpu-4gb;count=3" \
    --auto-upgrade \
    --enable-cluster-autoscaling \
    --min-nodes=1 \
    --max-nodes=5

# Get cluster credentials
doctl kubernetes cluster kubeconfig save myapp-cluster

# Create block storage for persistent volumes (doctl sizes use GiB/TiB units)
doctl compute volume create postgres-storage --region nyc3 --size 10GiB

# Create domain for DNS management
doctl compute domain create yourdomain.com

# Create DNS records
doctl compute domain records create yourdomain.com --type A --name www --data 192.168.1.1

# Create the container registry (repositories appear on first image push)
doctl registry create myapp

# Tag and push image
docker tag myapp:latest registry.digitalocean.com/your-username/myapp:latest
docker push registry.digitalocean.com/your-username/myapp:latest
"""

💻 Spaces Object Storage javascript

🟡 intermediate ⭐⭐⭐

File upload, download, and management with DigitalOcean Spaces

⏱️ 25 min 🏷️ digitalocean, spaces, storage, s3, files
Prerequisites: DigitalOcean account, Spaces bucket, Node.js knowledge
// DigitalOcean Spaces File Storage
// JavaScript/Node.js - spaces-manager.js

const AWS = require('aws-sdk');
const fs = require('fs');
const path = require('path');
const crypto = require('crypto');

class SpacesManager {
    constructor(accessKey, secretKey, region = 'nyc3', bucketName) {
        // DigitalOcean Spaces uses S3-compatible API
        this.s3 = new AWS.S3({
            accessKeyId: accessKey,
            secretAccessKey: secretKey,
            region: region,
            endpoint: new AWS.Endpoint(`https://${region}.digitaloceanspaces.com`)
        });

        this.bucketName = bucketName;
        this.region = region;
    }

    async uploadFile(filePath, keyPrefix = '', metadata = {}) {
        /** Upload a file to Spaces */
        try {
            const fileName = path.basename(filePath);
            const key = keyPrefix ? `${keyPrefix}/${fileName}` : fileName;

            // Read file
            const fileContent = fs.readFileSync(filePath);

            // Calculate file hash
            const fileHash = crypto.createHash('md5').update(fileContent).digest('hex');

            // Set content type based on file extension
            const contentTypes = {
                '.jpg': 'image/jpeg',
                '.jpeg': 'image/jpeg',
                '.png': 'image/png',
                '.gif': 'image/gif',
                '.pdf': 'application/pdf',
                '.txt': 'text/plain',
                '.json': 'application/json',
                '.html': 'text/html',
                '.css': 'text/css',
                '.js': 'application/javascript',
                '.zip': 'application/zip',
                '.mp4': 'video/mp4',
                '.mp3': 'audio/mpeg'
            };

            const ext = path.extname(fileName).toLowerCase();
            const contentType = contentTypes[ext] || 'application/octet-stream';

            const params = {
                Bucket: this.bucketName,
                Key: key,
                Body: fileContent,
                ContentType: contentType,
                ACL: 'public-read', // Make file publicly accessible
                Metadata: {
                    'original-name': fileName,
                    'upload-time': new Date().toISOString(),
                    'file-hash': fileHash,
                    ...metadata
                }
            };

            const result = await this.s3.upload(params).promise();

            console.log(`✅ File uploaded successfully:`);
            console.log(`   Key: ${result.Key}`);
            console.log(`   URL: ${result.Location}`);
            console.log(`   Size: ${fs.statSync(filePath).size} bytes`);
            console.log(`   Hash: ${fileHash}`);

            return {
                key: result.Key,
                url: result.Location,
                size: result.ContentLength,
                hash: fileHash
            };

        } catch (error) {
            console.error(`❌ Error uploading file: ${error.message}`);
            throw error;
        }
    }

    async downloadFile(key, localPath) {
        /** Download a file from Spaces */
        try {
            const params = {
                Bucket: this.bucketName,
                Key: key
            };

            const result = await this.s3.getObject(params).promise();

            // Ensure directory exists
            const dir = path.dirname(localPath);
            if (!fs.existsSync(dir)) {
                fs.mkdirSync(dir, { recursive: true });
            }

            // Write file
            fs.writeFileSync(localPath, result.Body);

            console.log(`✅ File downloaded successfully:`);
            console.log(`   Key: ${key}`);
            console.log(`   Path: ${localPath}`);
            console.log(`   Size: ${result.ContentLength} bytes`);

            return {
                key: key,
                path: localPath,
                size: result.ContentLength,
                metadata: result.Metadata
            };

        } catch (error) {
            console.error(`❌ Error downloading file: ${error.message}`);
            throw error;
        }
    }

    async listFiles(prefix = '', maxKeys = 1000) {
        /** List files in Spaces */
        try {
            const params = {
                Bucket: this.bucketName,
                Prefix: prefix,
                MaxKeys: maxKeys
            };

            const result = await this.s3.listObjectsV2(params).promise();

            console.log(`📋 Found ${result.Contents.length} file(s) in Spaces:`);
            console.log("-" * 80);

            const files = result.Contents.map(obj => {
                const fileSize = this.formatFileSize(obj.Size);
                const lastModified = new Date(obj.LastModified).toLocaleString();

                console.log(`📄 ${obj.Key}`);
                console.log(`   Size: ${fileSize}`);
                console.log(`   Modified: ${lastModified}`);
                console.log(`   URL: https://${this.bucketName}.${this.region}.digitaloceanspaces.com/${obj.Key}`);
                console.log();

                return {
                    key: obj.Key,
                    size: obj.Size,
                    lastModified: obj.LastModified,
                    url: `https://${this.bucketName}.${this.region}.digitaloceanspaces.com/${obj.Key}`,
                    etag: obj.ETag
                };
            });

            if (result.IsTruncated) {
                console.log(`ℹ️  More files available. Use continuation token for next page.`);
            }

            return files;

        } catch (error) {
            console.error(`❌ Error listing files: ${error.message}`);
            throw error;
        }
    }

    async deleteFile(key) {
        /** Delete a file from Spaces */
        try {
            const params = {
                Bucket: this.bucketName,
                Key: key
            };

            await this.s3.deleteObject(params).promise();

            console.log(`🗑️  File deleted successfully: ${key}`);

            return true;

        } catch (error) {
            console.error(`❌ Error deleting file: ${error.message}`);
            throw error;
        }
    }

    async generatePresignedUrl(key, expiresIn = 3600) {
        /** Generate a presigned URL for temporary access */
        try {
            const params = {
                Bucket: this.bucketName,
                Key: key,
                Expires: expiresIn
            };

            const url = await this.s3.getSignedUrlPromise('getObject', params);

            console.log(`🔗 Presigned URL generated:`);
            console.log(`   Key: ${key}`);
            console.log(`   URL: ${url}`);
            console.log(`   Expires in: ${expiresIn} seconds`);

            return url;

        } catch (error) {
            console.error(`❌ Error generating presigned URL: ${error.message}`);
            throw error;
        }
    }

    async createFolder(folderPath) {
        /** Create a folder (actually just a placeholder object) */
        try {
            const key = folderPath.endsWith('/') ? folderPath : `${folderPath}/`;

            const params = {
                Bucket: this.bucketName,
                Key: key,
                Body: '',
                ContentType: 'application/x-directory'
            };

            await this.s3.putObject(params).promise();

            console.log(`📁 Folder created: ${key}`);

            return key;

        } catch (error) {
            console.error(`❌ Error creating folder: ${error.message}`);
            throw error;
        }
    }

    async getFileInfo(key) {
        /** Get detailed information about a file */
        try {
            const params = {
                Bucket: this.bucketName,
                Key: key
            };

            const result = await this.s3.headObject(params).promise();

            console.log(`ℹ️  File Information for: ${key}`);
            console.log("-" * 50);
            console.log(`Size: ${this.formatFileSize(result.ContentLength)}`);
            console.log(`Last Modified: ${new Date(result.LastModified).toLocaleString()}`);
            console.log(`Content Type: ${result.ContentType}`);
            console.log(`ETag: ${result.ETag}`);
            console.log(`URL: https://${this.bucketName}.${this.region}.digitaloceanspaces.com/${key}`);

            if (result.Metadata) {
                console.log(`Metadata: ${JSON.stringify(result.Metadata, null, 2)}`);
            }

            return {
                key: key,
                size: result.ContentLength,
                lastModified: result.LastModified,
                contentType: result.ContentType,
                etag: result.ETag,
                metadata: result.Metadata
            };

        } catch (error) {
            console.error(`❌ Error getting file info: ${error.message}`);
            throw error;
        }
    }

    formatFileSize(bytes) {
        /** Format file size in human readable format */
        const units = ['B', 'KB', 'MB', 'GB', 'TB'];
        let size = bytes;
        let unitIndex = 0;

        while (size >= 1024 && unitIndex < units.length - 1) {
            size /= 1024;
            unitIndex++;
        }

        return `${size.toFixed(2)} ${units[unitIndex]}`;
    }

    async syncFolder(localFolder, spacesPrefix = '', excludePatterns = []) {
        /** Sync a local folder to Spaces */
        try {
            console.log(`🔄 Starting sync from ${localFolder} to Spaces prefix ${spacesPrefix}...`);

            let uploadedCount = 0;
            let skippedCount = 0;
            let errorCount = 0;

            // Recursively walk through local folder
            const walkDir = (dir, prefix = '') => {
                const files = fs.readdirSync(dir);

                files.forEach(file => {
                    const filePath = path.join(dir, file);
                    const stats = fs.statSync(filePath);

                    // Check if file should be excluded
                    if (excludePatterns.some(pattern => file.match(pattern))) {
                        console.log(`⏭️  Skipping excluded file: ${filePath}`);
                        skippedCount++;
                        return;
                    }

                    if (stats.isDirectory()) {
                        // Recurse into subdirectory
                        const subPrefix = prefix ? `${prefix}/${file}` : file;
                        walkDir(filePath, subPrefix);
                    } else {
                        // Upload file
                        const keyPrefix = spacesPrefix ? `${spacesPrefix}/${prefix}` : prefix;

                        try {
                            this.uploadFile(filePath, keyPrefix, {
                                'local-path': filePath,
                                'sync-time': new Date().toISOString()
                            });
                            uploadedCount++;
                        } catch (error) {
                            console.error(`❌ Error uploading ${filePath}: ${error.message}`);
                            errorCount++;
                        }
                    }
                });
            };

            walkDir(localFolder);

            console.log(`\n✅ Sync completed:`);
            console.log(`   Uploaded: ${uploadedCount} files`);
            console.log(`   Skipped: ${skippedCount} files`);
            console.log(`   Errors: ${errorCount} files`);

            return {
                uploaded: uploadedCount,
                skipped: skippedCount,
                errors: errorCount
            };

        } catch (error) {
            console.error(`❌ Error syncing folder: ${error.message}`);
            throw error;
        }
    }
}

// Example usage
async function main() {
    // Pull credentials from .env / environment variables
    require('dotenv').config();

    const manager = new SpacesManager(
        process.env.SPACES_ACCESS_KEY,
        process.env.SPACES_SECRET_KEY,
        process.env.SPACES_REGION || 'nyc3',
        process.env.SPACES_BUCKET_NAME
    );

    try {
        // 1) Upload a file
        console.log("📤 Uploading example file...");
        await manager.uploadFile('./example.txt', 'documents');

        // 2) List files
        console.log("\n📋 Listing files...");
        await manager.listFiles();

        // 3) Create a folder placeholder
        console.log("\n📁 Creating folder...");
        await manager.createFolder('images/2024');

        // 4) Generate a temporary access URL
        console.log("\n🔗 Generating presigned URL...");
        await manager.generatePresignedUrl('documents/example.txt', 3600);

        // 5) Sync a folder, but only when it exists locally
        if (fs.existsSync('./public')) {
            console.log("\n🔄 Syncing public folder...");
            await manager.syncFolder('./public', 'assets', [
                /.DS_Store/,
                /Thumbs.db/,
                /.git/
            ]);
        }

    } catch (error) {
        console.error('❌ Error in main:', error);
    }
}

// Export the class
module.exports = SpacesManager;

// Run example if called directly
if (require.main === module) {
    main();
}

// package.json dependencies
/*
{
  "dependencies": {
    "aws-sdk": "^2.1500.0",
    "dotenv": "^16.3.1"
  }
}
*/

💻 App Platform Deployment text

🔴 complex ⭐⭐⭐⭐

Deploy containerized and static applications to DigitalOcean App Platform

⏱️ 35 min 🏷️ digitalocean, app platform, deployment, containers, paas
Prerequisites: DigitalOcean account, App Platform enabled, Docker knowledge, YAML configuration
# DigitalOcean App Platform Deployment
# app.yaml - Complete application deployment configuration

# Production configuration
name: my-production-app
region: nyc3
services:
- name: web
  # Source code location
  source_dir: /
  # GitHub repository — the source of this service.
  # NOTE: a service may declare only ONE source (github, git, gitlab, OR image);
  # declaring both `github` and `image` in the same service is rejected by the
  # App Platform spec validator, so the registry alternative below is commented out.
  github:
    repo: your-username/your-app
    branch: main
    deploy_on_push: true

  # Alternative source: pre-built image from DigitalOcean Container Registry.
  # Uncomment this (and remove the `github` block above) to deploy from DOCR.
  # image:
  #   registry_type: DOCR
  #   repository: your-username/myapp
  #   tag: latest

  # Build configuration
  build_command: |
    npm install --production
    npm run build

  # Run command
  run_command: npm start

  # HTTP port
  http_port: 3000

  # Instance count (auto-scaling)
  instance_count: 2
  instance_size_slug: professional-xs

  # Environment variables (${...} bindable references are resolved by App Platform)
  env:
  - key: NODE_ENV
    value: production
  - key: PORT
    value: "3000"
  - key: DATABASE_URL
    value: ${db.DATABASE_URL}
  - key: REDIS_URL
    value: ${redis.REDIS_URL}
  - key: JWT_SECRET
    value: ${_self.JWT_SECRET}

  # Health check
  health_check:
    http_path: /health
    port: 3000
    initial_delay_seconds: 30
    period_seconds: 10
    timeout_seconds: 5
    success_threshold: 3
    failure_threshold: 5

  # Resource limits
  # NOTE(review): `resource_limits` and `deployment_strategy` do not appear in the
  # documented App Platform app spec — confirm against the current spec reference.
  resource_limits:
    memory: 512Mi
    cpu: 500m

  # Deployment strategy
  deployment_strategy:
    type: RollingUpdate
    max_unavailable: 1
    max_surge: 1

# Static site service
# NOTE(review): in the App Platform app spec, static sites live under a top-level
# `static_sites:` key (where `output_dir` is valid) rather than `services:` —
# confirm before deploying this document as-is.
- name: docs
  source_dir: ./docs
  github:
    repo: your-username/docs
    branch: gh-pages
  build_command: |
    npm install
    npm run build
  # Directory produced by the build that should be served
  output_dir: _site
  http_port: 80
  # Serve the site under /docs, keeping the prefix on forwarded requests
  routes:
  - path: /docs
    preserve_path_prefix: true
  resource_limits:
    memory: 128Mi
    cpu: 100m

# Workers/Cron jobs
- name: daily-backup
  source_dir: ./scripts
  # Job runs from a pre-built image on Docker Hub rather than building from source
  image:
    registry_type: DOCKER_HUB
    repository: your-username/backup-worker
    tag: latest
  run_command: node backup.js
  # Standard five-field cron expression
  schedule: "0 2 * * *"  # Daily at 2 AM
  # NOTE(review): documented App Platform job kinds are PRE_DEPLOY / POST_DEPLOY /
  # FAILED_DEPLOY; confirm that `kind: CRON` with `schedule` is supported by the
  # current app spec before relying on this.
  kind: CRON
  # Credentials injected from the database component and app-level secrets
  env:
  - key: DATABASE_URL
    value: ${db.DATABASE_URL}
  - key: SPACES_ACCESS_KEY
    value: ${_self.SPACES_ACCESS_KEY}
  - key: SPACES_SECRET_KEY
    value: ${_self.SPACES_SECRET_KEY}
  resource_limits:
    memory: 256Mi
    cpu: 250m

# Databases
# Managed PostgreSQL; other components reference it as ${db.DATABASE_URL}
databases:
- name: db
  engine: PG
  version: "15"
  size: db-s-1vcpu-2gb
  # Connection details (automatically injected as environment variables)
  # DATABASE_URL
  # DB_HOST, DB_PORT, DB_NAME, DB_USER, DB_PASSWORD
  # BACKUP_URL for restore

# Redis cache
# Referenced by the web service as ${redis.REDIS_URL}
- name: redis
  engine: REDIS
  version: "7"
  size: redis-s-1vcpu-512mb
  # Connection details (automatically injected as environment variables)
  # REDIS_URL
  # REDIS_HOST, REDIS_PORT, REDIS_PASSWORD

# Domains
# PRIMARY custom domain with wildcard subdomains; `zone` names the DNS zone
domains:
- domain: yourdomain.com
  type: PRIMARY
  zone: yourdomain.com
  wildcard: true

# Monitoring and alerts
alerts:
# Notify when a deployment fails
- rule: DEPLOYMENT_FAILED
  value: "1"
  operator: GREATER_THAN
  notification_emails:
  - [email protected]
# Notify when domain configuration fails
- rule: DOMAIN_FAILED
  value: "1"
  operator: GREATER_THAN
  notification_emails:
  - [email protected]
# Notify when the error rate exceeds 90
# NOTE(review): only this alert carries `comparison: PERCENTAGE` — confirm the
# field name and placement against the current app spec alert schema.
- rule: HIGH_ERROR_RATE
  value: "90"
  operator: GREATER_THAN
  notification_emails:
  - [email protected]
  comparison: PERCENTAGE

# Development environment variant
# app.dev.yaml
---
name: my-app-dev
region: nyc3
services:
- name: web
  source_dir: /
  github:
    repo: your-username/your-app
    branch: develop
    deploy_on_push: true
  build_command: npm install && npm run build
  # `npm run dev` — presumably a watch/dev server; differs from production `npm start`
  run_command: npm run dev
  http_port: 3000
  # Single small instance keeps development cost minimal
  instance_count: 1
  instance_size_slug: basic-xxs
  env:
  - key: NODE_ENV
    value: development
  resource_limits:
    memory: 256Mi
    cpu: 100m

# Staging environment variant
# app.staging.yaml
---
name: my-app-staging
region: nyc3
services:
- name: web
  source_dir: /
  github:
    repo: your-username/your-app
    branch: staging
    deploy_on_push: true
  # `npm ci` gives reproducible installs from the lockfile
  build_command: npm ci --production && npm run build
  run_command: npm start
  http_port: 3000
  instance_count: 1
  instance_size_slug: basic-xs
  env:
  - key: NODE_ENV
    value: staging
  # NOTE(review): references a database component named `db-staging`, but this
  # document declares no `databases:` section — add one or the bind will fail.
  - key: DATABASE_URL
    value: ${db-staging.DATABASE_URL}
  resource_limits:
    memory: 512Mi
    cpu: 250m

# Multi-service application (microservices)
# app-microservices.yaml
---
name: my-microservices-app
region: nyc3
services:
# API Gateway — the only service with a public route ("/"); fans requests out to
# the internal services via the URLs in its env vars
- name: api-gateway
  source_dir: ./gateway
  github:
    repo: your-username/microservices
    branch: main
  build_command: |
    cd gateway
    npm install --production
    npm run build
  run_command: |
    cd gateway
    npm start
  http_port: 8080
  instance_count: 2
  instance_size_slug: basic-xs
  routes:
  - path: /
    preserve_path_prefix: false
  # Internal service endpoints — host:port pairs match the `internal_ports`
  # declared on each downstream service below
  env:
  - key: AUTH_SERVICE_URL
    value: http://auth-service:3001
  - key: USER_SERVICE_URL
    value: http://user-service:3002
  - key: PRODUCT_SERVICE_URL
    value: http://product-service:3003

# Auth Service — internal only: no public route; reachable from the gateway as
# http://auth-service:3001 (see AUTH_SERVICE_URL above)
- name: auth-service
  source_dir: ./auth
  github:
    repo: your-username/microservices
    branch: main
  build_command: |
    cd auth
    npm install --production
    npm run build
  run_command: |
    cd auth
    npm start
  http_port: 3001
  instance_count: 1
  instance_size_slug: basic-xxs
  # Expose the port on the private network only
  internal_ports:
  - 3001

# User Service — internal only; owns its own database component (`user-db`)
- name: user-service
  source_dir: ./user
  github:
    repo: your-username/microservices
    branch: main
  build_command: |
    cd user
    npm install --production
    npm run build
  run_command: |
    cd user
    npm start
  http_port: 3002
  instance_count: 1
  instance_size_slug: basic-xxs
  # Expose the port on the private network only
  internal_ports:
  - 3002
  env:
  - key: DATABASE_URL
    value: ${user-db.DATABASE_URL}

# Product Service — internal only; scaled to 2 instances and backed by its own
# database (`product-db`) plus the shared Redis cache
- name: product-service
  source_dir: ./product
  github:
    repo: your-username/microservices
    branch: main
  build_command: |
    cd product
    npm install --production
    npm run build
  run_command: |
    cd product
    npm start
  http_port: 3003
  instance_count: 2
  instance_size_slug: basic-xs
  # Expose the port on the private network only
  internal_ports:
  - 3003
  env:
  - key: DATABASE_URL
    value: ${product-db.DATABASE_URL}
  - key: REDIS_URL
    value: ${cache.REDIS_URL}

# Databases for microservices — one PostgreSQL per owning service
databases:
- name: user-db
  engine: PG
  version: "15"
  size: db-s-1vcpu-1gb
# Larger instance for the product catalog
- name: product-db
  engine: PG
  version: "15"
  size: db-s-2vcpu-4gb

# Shared cache — bound into product-service as ${cache.REDIS_URL}
- name: cache
  engine: REDIS
  version: "7"
  size: redis-s-1vcpu-1gb

# CLI commands for App Platform (doctl quick reference)
# FIX: this section was previously wrapped in Python-style `"""` delimiters,
# which are not valid YAML and break parsing of this document; it is now plain
# `#` comments so the file remains a parseable YAML document.
#
# Install doctl
#   curl -sSL https://dl.digitalocean.com/doctl/install.sh | sh
#
# Authenticate
#   doctl auth init
#
# Create app from spec
#   doctl apps create --spec app.yaml
#
# Update app
#   doctl apps update <app-id> --spec app.yaml
#
# List apps
#   doctl apps list
#
# Get app details
#   doctl apps get <app-id>
#
# List deployments
#   doctl apps list-deployments <app-id>
#
# Create database
#   doctl databases create mydb --engine pg --version 15 --size db-s-1vcpu-2gb --region nyc3
#
# Create Redis
#   doctl databases create mycache --engine redis --version 7 --size redis-s-1vcpu-1gb --region nyc3
#
# Scale app
#   doctl apps update <app-id> --spec spec/production.yaml
#
# View logs
#   doctl apps logs <app-id> --follow
#
# Delete app
#   doctl apps delete <app-id>