Примеры Service Mesh Consul

Примеры service mesh HashiCorp Consul с обнаружением служб, конфигурацией и безопасностью

⚙️ Базовая настройка и регистрация сервисов Consul json

🟢 simple ⭐⭐

Базовая настройка Consul с регистрацией сервисов и проверками работоспособности

⏱️ 15 min 🏷️ consul, configuration, service discovery
Prerequisites: Consul CLI, Docker or VM environment
{
  "datacenter": "dc1",
  "data_dir": "/opt/consul/data",
  "log_level": "INFO",
  "server": true,
  "bootstrap_expect": 1,
  "ui_config": {
    "enabled": true
  },
  "bind_addr": "0.0.0.0",
  "client_addr": "0.0.0.0",
  "ports": {
    "http": 8500,
    "https": 8501,
    "grpc": 8502,
    "dns": 8600,
    "serf_lan": 8301,
    "serf_wan": 8302,
    "server": 8300
  },
  "connect": {
    "enabled": true
  },
  "acl": {
    "enabled": true,
    "default_policy": "deny",
    "down_policy": "extend-cache"
  },
  "autopilot": {
    "cleanup_dead_servers": true,
    "last_contact_threshold": "200ms",
    "max_trailing_logs": 250,
    "server_stabilization_time": "10s",
    "enable_reconciliation": false,
    "disable_upgrade_migration": false,
    "upgrade_version_tag": ""
  }
}

# Consul Agent Configuration for Servers
{
  "node_name": "consul-server-1",
  "datacenter": "dc1",
  "data_dir": "/opt/consul/data",
  "log_level": "INFO",
  "server": true,
  "bootstrap_expect": 1,
  "ui_config": {
    "enabled": true
  },
  "bind_addr": "0.0.0.0",
  "client_addr": "0.0.0.0",
  "advertise_addr": "{{GetInterfaceIP \"eth0\"}}",
  "retry_join": [
    "provider=aws tag_key=consul tag_value=server"
  ],
  "connect": {
    "enabled": true
  },
  "ports": {
    "http": 8500,
    "https": 8501,
    "grpc": 8502,
    "dns": 8600,
    "serf_lan": 8301,
    "serf_wan": 8302,
    "server": 8300
  },
  "telemetry": {
    "disable_hostname": true,
    "enable_host_metrics": true,
    "enable_service_metrics": true
  }
}

# Consul Agent Configuration for Clients
{
  "datacenter": "dc1",
  "data_dir": "/opt/consul/data",
  "log_level": "INFO",
  "server": false,
  "bind_addr": "0.0.0.0",
  "client_addr": "0.0.0.0",
  "advertise_addr": "{{GetInterfaceIP \"eth0\"}}",
  "retry_join": [
    "provider=aws tag_key=consul tag_value=server"
  ],
  "connect": {
    "enabled": true
  },
  "ports": {
    "http": 8500,
    "https": 8501,
    "grpc": 8502,
    "dns": 8600,
    "serf_lan": 8301,
    "serf_wan": 8302
  },
  "services": [
    {
      "name": "web",
      "tags": ["v1"],
      "port": 8080,
      "connect": {
        "sidecar_service": {}
      },
      "check": {
        "id": "web-check",
        "http": "http://localhost:8080/health",
        "interval": "10s",
        "timeout": "3s"
      }
    },
    {
      "name": "api",
      "tags": ["v1"],
      "port": 9090,
      "connect": {
        "sidecar_service": {}
      },
      "check": {
        "id": "api-check",
        "http": "http://localhost:9090/health",
        "interval": "10s",
        "timeout": "3s"
      }
    }
  ]
}

💻 Намерения и безопасность Service Mesh Consul hcl

🟡 intermediate ⭐⭐⭐

Безопасность service mesh с намерениями и mTLS

⏱️ 25 min 🏷️ consul, service mesh, security, intentions
Prerequisites: Consul Enterprise or OSS with ACLs, HCL knowledge
# Consul Intentions for Service Mesh Security
# Define service communication rules with HCL
#
# NOTE(review): the `consul_intention "name" { ... }` block shape below
# resembles a Terraform resource rather than native Consul config-entry HCL
# (which uses `Kind = "service-intentions"` / `Name = ...` fields). Confirm
# which tool actually consumes these files before applying them.

# Allow all services to access the web service
consul_intention "web_allow_all" {
  source_name      = "*"
  destination_name = "web"
  action           = "allow"
  description      = "Allow all services to access web frontend"
}

# Restrict API access to specific services
consul_intention "api_restricted" {
  source_name      = "web"
  destination_name = "api"
  action           = "allow"
  description      = "Allow web service to access API"
}

consul_intention "api_deny_others" {
  source_name      = "*"
  destination_name = "api"
  action           = "deny"
  description      = "Deny all other services from accessing API directly"
}

# Database access restrictions
consul_intention "db_api_only" {
  source_name      = "api"
  destination_name = "database"
  action           = "allow"
  description      = "Only API service can access database"
}

consul_intention "db_deny_direct" {
  source_name      = "web"
  destination_name = "database"
  action           = "deny"
  description      = "Web service cannot access database directly"
}

# External service access with L7 intentions
# NOTE(review): in Consul, a top-level intention `action` and L7
# `permissions` are mutually exclusive for the same source -- verify that
# the blocks below (which set both) are accepted by the target tooling.
consul_intention "payment_api" {
  source_name      = "api"
  destination_name = "payment"
  action           = "allow"
  description      = "API can access payment service"
  permissions = [
    {
      action = "read",
      http = {
        path_exact = "/api/payment/status"
      }
    },
    {
      action = "write",
      http = {
        path_exact = "/api/payment/process"
      }
    }
  ]
}

# Fine-grained API access control
consul_intention "api_v1_access" {
  source_name      = "mobile-app"
  destination_name = "api"
  action           = "allow"
  description      = "Mobile app can access API v1 endpoints only"
  permissions = [
    {
      action = "read",
      http = {
        path_prefix = "/api/v1/"
      }
    }
  ]
}

# Deny access to deprecated endpoints
consul_intention "block_deprecated_api" {
  source_name      = "*"
  destination_name = "api"
  action           = "deny"
  description      = "Block access to deprecated API endpoints"
  permissions = [
    {
      action = "deny",
      http = {
        path_prefix = "/api/v0/"
      }
    }
  ]
}

# Time-based access control
# NOTE(review): despite the name, nothing below is time-scoped -- the rule
# simply allows admin-tools read access to /api/admin/ at all times.
consul_intention "maintenance_window" {
  source_name      = "admin-tools"
  destination_name = "api"
  action           = "allow"
  description      = "Admin tools access during maintenance window"
  permissions = [
    {
      action = "read",
      http = {
        path_prefix = "/api/admin/"
      }
    }
  ]
}

# Rate limiting configuration
# NOTE(review): despite the comment above, no rate-limit fields appear in
# this entry -- it is a service-defaults entry carrying protocol and
# gateway/proxy settings only.
consul_service_config_entry "api_rate_limit" {
  name = "api"
  kind = "service-defaults"

  protocol = "http"

  mesh_gateway = {
    mode = "local"
  }

  connect_gateway = {
    proxy = {
      destinations = [
        {
          service_name = "api"
          port         = 9090
        }
      ]
    }
  }
}

# Service resolver with failover
# Routes "api" to version-tagged subsets; default traffic goes to v1.
consul_service_resolver_entry "api_resolver" {
  name = "api"

  default_subset = "v1"

  subsets {
    name = "v1"
    filter = "Service.Meta.version == \"v1\""
  }

  subsets {
    name = "v2"
    filter = "Service.Meta.version == \"v2\""
  }

  failover {
    service_name = "api-v2"
    destinations = ["*"]
  }
}

# Service router for traffic splitting
# Routes are evaluated in order: /api/v2/ traffic pins to the v2 subset,
# everything else under /api/ falls through to v1.
consul_service_router_entry "api_router" {
  name = "api"

  routes {
    match {
      http {
        path_prefix = "/api/v2/"
      }
    }
    destination {
      service_subset = "v2"
      service = "api"
    }
  }

  routes {
    match {
      http {
        path_prefix = "/api/"
      }
    }
    destination {
      service_subset = "v1"
      service = "api"
    }
  }
}

# Split configuration
# 90/10 weighted split between the v1 and v2 subsets of "api".
consul_service_splitter_entry "api_splitter" {
  name = "api"

  splits {
    weight = 90
    service_subset = "v1"
    service = "api"
  }

  splits {
    weight = 10
    service_subset = "v2"
    service = "api"
  }
}

# Service defaults with TLS and timeouts
# NOTE(review): `jsonencode(...)` is a Terraform function, not native Consul
# HCL -- like the other `consul_*_entry` blocks in this section, this appears
# Terraform-flavored; confirm the consuming tool before applying.
consul_service_config_entry "web_defaults" {
  name = "web"
  kind = "service-defaults"

  protocol = "http"

  connect = {
    sidecar_service = {
      proxy = {
        config = {
          envoy_extra_static_clusters = jsonencode([
            {
              name = "service_stats"
              type = "STATIC"
              connect_timeout = "5s"
              lb_policy = "ROUND_ROBIN"
              load_assignment = {
                cluster_name = "service_stats"
                endpoints = [{
                  lb_endpoints = [{
                    endpoint = {
                      address = {
                        socket_address = {
                          address = "127.0.0.1"
                          port_value = 9102
                        }
                      }
                    }
                  }]
                }]
              }
            }
          ])
        }
      }
    }
  }
}

💻 Service Mesh Consul в Kubernetes yaml

🟡 intermediate ⭐⭐⭐⭐

Развертывание service mesh Consul в Kubernetes

⏱️ 30 min 🏷️ consul, kubernetes, helm, service mesh
Prerequisites: Kubernetes cluster, Helm, Consul Enterprise or OSS
# Consul on Kubernetes with Service Mesh
# consul-values.yaml for Helm installation

global:
  name: consul
  image: hashicorp/consul:1.17.0
  imageK8S: hashicorp/consul-k8s-control-plane:1.1.1
  datacenter: dc1
  tls:
    enabled: true
    https: true
    rpc: true
    grpc: true
    verifyIncoming: true
    verifyIncomingRpc: true
    verifyOutgoing: true
    verifyServerHostname: true
    autoEncrypt:
      tls: true

server:
  replicas: 3
  bootstrapExpect: 3
  storage: 10Gi
  storageClass: gp2
  # extraConfig is raw Consul agent JSON, so keys must use the agent's
  # snake_case names -- camelCase keys (cleanupDeadServers, ...) are not
  # recognized by Consul.
  extraConfig: |
    {
      "autopilot": {
        "cleanup_dead_servers": true,
        "last_contact_threshold": "200ms",
        "max_trailing_logs": 250,
        "server_stabilization_time": "10s",
        "enable_reconciliation": false,
        "disable_upgrade_migration": false,
        "upgrade_version_tag": ""
      }
    }

ui:
  enabled: true
  service:
    type: LoadBalancer
    annotations: |
      service.beta.kubernetes.io/aws-load-balancer-type: "nlb"

connectInject:
  enabled: true
  default: true
  sidecarProxy:
    resources:
      requests:
        cpu: 100m
        memory: 64Mi
      limits:
        cpu: 200m
        memory: 128Mi
  transparentProxy:
    defaultEnabled: true

ingressGateways:
  enabled: true
  defaults:
    service:
      type: LoadBalancer
      annotations: |
        service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
  gateways:
    - name: ingress-gateway
      service:
        ports:
          - port: 8080
          - port: 8443

meshGateway:
  enabled: true
  replicas: 2
  service:
    type: LoadBalancer
    annotations: |
      service.beta.kubernetes.io/aws-load-balancer-type: "nlb"

apiGateway:
  enabled: true
  image: hashicorp/consul-api-gateway:1.0.1

terminatingGateways:
  enabled: true
  gateways:
    - name: terminating-gateway
      services:
        - name: external-database

# NOTE(review): `controller` was folded into connectInject in consul-k8s 1.x;
# confirm this key is still honored by the chart version in use.
controller:
  enabled: true

prometheus:
  enabled: true

# Service definitions for applications
---
# ClusterIP Service fronting the `web` pods (selector `app: web`) on port 8080.
apiVersion: v1
kind: Service
metadata:
  name: web-service
  namespace: default
  labels:
    app: web
spec:
  selector:
    app: web
  ports:
    - name: http
      port: 8080
      targetPort: 8080
  type: ClusterIP

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-deployment
  namespace: default
  labels:
    app: web
spec:
  replicas: 2
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
      # Consul connect-inject annotations: join the mesh on port 8080 and
      # expose upstream "api" on localhost:9090 inside the pod.
      annotations:
        'consul.hashicorp.com/connect-service-upstreams': 'api:9090'
        'consul.hashicorp.com/connect-inject': 'true'
        'consul.hashicorp.com/connect-service-port': '8080'
        'consul.hashicorp.com/connect-service-protocol': 'http'
        'consul.hashicorp.com/sidecar-proxy-cpu-limit': '200m'
        'consul.hashicorp.com/sidecar-proxy-memory-limit': '128Mi'
    spec:
      containers:
        - name: web
          image: nginx:alpine
          ports:
            - containerPort: 8080
          env:
            - name: CONSUL_HTTP_ADDR
              value: 'consul-server:8500'
            - name: API_URL
              value: 'http://localhost:9090'
          command: ["/bin/sh"]
          args:
            - -c
            - |
              # Write the nginx config via a quoted heredoc so the shell does
              # NOT expand $host / $remote_addr (the previous double-quoted
              # echo blanked them out, producing an invalid nginx config).
              cat > /etc/nginx/conf.d/default.conf <<'NGINX_CONF'
              server {
                listen 8080;
                location / {
                  proxy_pass http://localhost:9090;
                  proxy_set_header Host $host;
                  proxy_set_header X-Real-IP $remote_addr;
                }
                location /health {
                  return 200 'OK';
                  add_header Content-Type text/plain;
                }
              }
              NGINX_CONF
              nginx -g 'daemon off;'

---
# ClusterIP Service fronting the `api` pods (selector `app: api`) on port 9090.
apiVersion: v1
kind: Service
metadata:
  name: api-service
  namespace: default
  labels:
    app: api
spec:
  selector:
    app: api
  ports:
    - name: http
      port: 9090
      targetPort: 9090
  type: ClusterIP

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: api-deployment
  namespace: default
  labels:
    app: api
spec:
  replicas: 2
  selector:
    matchLabels:
      app: api
  template:
    metadata:
      labels:
        app: api
        version: v1
      # Consul connect-inject annotations: register the pod in the mesh on
      # port 9090 and set Service.Meta.version=v1 so the ServiceResolver
      # subsets defined later in this file can match it.
      annotations:
        'consul.hashicorp.com/connect-inject': 'true'
        'consul.hashicorp.com/connect-service-port': '9090'
        'consul.hashicorp.com/connect-service-protocol': 'http'
        'consul.hashicorp.com/service-tags': 'v1,api'
        'consul.hashicorp.com/service-meta-version': 'v1'
    spec:
      containers:
        - name: api
          image: python:3.9-alpine
          ports:
            - containerPort: 9090
          env:
            - name: PORT
              value: '9090'
            - name: CONSUL_HTTP_ADDR
              value: 'consul-server:8500'
          # The heredoc delimiter is quoted ('EOF'), so the Python source is
          # written verbatim -- no shell expansion of $ or braces.
          # NOTE(review): flask is pip-installed unpinned on every pod start;
          # consider baking the dependency into the image.
          command: ["/bin/sh"]
          args:
            - -c
            - |
              cat > app.py << 'EOF'
              from flask import Flask, jsonify, request
              import os
              import time

              app = Flask(__name__)

              @app.route('/health')
              def health():
                  return jsonify({'status': 'ok'})

              @app.route('/api/users')
              def users():
                  return jsonify({
                      'users': [
                          {'id': 1, 'name': 'John Doe'},
                          {'id': 2, 'name': 'Jane Smith'}
                      ]
                  })

              @app.route('/api/data', methods=['POST'])
              def data():
                  data = request.get_json()
                  return jsonify({
                      'received': data,
                      'timestamp': time.time()
                  })

              if __name__ == '__main__':
                  app.run(host='0.0.0.0', port=int(os.getenv('PORT', 9090)))
              EOF

              pip install flask
              python app.py

---
# Consul Service Entries for Intentions
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceIntentions
metadata:
  name: api-intentions
spec:
  destination:
    name: api
  sources:
    # A source may set either `action` or L7 `permissions`, not both;
    # each permission therefore carries its own action.
    - name: web
      permissions:
        - action: allow
          http:
            pathPrefix: "/api/"
            methods: ["GET", "POST"]
    - name: "*"
      action: deny

---
# Ingress Gateway Configuration
# Exposes the mesh `web` service at web.example.com over HTTP (8080)
# and HTTPS (8443).
apiVersion: consul.hashicorp.com/v1alpha1
kind: IngressGateway
metadata:
  name: web-ingress
spec:
  listeners:
    - port: 8080
      protocol: HTTP
      services:
        - name: web
          hosts: ["web.example.com"]
    # NOTE(review): tls.enabled requires certificates to be provisioned for
    # the gateway (Consul-managed or SDS) -- confirm cert setup.
    - port: 8443
      protocol: HTTPS
      tls:
        enabled: true
      services:
        - name: web
          hosts: ["web.example.com"]

---
# API Gateway Configuration
# HTTP listener on 8080; `tls: null` leaves the listener unencrypted.
apiVersion: consul.hashicorp.com/v1alpha1
kind: APIGateway
metadata:
  name: web-api-gateway
spec:
  listeners:
    - name: http
      port: 8080
      protocol: HTTP
      tls: null

---
# Routes web.example.com traffic from the gateway to the `web` service.
# NOTE(review): recent Gateway API versions route with `backendRefs`;
# `forwardTo` is the older v1alpha1 field -- confirm the CRD version the
# installed consul-api-gateway expects.
apiVersion: consul.hashicorp.com/v1alpha1
kind: HTTPRoute
metadata:
  name: web-route
spec:
  parentRefs:
    - name: web-api-gateway
      kind: APIGateway
  hostnames: ["web.example.com"]
  rules:
    - matches:
        - path:
            match: Prefix
            value: "/"
      forwardTo:
        - serviceName: web
          port: 8080

---
# Service Resolvers for Traffic Splitting
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceResolver
metadata:
  name: api-resolver
spec:
  defaultSubset: v1
  subsets:
    v1:
      filter: 'Service.Meta.version == "v1"'
    v2:
      filter: 'Service.Meta.version == "v2"'
  # `failover` is a map keyed by subset name ('*' = any subset), not a list.
  # On failure, try the local v2 subset first, then the dc2 peer.
  failover:
    '*':
      targets:
        - serviceSubset: v2
        - peer: dc2

---
# Service Splitter for A/B Testing
apiVersion: consul.hashicorp.com/v1alpha1
kind: ServiceSplitter
metadata:
  name: api-splitter
spec:
  splits:
    # The CRD field is `weight` (percentages summing to 100), not
    # `weightPercent`.
    - weight: 80
      serviceSubset: v1
    - weight: 20
      serviceSubset: v2

💻 Интеграция Consul с Nomad hcl

🔴 complex ⭐⭐⭐⭐⭐

Интеграция обнаружения сервисов Consul с планированием заданий Nomad

⏱️ 40 min 🏷️ consul, nomad, orchestration, batch, cron jobs
Prerequisites: HashiCorp Nomad, Consul, Docker, HCL knowledge
# Nomad Job Files with Consul Integration

# Consul Provider Configuration for Nomad
# NOTE(review): in the documented Nomad agent `consul` stanza, `ssl` and
# `verify_ssl` are plain booleans and `auth` is a "user:pass" string; the
# nested `auth {}` / `ssl {}` blocks below may not parse -- verify against
# the Nomad version in use. Also note `verify = false` disables TLS
# certificate verification, which is unsafe outside development.
consul {
  address = "127.0.0.1:8500"
  token = "${CONSUL_TOKEN}"
  auth {
    enabled = true
  }
  ssl {
    enabled = true
    verify = false
  }
}

# Web Application Job with Consul Connect
job "web-app" {
  datacenters = ["dc1"]
  type = "service"

  # Rolling updates: one allocation at a time, gated on Consul health checks.
  update {
    max_parallel = 1
    health_check = "checks"
    min_healthy_time = "10s"
    healthy_deadline = "3m"
    progress_deadline = "10m"
    auto_revert = false
    canary = 0
  }

  migrate {
    max_parallel = 1
    health_check = "checks"
    min_healthy_time = "10s"
    healthy_deadline = "5m"
  }

  # Unlimited rescheduling with exponential backoff (30s up to 1h).
  reschedule {
    delay = "30s"
    delay_function = "exponential"
    max_delay = "1h"
    unlimited = true
  }

  group "web" {
    count = 2

    network {
      mode = "bridge"

      port "http" {
        static = 8080
        to = 8080
      }
    }

    service {
      name = "web"
      port = "8080"

      # Envoy sidecar upstreams: "api" is reachable on localhost:8081 and
      # "cache" on localhost:6379 inside the task's network namespace.
      connect {
        sidecar_service {
          proxy {
            upstreams {
              destination_name = "api"
              local_bind_port = 8081
            }
            upstreams {
              destination_name = "cache"
              local_bind_port = 6379
            }
          }
        }
      }

      tags = [
        "web",
        "v1",
        "urlprefix-web.example.com"
      ]

      # HTTP health check; check_restart restarts the task after 3
      # consecutive failures (90s grace after task start).
      check {
        type = "http"
        path = "/health"
        interval = "10s"
        timeout = "3s"
        check_restart {
          limit = 3
          grace = "90s"
          ignore_warnings = false
        }
      }
    }

    task "server" {
      driver = "docker"

      config {
        image = "nginx:alpine"
        ports = ["http"]

        volumes = [
          "local/nginx.conf:/etc/nginx/conf.d/default.conf"
        ]
      }

      # Renders nginx.conf into the allocation dir;
      # {{ env "NOMAD_UPSTREAM_ADDR_api" }} resolves to the local bind
      # address of the Connect upstream declared above (127.0.0.1:8081).
      template {
        data = <<-EOF
          upstream api_backend {
            server {{ env "NOMAD_UPSTREAM_ADDR_api" }};
          }

          server {
            listen 8080;

            location /health {
              access_log off;
              return 200 'healthy\n';
              add_header Content-Type text/plain;
            }

            location /api/ {
              proxy_pass http://api_backend/;
              proxy_set_header Host $host;
              proxy_set_header X-Real-IP $remote_addr;
              proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            }

            location / {
              root   /usr/share/nginx/html;
              index  index.html index.htm;
            }
          }
        EOF

        destination = "local/nginx.conf"
        change_mode = "restart"
      }

      env {
        CONSUL_HTTP_ADDR = "127.0.0.1:8500"
      }

      resources {
        cpu    = 200 # MHz
        memory = 128 # MB
      }

      kill_timeout = "30s"
      kill_signal = "SIGINT"

      restart {
        attempts = 3
        interval = "30s"
        delay = "15s"
        mode = "fail"
      }
    }
  }
}

# API Service Job with Database Connection
job "api-service" {
  datacenters = ["dc1"]
  type = "service"

  group "api" {
    count = 3

    network {
      mode = "bridge"

      port "http" {
        static = 9090
        to = 9090
      }
    }

    service {
      name = "api"
      port = "9090"

      # Expose the Envoy sidecar's Prometheus metrics endpoint on :9102.
      connect {
        sidecar_service {
          proxy {
            config {
              envoy_prometheus_bind_addr = "0.0.0.0:9102"
            }
          }
        }
      }

      tags = [
        "api",
        "v1",
        "metrics"
      ]

      check {
        type = "http"
        path = "/health"
        interval = "15s"
        timeout = "3s"
      }

      # Service metadata; version is what resolver subset filters match on.
      meta {
        version = "1.0"
        canary = "false"
      }
    }

    task "server" {
      driver = "docker"

      config {
        image = "python:3.9-slim"

        ports = ["http"]

        volumes = [
          "local/app:/app"
        ]

        command = "python"
        args = ["-m", "flask", "run", "--host=0.0.0.0", "--port=9090"]
      }

      # Writes the Flask app into the allocation dir. Single braces in the
      # Python source are safe inside a Nomad template (only {{ }} is
      # template syntax).
      # NOTE(review): the script imports redis and psycopg2, which
      # python:3.9-slim does not bundle, and requirements.txt below is
      # written but never pip-installed -- confirm how dependencies are
      # provided. Also `flask run` needs FLASK_APP pointed at /app/app.py.
      template {
        data = <<-EOF
          from flask import Flask, jsonify, request
          import os
          import redis
          import psycopg2
          import logging

          app = Flask(__name__)

          # Configure logging
          logging.basicConfig(level=logging.INFO)
          logger = logging.getLogger(__name__)

          # Redis connection (through Consul Connect)
          redis_host = os.getenv('REDIS_HOST', 'localhost')
          redis_client = redis.Redis(host=redis_host, port=6379, decode_responses=True)

          # Database connection
          db_host = os.getenv('DB_HOST', 'localhost')
          db_name = os.getenv('DB_NAME', 'app_db')
          db_user = os.getenv('DB_USER', 'app_user')
          db_password = os.getenv('DB_PASSWORD', 'password')

          def get_db_connection():
              return psycopg2.connect(
                  host=db_host,
                  database=db_name,
                  user=db_user,
                  password=db_password
              )

          @app.route('/health')
          def health():
              try:
                  # Test Redis connection
                  redis_client.ping()
                  return jsonify({'status': 'healthy'})
              except Exception as e:
                  logger.error(f'Health check failed: {e}')
                  return jsonify({'status': 'unhealthy', 'error': str(e)}), 503

          @app.route('/api/users')
          def get_users():
              try:
                  conn = get_db_connection()
                  cur = conn.cursor()
                  cur.execute("SELECT id, name, email FROM users")
                  users = cur.fetchall()
                  cur.close()
                  conn.close()

                  return jsonify({
                      'users': [
                          {'id': user[0], 'name': user[1], 'email': user[2]}
                          for user in users
                      ]
                  })
              except Exception as e:
                  logger.error(f'Database error: {e}')
                  return jsonify({'error': 'Database error'}), 500

          @app.route('/api/cache/<key>')
          def get_cache(key):
              try:
                  value = redis_client.get(key)
                  if value:
                      return jsonify({'key': key, 'value': value})
                  else:
                      return jsonify({'error': 'Key not found'}), 404
              except Exception as e:
                  return jsonify({'error': str(e)}), 500

          if __name__ == '__main__':
              app.run(host='0.0.0.0', port=9090, debug=True)
        EOF

        destination = "local/app/app.py"
        change_mode = "restart"
      }

      template {
        data = <<-EOF
          Flask==2.3.2
          redis==4.6.0
          psycopg2-binary==2.9.6
          gunicorn==21.2.0
        EOF

        destination = "local/app/requirements.txt"
        change_mode = "noop"
      }

      # NOTE(review): Nomad does not run consul-template over the `env`
      # stanza, so REDIS_HOST likely receives the literal "{{ env ... }}"
      # text; the documented pattern is a `template` block with `env = true`.
      # `${db_host}` etc. are not declared anywhere visible in this file --
      # confirm they are supplied as HCL2 variables at submit time.
      env {
        CONSUL_HTTP_ADDR = "127.0.0.1:8500"
        REDIS_HOST = "{{ env "NOMAD_UPSTREAM_ADDR_cache" }}"
        DB_HOST = "${db_host}"
        DB_NAME = "${db_name}"
        DB_USER = "${db_user}"
        DB_PASSWORD = "${db_password}"
      }

      resources {
        cpu    = 500 # MHz
        memory = 512 # MB
      }

      kill_timeout = "30s"
    }
  }
}

# Batch Job with Consul Template
job "batch-processor" {
  datacenters = ["dc1"]
  type = "batch"

  # Dispatch-only job: payloads are forbidden; input_file/output_path meta
  # must be supplied at `nomad job dispatch` time.
  parameterized {
    payload = "forbidden"
    meta_required = ["input_file", "output_path"]
  }

  group "processor" {
    count = 1

    task "process" {
      driver = "docker"

      config {
        image = "python:3.9-slim"

        volumes = [
          "local/scripts:/scripts"
        ]

        command = "python"
        args = ["/scripts/process_data.py"]
      }

      # Writes the processing script into the allocation dir.
      # NOTE(review): the script imports `requests`, which python:3.9-slim
      # does not bundle -- confirm how it is installed.
      template {
        data = <<-EOF
          import os
          import sys
          import requests
          import logging
          from datetime import datetime

          # Configure logging
          logging.basicConfig(level=logging.INFO)
          logger = logging.getLogger(__name__)

          def get_service_address(service_name):
              """Get service address from Consul"""
              consul_url = os.getenv('CONSUL_HTTP_ADDR', '127.0.0.1:8500')
              try:
                  response = requests.get(f'http://{consul_url}/v1/health/service/{service_name}')
                  if response.status_code == 200:
                      services = response.json()
                      if services:
                          service = services[0]
                          return f"{service['Service']['Address']}:{service['Service']['Port']}"
              except Exception as e:
                  logger.error(f"Error getting service {service_name}: {e}")
              return None

          def main():
              input_file = os.getenv('INPUT_FILE')
              output_path = os.getenv('OUTPUT_PATH')

              api_address = get_service_address('api')
              if not api_address:
                  logger.error("Could not find API service")
                  sys.exit(1)

              logger.info(f"Processing {input_file} using API at {api_address}")

              # Process data logic here
              # This is a placeholder for actual processing
              output_file = os.path.join(output_path, f"processed_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json")

              # Create output file
              with open(output_file, 'w') as f:
                  f.write('{"processed": true, "timestamp": "' + datetime.now().isoformat() + '"}')

              logger.info(f"Processing complete. Output saved to {output_file}")

          if __name__ == '__main__':
              main()
        EOF

        destination = "local/scripts/process_data.py"
        change_mode = "noop"
      }

      # NOTE(review): `${meta.input_file}` interpolates CLIENT NODE meta;
      # dispatch-time meta is exposed as `${NOMAD_META_input_file}` -- verify
      # which one is intended here.
      env {
        INPUT_FILE = "${meta.input_file}"
        OUTPUT_PATH = "${meta.output_path}"
        CONSUL_HTTP_ADDR = "127.0.0.1:8500"
      }

      resources {
        cpu    = 1000 # MHz
        memory = 1024 # MB
      }

      # Batch semantics: never retry a failed dispatch automatically.
      restart {
        attempts = 0
        mode = "fail"
      }
    }
  }
}

# Periodic Job for Cleanup
job "periodic-cleanup" {
  datacenters = ["dc1"]
  type = "batch"

  periodic {
    cron = "0 2 * * *"  # Run daily at 2 AM
    prohibit_overlap = true
    time_zone = "America/Los_Angeles"
  }

  group "cleanup" {
    count = 1

    task "cleanup" {
      driver = "docker"

      config {
        image = "alpine:latest"
        command = "/bin/sh"
        args = ["/scripts/cleanup.sh"]

        volumes = [
          "local/scripts:/scripts"
        ]
      }

      template {
        data = <<-EOF
          #!/bin/sh

          set -e

          LOG_FILE="/var/log/cleanup.log"
          API_SERVICE="api"

          log() {
              echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" | tee -a "$LOG_FILE"
          }

          # POSIX sh functions are invoked like commands -- `log "msg"`,
          # never `log("msg")` (the parenthesized form is a syntax error).
          log "Starting cleanup process"

          # Get API service address from Consul
          # NOTE(review): `consul catalog services` lists service NAMES, not
          # addresses, and neither the consul CLI nor curl ship in
          # alpine:latest -- confirm the image/tooling before relying on this.
          API_ADDRESS=$(consul catalog services | grep "$API_SERVICE" | cut -d' ' -f1)

          if [ -n "$API_ADDRESS" ]; then
              log "Found API service: $API_ADDRESS"

              # Call API cleanup endpoint
              if curl -f -X POST "http://localhost:9090/api/cleanup" > /dev/null 2>&1; then
                  log "API cleanup successful"
              else
                  log "API cleanup failed"
                  exit 1
              fi
          else
              log "API service not found in Consul"
              exit 1
          fi

          # Clean up old log files
          find /var/log -name "*.log" -mtime +30 -delete
          log "Old log files cleaned up"

          # Clean up temporary files
          find /tmp -type f -mtime +1 -delete
          log "Temporary files cleaned up"

          log "Cleanup process completed"
        EOF

        destination = "local/scripts/cleanup.sh"
        perms = "755"
        change_mode = "noop"
      }

      env {
        CONSUL_HTTP_ADDR = "127.0.0.1:8500"
      }

      resources {
        cpu    = 200 # MHz
        memory = 128 # MB
      }

      restart {
        attempts = 3
        delay = "30s"
      }
    }
  }
}