Примеры Ansible Playbook

Полные примеры Ansible playbook для автоматизации, управления конфигурацией и развертывания

💻 Базовая Настройка Web Сервера yaml

🟢 simple ⭐⭐

Полная автоматизация web сервера с Nginx, SSL и настройкой firewall

⏱️ 15 min 🏷️ ansible, automation, web server, nginx
Prerequisites: Ansible basics, Linux administration, Web server concepts
# Basic Web Server Setup with Ansible
# This playbook sets up a complete web server with Nginx, SSL, and security

---
# 1. Site Configuration (site.yml)
# Entry-point play: applies all four roles, in order, to every host in
# the [webservers] inventory group.
- name: Configure web servers
  hosts: webservers
  become: true  # canonical lowercase boolean (yamllint "truthy"), not "yes"
  vars_files:
    - vars/main.yml
  roles:
    - common
    - nginx
    - ssl
    - firewall

# 2. Inventory (inventory/hosts)
[webservers]
web1.example.com ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/id_rsa
web2.example.com ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/id_rsa

[webservers:vars]
ansible_python_interpreter=/usr/bin/python3

# 3. Variables (vars/main.yml)
# System Configuration
timezone: "America/Los_Angeles"
locale: "en_US.UTF-8"

# Web Server Configuration
server_name: "{{ inventory_hostname }}"
web_root: "/var/www/{{ server_name }}"
nginx_user: "www-data"
nginx_group: "www-data"

# SSL Configuration
ssl_enabled: true
ssl_country: "US"
ssl_state: "California"
ssl_city: "San Francisco"
ssl_organization: "Company Inc"
ssl_organizational_unit: "IT Department"
ssl_email: "admin@{{ server_name }}"

# Firewall Configuration
firewall_enabled: true
firewall_allowed_ports:
  - 22    # SSH
  - 80    # HTTP
  - 443   # HTTPS

# Application Configuration
app_name: "myapp"
app_version: "1.0.0"
app_repo_url: "https://github.com/company/myapp.git"
app_branch: "main"

# 4. Common Role (roles/common/tasks/main.yml)
# Baseline host preparation: packages, timezone/locale, app user and dirs.
- name: Update apt cache
  apt:
    update_cache: true  # canonical booleans (true/false) instead of yes/no
    cache_valid_time: 3600  # skip the refresh if the cache is < 1 hour old

- name: Install required packages
  apt:
    name:
      - curl
      - wget
      - git
      - unzip
      - htop
      - vim
      - tree
      - software-properties-common
    state: present

- name: Set timezone
  timezone:
    name: "{{ timezone }}"

- name: Set locale
  locale_gen:
    name: "{{ locale }}"
    state: present

# Dedicated unprivileged account that owns the application files below.
- name: Create system user for web applications
  user:
    name: "{{ app_name }}"
    shell: /bin/bash
    home: "/opt/{{ app_name }}"
    create_home: true
    state: present

- name: Create log directory
  file:
    path: "/var/log/{{ app_name }}"
    state: directory
    owner: "{{ app_name }}"
    group: "{{ app_name }}"
    mode: '0755'

- name: Create application directory
  file:
    path: "/opt/{{ app_name }}/apps"
    state: directory
    owner: "{{ app_name }}"
    group: "{{ app_name }}"
    mode: '0755'

# 5. Nginx Role (roles/nginx/tasks/main.yml)
- name: Install Nginx
  apt:
    name: nginx
    state: present

# Only the web root is owned by the nginx runtime user. The original looped
# one file task over the web root AND /etc/nginx/* AND /var/log/nginx, chowning
# them all to www-data -- which lets a compromised worker process rewrite its
# own configuration. Config and log directories stay root-owned (the nginx
# master process runs as root and opens the logs itself).
- name: Create web root directory
  file:
    path: "{{ web_root }}"
    state: directory
    owner: "{{ nginx_user }}"
    group: "{{ nginx_group }}"
    mode: '0755'

- name: Create Nginx configuration and log directories
  file:
    path: "{{ item }}"
    state: directory
    owner: root
    group: root
    mode: '0755'
  loop:
    - /etc/nginx/sites-available
    - /etc/nginx/sites-enabled
    - /var/log/nginx

- name: Copy Nginx main configuration
  template:
    src: nginx.conf.j2
    dest: /etc/nginx/nginx.conf
    owner: root
    group: root
    mode: '0644'
  notify: restart nginx

- name: Create Nginx site configuration
  template:
    src: site.conf.j2
    dest: "/etc/nginx/sites-available/{{ server_name }}"
    owner: root
    group: root
    mode: '0644'
  notify: restart nginx

# Debian-style vhost activation: symlink from sites-available.
- name: Enable Nginx site
  file:
    src: "/etc/nginx/sites-available/{{ server_name }}"
    dest: "/etc/nginx/sites-enabled/{{ server_name }}"
    state: link
  notify: restart nginx

- name: Remove default Nginx site
  file:
    path: "/etc/nginx/sites-enabled/default"
    state: absent
  notify: restart nginx

- name: Start and enable Nginx
  service:
    name: nginx
    state: started
    enabled: true

# 6. Nginx Templates (roles/nginx/templates/nginx.conf.j2)
user {{ nginx_user }};
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

events {
    worker_connections 768;
    # multi_accept on;
}

http {
    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    types_hash_max_size 2048;
    # server_tokens off;

    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    ##
    # SSL Settings
    ##
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers on;
    ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
    ssl_ecdh_curve secp384r1;
    ssl_session_timeout  10m;
    ssl_session_cache shared:SSL:10m;
    ssl_session_tickets off;

    ##
    # Logging Settings
    ##
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;

    ##
    # Gzip Settings
    ##
    gzip on;
    gzip_disable "msie6";
    gzip_vary on;
    gzip_proxied any;
    gzip_comp_level 6;
    gzip_types
        text/plain
        text/css
        application/json
        application/javascript
        text/xml
        application/xml
        application/xml+rss
        text/javascript;

    ##
    # Include server configurations
    ##
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}

# 7. Nginx Site Template (roles/nginx/templates/site.conf.j2)
{# The original opened "{% if ssl_enabled %}" mid-server-block and closed it
   inside the next one, so with ssl_enabled=false the rendered file was
   malformed. An explicit if/else keeps both renderings valid nginx. #}
{% if ssl_enabled %}
server {
    listen 80;
    listen [::]:80;
    server_name {{ server_name }};

    # Redirect all HTTP traffic to HTTPS
    return 301 https://$server_name$request_uri;
}

server {
    listen 443 ssl http2;
    listen [::]:443 ssl http2;
    server_name {{ server_name }};

    # SSL Configuration
    ssl_certificate /etc/ssl/certs/{{ server_name }}.crt;
    ssl_certificate_key /etc/ssl/private/{{ server_name }}.key;
    ssl_trusted_certificate /etc/ssl/certs/{{ server_name }}-ca.crt;
{% else %}
server {
    listen 80;
    listen [::]:80;
    server_name {{ server_name }};
{% endif %}

    root {{ web_root }};
    index index.html index.htm index.php;

    # Security Headers
    add_header X-Frame-Options "SAMEORIGIN" always;
    add_header X-XSS-Protection "1; mode=block" always;
    add_header X-Content-Type-Options "nosniff" always;
    add_header Referrer-Policy "no-referrer-when-downgrade" always;
    add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;

    # Logging
    access_log /var/log/nginx/{{ server_name }}_access.log;
    error_log /var/log/nginx/{{ server_name }}_error.log;

    # Main location
    location / {
        try_files $uri $uri/ =404;
    }

    # Static file caching. The dot must be escaped: an unescaped "." in a
    # location regex matches ANY character, so ".jpg" would also match "Xjpg".
    location ~* \.(jpg|jpeg|png|gif|ico|css|js)$ {
        expires 1y;
        add_header Cache-Control "public, immutable";
    }

    # PHP processing (if needed)
    location ~ \.php$ {
        include snippets/fastcgi-php.conf;
        fastcgi_pass unix:/var/run/php/php7.4-fpm.sock;
    }

    # Deny access to .htaccess files
    location ~ /\.ht {
        deny all;
    }
}

# 8. SSL Role (roles/ssl/tasks/main.yml)
- name: Create public SSL certificate directory
  file:
    path: /etc/ssl/certs
    state: directory
    mode: '0755'

# The private-key directory must not be world-readable; the original loop
# applied 0755 to it along with the public certs directory.
- name: Create private SSL key directory
  file:
    path: /etc/ssl/private
    state: directory
    mode: '0700'

- name: Generate OpenSSL private key
  openssl_privatekey:
    path: "/etc/ssl/private/{{ server_name }}.key"
    size: 2048
    type: RSA
    mode: '0600'

- name: Create OpenSSL certificate signing request
  openssl_csr:
    path: "/etc/ssl/{{ server_name }}.csr"
    privatekey_path: "/etc/ssl/private/{{ server_name }}.key"
    common_name: "{{ server_name }}"
    country_name: "{{ ssl_country }}"
    state_or_province_name: "{{ ssl_state }}"
    locality_name: "{{ ssl_city }}"
    organization_name: "{{ ssl_organization }}"
    organizational_unit_name: "{{ ssl_organizational_unit }}"
    email_address: "{{ ssl_email }}"

- name: Generate self-signed SSL certificate
  openssl_certificate:
    path: "/etc/ssl/certs/{{ server_name }}.crt"
    privatekey_path: "/etc/ssl/private/{{ server_name }}.key"
    csr_path: "/etc/ssl/{{ server_name }}.csr"
    provider: selfsigned
    selfsigned_not_after: "+3650d"  # valid ~10 years
    mode: '0644'

# "creates" makes this shell step idempotent: it is skipped once the
# bundle file exists (the original re-ran and reported "changed" every play).
- name: Create SSL certificate bundle
  shell: |
    cat /etc/ssl/certs/{{ server_name }}.crt /etc/ssl/certs/ca-certificates.crt > /etc/ssl/certs/{{ server_name }}-ca.crt
    chmod 644 /etc/ssl/certs/{{ server_name }}-ca.crt
  args:
    creates: "/etc/ssl/certs/{{ server_name }}-ca.crt"

# 9. Firewall Role (roles/firewall/tasks/main.yml)
- name: Install UFW (Uncomplicated Firewall)
  apt:
    name: ufw
    state: present

# NOTE(review): "state: reset" wipes every rule on each run, so this role is
# deliberately non-idempotent -- the rule set is rebuilt from scratch below.
- name: Reset UFW to default settings
  ufw:
    state: reset

# The original combined both policies under ONE task with two "ufw:" keys --
# a duplicate mapping key where the last entry silently wins, so the
# deny-incoming default was never applied. One task per policy fixes that.
- name: Deny incoming traffic by default
  ufw:
    policy: deny
    direction: incoming

- name: Allow outgoing traffic by default
  ufw:
    policy: allow
    direction: outgoing

# Allow SSH by application profile first so a mid-run failure cannot lock
# us out once the firewall is enabled.
- name: Allow SSH connections
  ufw:
    rule: allow
    name: OpenSSH

- name: Allow specific ports
  ufw:
    rule: allow
    port: "{{ item }}"
    proto: tcp
  loop: "{{ firewall_allowed_ports }}"
  when: firewall_enabled

- name: Enable UFW firewall
  ufw:
    state: enabled

# 10. Handlers (handlers/main.yml)
- name: restart nginx
  service:
    name: nginx
    state: restarted

- name: restart php-fpm
  service:
    name: php7.4-fpm
    state: restarted

- name: reload firewall
  ufw:
    state: reloaded

# 11. README.md
# Ansible Web Server Setup

## Overview
This Ansible playbook automates the complete setup of a production-ready web server with:
- Nginx web server
- SSL/TLS configuration
- Firewall setup (UFW)
- Security hardening
- Performance optimization

## Prerequisites
- Ansible 2.9+
- Ubuntu 20.04+ target servers
- SSH access with sudo privileges

## Usage

1. **Update inventory**
   ```bash
   # Edit inventory/hosts
   vim inventory/hosts
   ```

2. **Configure variables**
   ```bash
   # Edit variables
   vim vars/main.yml
   ```

3. **Run the playbook**
   ```bash
   ansible-playbook -i inventory/hosts site.yml
   ```

4. **Verify installation**
   ```bash
   # Check Nginx status
   sudo systemctl status nginx

   # Test website
   curl -I https://your-server.com
   ```

## File Structure
```
.
├── site.yml              # Main playbook
├── inventory/
│   └── hosts             # Server inventory
├── vars/
│   └── main.yml         # Variables
├── roles/
│   ├── common/
│   │   └── tasks/
│   │       └── main.yml
│   ├── nginx/
│   │   ├── tasks/
│   │   │   └── main.yml
│   │   └── templates/
│   │       ├── nginx.conf.j2
│   │       └── site.conf.j2
│   ├── ssl/
│   │   └── tasks/
│   │       └── main.yml
│   └── firewall/
│       └── tasks/
│           └── main.yml
└── handlers/
    └── main.yml         # Event handlers
```

## Customization

### Add New Sites
1. Update inventory with new hosts
2. Adjust variables in vars/main.yml
3. Run playbook against new hosts

### Modify SSL Settings
Update SSL variables in vars/main.yml:
- ssl_enabled
- ssl_country
- ssl_state
- ssl_city
- ssl_organization

### Firewall Configuration
Adjust firewall_allowed_ports in vars/main.yml for your specific requirements.

💻 Автоматизированная Настройка Базы Данных yaml

🟡 intermediate ⭐⭐⭐⭐

Полная автоматизация базы данных с MySQL, репликацией, бэкапом и мониторингом

⏱️ 25 min 🏷️ ansible, database, mysql, automation
Prerequisites: Ansible advanced, MySQL administration, Database concepts
# Database Automation with Ansible
# Complete setup for MySQL with replication, backup, and monitoring

---
# 1. Database Setup Playbook (site.yml)
- name: Setup database servers
  hosts: databases
  become: yes
  vars_files:
    - vars/main.yml
  roles:
    - mysql
    - replication
    - backup
    - monitoring

- name: Configure replication
  hosts: db_replicas
  become: yes
  vars_files:
    - vars/main.yml
  roles:
    - replica_setup

# 2. Inventory (inventory/hosts)
[databases]
db1.example.com ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/id_rsa
db2.example.com ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/id_rsa

[db_master]
db1.example.com

[db_replicas]
db2.example.com

[databases:vars]
ansible_python_interpreter=/usr/bin/python3

# 3. Variables (vars/main.yml)
# NOTE(review): plaintext credentials belong in an ansible-vault encrypted
# file, not a committed vars file -- they are inline here only for the demo.
# MySQL Configuration
mysql_root_password: "StrongPassword123!"
mysql_app_user: "app_user"
mysql_app_password: "AppPassword456!"
mysql_app_database: "production_app"

# MySQL Server Configuration
mysql_bind_address: "0.0.0.0"
# server-id must be unique per host. The original expression used
# "ansible_loop.index", which only exists inside a task loop (with extended
# loop_control) and raises "undefined variable" when evaluated in vars.
# Deriving the id from the host's position in the [databases] group gives a
# stable, unique value on every host.
mysql_server_id: "{{ groups['databases'].index(inventory_hostname) + 1 }}"
mysql_max_connections: 1000
mysql_innodb_buffer_pool_size: "256M"
mysql_log_bin: "mysql-bin"
mysql_binlog_format: "ROW"
mysql_expire_logs_days: 7

# Replication Configuration
replication_enabled: true
replication_user: "repl_user"
replication_password: "ReplPassword789!"
# Resolved to the master's primary IPv4 address via gathered facts.
replication_master_host: "{{ hostvars[groups['db_master'][0]]['ansible_default_ipv4']['address'] }}"

# Backup Configuration
backup_enabled: true
backup_retention_days: 30
backup_s3_bucket: "company-mysql-backups"
backup_s3_region: "us-west-2"
backup_schedule: "0 2 * * *"  # Daily at 2 AM (cron syntax)

# Monitoring Configuration
monitoring_enabled: true
monitoring_user: "monitoring_user"
monitoring_password: "MonitoringPass123!"
monitoring_allowed_hosts:
  - "10.0.0.0/8"
  - "192.168.0.0/16"

# Security Configuration
mysql_remove_anonymous_users: true
mysql_remove_remote_root: true
mysql_disallow_remote_root: true

# Performance Tuning
performance_schema: true
slow_query_log: true
slow_query_log_file: "/var/log/mysql/slow.log"
long_query_time: 2

# 4. MySQL Role (roles/mysql/tasks/main.yml)
- name: Install MySQL dependencies
  apt:
    name:
      - python3-pymysql
      - mysql-server
      - mysql-client
    state: present

- name: Start MySQL service
  service:
    name: mysql
    state: started
    enabled: yes

- name: Update MySQL configuration
  template:
    src: my.cnf.j2
    dest: /etc/mysql/my.cnf
    owner: root
    group: root
    mode: '0644'
  notify: restart mysql

- name: Set MySQL root password
  mysql_user:
    name: root
    password: "{{ mysql_root_password }}"
    login_unix_socket: /var/run/mysqld/mysqld.sock
    state: present

- name: Create .my.cnf for root user
  template:
    src: my_root.cnf.j2
    dest: /root/.my.cnf
    owner: root
    group: root
    mode: '0600'

# Honor the mysql_remove_anonymous_users toggle from vars/main.yml -- the
# variable was declared but never checked, so removal was unconditional.
- name: Remove anonymous MySQL users
  mysql_user:
    name: ""
    host_all: true
    state: absent
    login_user: root
    login_password: "{{ mysql_root_password }}"
  when: mysql_remove_anonymous_users

- name: Remove remote MySQL root access
  mysql_user:
    name: root
    host: "{{ item }}"
    state: absent
    login_user: root
    login_password: "{{ mysql_root_password }}"
  loop:
    - "{{ ansible_fqdn }}"
    - "{{ ansible_hostname }}"
    - "{{ ansible_default_ipv4.address }}"
    - "::1"
    - "127.0.0.1"
    - "%"
  when: mysql_remove_remote_root

- name: Create application database
  mysql_db:
    name: "{{ mysql_app_database }}"
    state: present
    login_user: root
    login_password: "{{ mysql_root_password }}"

- name: Create application user
  mysql_user:
    name: "{{ mysql_app_user }}"
    password: "{{ mysql_app_password }}"
    priv: "{{ mysql_app_database }}.*:ALL"
    host: "%"
    state: present
    login_user: root
    login_password: "{{ mysql_root_password }}"

- name: Create monitoring user
  mysql_user:
    name: "{{ monitoring_user }}"
    password: "{{ monitoring_password }}"
    priv: "*.*:SELECT,SHOW VIEW"
    host: "{{ item }}"
    state: present
    login_user: root
    login_password: "{{ mysql_root_password }}"
  loop: "{{ monitoring_allowed_hosts }}"
  when: monitoring_enabled

- name: Create replication user (master only)
  mysql_user:
    name: "{{ replication_user }}"
    password: "{{ replication_password }}"
    priv: "*.*:REPLICATION SLAVE"
    host: "%"
    state: present
    login_user: root
    login_password: "{{ mysql_root_password }}"
  when: inventory_hostname in groups['db_master']

# 5. MySQL Configuration Template (roles/mysql/templates/my.cnf.j2)
[mysqld]
# Basic Settings
user            = mysql
pid-file        = /var/run/mysqld/mysqld.pid
socket          = /var/run/mysqld/mysqld.sock
port            = 3306
basedir         = /usr
datadir         = /var/lib/mysql
tmpdir          = /tmp
lc-messages-dir = /usr/share/mysql
bind-address    = {{ mysql_bind_address }}

# Character Settings
character-set-server  = utf8mb4
collation-server      = utf8mb4_unicode_ci

# Performance Settings
max_connections       = {{ mysql_max_connections }}
innodb_buffer_pool_size = {{ mysql_innodb_buffer_pool_size }}
innodb_log_file_size   = 128M
innodb_flush_log_at_trx_commit = 1
innodb_flush_method    = O_DIRECT

# Logging
log_error               = /var/log/mysql/error.log
slow_query_log          = {{ 'ON' if slow_query_log else 'OFF' }}
slow_query_log_file     = {{ slow_query_log_file }}
long_query_time         = {{ long_query_time }}

# Binary Logging (for replication)
log-bin                 = {{ mysql_log_bin }}
binlog_format           = {{ mysql_binlog_format }}
expire_logs_days        = {{ mysql_expire_logs_days }}
server-id               = {{ mysql_server_id }}

# Performance Schema
performance_schema      = {{ 'ON' if performance_schema else 'OFF' }}

# Security
local-infile = 0
skip-show-database = 1

[mysqldump]
quick
quote-names
max_allowed_packet  = 16M

[mysql]
# The original listed both "auto-rehash" and "disable-auto-rehash", which
# contradict each other (the later option wins). Keep tab-completion enabled.
auto-rehash
prompt = '\u@\h [\d]>\_ '

[isamchk]
key_buffer_size = 16M

!includedir /etc/mysql/conf.d/
!includedir /etc/mysql/mysql.conf.d/

# 6. Root MySQL Configuration (roles/mysql/templates/my_root.cnf.j2)
[client]
user=root
password={{ mysql_root_password }}
host=localhost
socket=/var/run/mysqld/mysqld.sock

[mysql]
user=root
password={{ mysql_root_password }}

[mysqldump]
user=root
password={{ mysql_root_password }}

# 7. Replication Role (roles/replication/tasks/main.yml)
# Flow: capture the binlog coordinates on the master via "register", then
# point each replica at them through hostvars -- the registered variable
# only exists on the master host, so replicas must read it cross-host.
# NOTE(review): getmaster/changemaster/startslave/getslave are the legacy
# mode names; recent community.mysql releases rename them (getprimary,
# changeprimary, ...) -- confirm against the installed collection version.
- name: Get master status
  mysql_replication:
    mode: getmaster
    login_user: root
    login_password: "{{ mysql_root_password }}"
  register: master_status
  when: inventory_hostname in groups['db_master']

# Reads File/Position from the master's registered master_status fact;
# this relies on the master task above having already run in this play.
- name: Configure replica
  mysql_replication:
    mode: changemaster
    master_host: "{{ replication_master_host }}"
    master_user: "{{ replication_user }}"
    master_password: "{{ replication_password }}"
    master_log_file: "{{ hostvars[groups['db_master'][0]]['master_status']['File'] }}"
    master_log_pos: "{{ hostvars[groups['db_master'][0]]['master_status']['Position'] }}"
    login_user: root
    login_password: "{{ mysql_root_password }}"
  when: inventory_hostname in groups['db_replicas']

- name: Start replica
  mysql_replication:
    mode: startslave
    login_user: root
    login_password: "{{ mysql_root_password }}"
  when: inventory_hostname in groups['db_replicas']

# Captured for the debug task below; useful fields include the IO/SQL
# thread states and Seconds_Behind_Master.
- name: Check replica status
  mysql_replication:
    mode: getslave
    login_user: root
    login_password: "{{ mysql_root_password }}"
  register: replica_status
  when: inventory_hostname in groups['db_replicas']

- name: Display replica status
  debug:
    var: replica_status
  when: inventory_hostname in groups['db_replicas']

# 8. Backup Role (roles/backup/tasks/main.yml)
- name: Install backup dependencies
  apt:
    name:
      - awscli
      - python3-boto3
      - percona-xtrabackup-24
    state: present
  when: backup_enabled

- name: Create backup directory
  file:
    path: /opt/mysql-backups
    state: directory
    mode: '0755'
  when: backup_enabled

- name: Create backup script
  template:
    src: backup.sh.j2
    dest: /opt/mysql-backups/backup.sh
    mode: '0755'
  when: backup_enabled

- name: Create backup cron job (master only)
  cron:
    name: "MySQL Backup"
    minute: "0"
    hour: "2"
    job: "/opt/mysql-backups/backup.sh"
    user: root
  when: backup_enabled and inventory_hostname in groups['db_master']

# 9. Backup Script Template (roles/backup/templates/backup.sh.j2)
#!/bin/bash
# MySQL backup: dump the application database, ship it to S3, and prune
# local and remote copies that are past their retention window.
# Runs as root from cron; mysqldump credentials come from /root/.my.cnf,
# which the mysql role deploys. The original passed the *app* user's
# password on the command line AND used --all-databases, which that user
# is not privileged for, so the dump failed.
set -euo pipefail

# Variables (rendered by the Ansible template module)
BACKUP_DIR="/opt/mysql-backups"
DATE=$(date +%Y%m%d_%H%M%S)
DB_NAME="{{ mysql_app_database }}"
S3_BUCKET="{{ backup_s3_bucket }}"
S3_REGION="{{ backup_s3_region }}"
RETENTION_DAYS={{ backup_retention_days }}

# Create backup directory
mkdir -p "$BACKUP_DIR/$DATE"

# Take database backup
echo "Starting database backup..."
mysqldump --single-transaction --routines --triggers --databases "$DB_NAME" | gzip > "$BACKUP_DIR/$DATE/mysql_backup_$DATE.sql.gz"

# Upload to S3
echo "Uploading backup to S3..."
aws s3 cp "$BACKUP_DIR/$DATE/mysql_backup_$DATE.sql.gz" "s3://$S3_BUCKET/mysql/" --region "$S3_REGION"

# Clean local backups older than 7 days (only the dated subdirectories,
# never BACKUP_DIR itself)
find "$BACKUP_DIR" -mindepth 1 -maxdepth 1 -type d -mtime +7 -exec rm -rf {} \;

# Clean S3 backups older than retention days
aws s3 ls "s3://$S3_BUCKET/mysql/" --region "$S3_REGION" | while read -r line; do
    createDate=$(echo "$line" | awk '{print $1" "$2}')
    createDate=$(date -d "$createDate" +%s)
    olderThan=$(date -d "$RETENTION_DAYS days ago" +%s)
    if [[ $createDate -lt $olderThan ]]; then
        fileName=$(echo "$line" | awk '{print $4}')
        aws s3 rm "s3://$S3_BUCKET/mysql/$fileName" --region "$S3_REGION"
    fi
done

echo "Backup completed successfully!"

# 10. Monitoring Role (roles/monitoring/tasks/main.yml)
- name: Install monitoring tools
  apt:
    name:
      - monitoring-plugins
      - nagios-plugins
      - python3-psutil
    state: present
  when: monitoring_enabled

# The directory must exist BEFORE the template task writes into it -- the
# original ran these two tasks in the opposite order, so the first play
# failed trying to render the script into a missing directory.
- name: Create monitoring directory
  file:
    path: /opt/mysql-monitoring
    state: directory
    mode: '0755'
  when: monitoring_enabled

- name: Create monitoring script
  template:
    src: mysql_monitoring.py.j2
    dest: /opt/mysql-monitoring/mysql_monitoring.py
    mode: '0755'
  when: monitoring_enabled

- name: Setup monitoring cron job
  cron:
    name: "MySQL Monitoring"
    minute: "*/5"
    job: "/opt/mysql-monitoring/mysql_monitoring.py"
    user: root
  when: monitoring_enabled

# 11. Handlers (handlers/main.yml)
- name: restart mysql
  service:
    name: mysql
    state: restarted

- name: reload mysql
  service:
    name: mysql
    state: reloaded

# 12. Additional Configuration Files

# Monitoring Script Template (roles/monitoring/templates/mysql_monitoring.py.j2)
#!/usr/bin/env python3
"""Lightweight MySQL liveness probe, rendered by Ansible and run from cron.

Exits 0 when MySQL answers a trivial query, 1 on any failure.
"""
import sys

import pymysql

# MySQL connection settings (placeholders substituted by the template module)
MYSQL_USER = "{{ monitoring_user }}"
MYSQL_PASS = "{{ monitoring_password }}"
MYSQL_HOST = "localhost"

def check_mysql():
    """Return 0 if MySQL responds to SELECT 1, else 1."""
    conn = None  # explicit sentinel instead of the fragile "'conn' in locals()"
    try:
        # Connect to MySQL
        conn = pymysql.connect(
            host=MYSQL_HOST,
            user=MYSQL_USER,
            password=MYSQL_PASS
        )

        with conn.cursor() as cursor:
            # A trivial round-trip proves the server is accepting queries.
            cursor.execute("SELECT 1")
            result = cursor.fetchone()

        if result:
            print("MySQL is running normally")
            return 0
        print("MySQL is not responding")
        return 1

    except Exception as e:
        print(f"MySQL check failed: {e}")
        return 1
    finally:
        if conn is not None:
            conn.close()

if __name__ == "__main__":
    sys.exit(check_mysql())

💻 Развертывание Приложения Kubernetes yaml

🔴 complex ⭐⭐⭐⭐⭐

Продвинутый Ansible для развертывания приложений Kubernetes с Helm, мониторингом и интеграцией CI/CD

⏱️ 35 min 🏷️ ansible, kubernetes, helm, automation
Prerequisites: Ansible expert, Kubernetes, Helm, DevOps concepts
# Kubernetes Application Deployment with Ansible
# Complete automation for Kubernetes apps with Helm, monitoring, and GitOps

---
# 1. Kubernetes Deployment Playbook (site.yml)
- name: Deploy Kubernetes Applications
  hosts: k8s_cluster
  become: yes
  vars_files:
    - vars/main.yml
  roles:
    - kubernetes-setup
    - ingress-controller
    - cert-manager
    - monitoring
    - application-deploy

- name: Setup monitoring stack
  hosts: k8s_cluster
  become: yes
  vars_files:
    - vars/main.yml
  roles:
    - prometheus
    - grafana
    - jaeger

# 2. Inventory (inventory/hosts)
# The kubernetes-setup role gates tasks on groups['k8s_masters'] and
# groups['k8s_workers']; the original inventory only defined [k8s_cluster],
# so every one of those conditions raised "undefined group". Define both
# groups and derive k8s_cluster from them via :children.
[k8s_masters]
k8s-master1.example.com ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/id_rsa

[k8s_workers]
k8s-worker1.example.com ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/id_rsa
k8s-worker2.example.com ansible_user=ubuntu ansible_ssh_private_key_file=~/.ssh/id_rsa

[k8s_cluster:children]
k8s_masters
k8s_workers

[k8s_cluster:vars]
ansible_python_interpreter=/usr/bin/python3
# INI inventory vars use key=value, not YAML "key:" syntax (original bug).
kubectl_config_file="/root/.kube/config"

# 3. Variables (vars/main.yml)
# Kubernetes Configuration
kubernetes_version: "1.28.0"
cluster_name: "production-cluster"
cluster_domain: "k8s.example.com"

# Application Configuration
app_name: "microservice-app"
app_namespace: "production"
app_version: "2.1.0"
app_replicas: 3

# Container Registry
container_registry: "registry.example.com"
app_image: "{{ container_registry }}/{{ app_name }}:{{ app_version }}"
image_pull_secret: "registry-secret"

# Ingress Configuration
ingress_enabled: true
ingress_host: "{{ app_name }}.{{ cluster_domain }}"
ingress_tls_enabled: true
cert_issuer_email: "admin@{{ cluster_domain }}"

# Database Configuration
database_enabled: true
database_type: "postgresql"
database_name: "{{ app_name }}_db"
database_user: "dbuser"
database_password: "{{ vault_database_password }}"

# Redis Configuration
redis_enabled: true
redis_name: "{{ app_name }}-redis"

# Monitoring Configuration
monitoring_enabled: true
prometheus_enabled: true
grafana_enabled: true
jaeger_enabled: true

# CI/CD Integration
ci_cd_enabled: true
webhook_url: "https://github.com/{{ app_name }}/webhooks"
webhook_secret: "{{ vault_webhook_secret }}"

# Resource Limits
app_cpu_request: "100m"
app_cpu_limit: "500m"
app_memory_request: "128Mi"
app_memory_limit: "512Mi"

# Health Checks
health_check_path: "/health"
readiness_check_path: "/ready"

# Autoscaling
hpa_enabled: true
hpa_min_replicas: 3
hpa_max_replicas: 10
hpa_cpu_target: 70
hpa_memory_target: 80

# 4. Kubernetes Setup Role (roles/kubernetes-setup/tasks/main.yml)
- name: Install required packages
  apt:
    name:
      - apt-transport-https
      - ca-certificates
      - curl
      - gnupg
      - lsb-release
    state: present

- name: Add Docker GPG key
  apt_key:
    url: https://download.docker.com/linux/ubuntu/gpg
    state: present

- name: Add Docker repository
  apt_repository:
    repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable"
    state: present

- name: Install Docker
  apt:
    name:
      - docker-ce
      - docker-ce-cli
      - containerd.io
    state: present

# NOTE(review): apt.kubernetes.io / packages.cloud.google.com were frozen in
# 2023 and have since been shut down; new installs must migrate to the
# community-owned repositories at pkgs.k8s.io (different repo layout and
# package revision suffix) -- confirm against the current Kubernetes docs.
- name: Add Kubernetes GPG key
  apt_key:
    url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
    state: present

- name: Add Kubernetes repository
  apt_repository:
    repo: "deb https://apt.kubernetes.io/ kubernetes-xenial main"
    state: present

# Pins kubelet/kubeadm/kubectl to one version; "-00" is the Debian package
# revision scheme used by the legacy apt.kubernetes.io repo.
- name: Install Kubernetes tools
  apt:
    name:
      - kubelet={{ kubernetes_version }}-00
      - kubeadm={{ kubernetes_version }}-00
      - kubectl={{ kubernetes_version }}-00
    state: present
    update_cache: yes
    force: yes

- name: Hold Kubernetes packages
  dpkg_selections:
    name: "{{ item }}"
    selection: hold
  loop:
    - kubelet
    - kubeadm
    - kubectl

- name: Start and enable Docker
  systemd:
    name: docker
    state: started
    enabled: yes

- name: Configure Docker daemon
  template:
    src: daemon.json.j2
    dest: /etc/docker/daemon.json
  notify: restart docker

- name: Create Kubernetes configuration directory
  file:
    path: /etc/kubernetes
    state: directory
    mode: '0755'

- name: Generate cluster configuration (master only)
  template:
    src: cluster-config.yaml.j2
    dest: /etc/kubernetes/cluster-config.yaml
  when: inventory_hostname in groups['k8s_masters']

# "creates" belongs under args:, not inside the shell script body -- the
# original passed it to the shell as literal text, so kubeadm init re-ran
# (and failed) on every subsequent play.
- name: Initialize Kubernetes cluster (master only)
  shell: kubeadm init --config /etc/kubernetes/cluster-config.yaml
  args:
    creates: /etc/kubernetes/admin.conf
  when: inventory_hostname in groups['k8s_masters']

# The symlink target directory must exist before the link is created.
- name: Create /root/.kube directory (master only)
  file:
    path: /root/.kube
    state: directory
    mode: '0700'
  when: inventory_hostname in groups['k8s_masters']

- name: Setup kubectl for root user (master only)
  file:
    src: /etc/kubernetes/admin.conf
    dest: /root/.kube/config
    state: link
    force: true
  when: inventory_hostname in groups['k8s_masters']

- name: Get join command (master only)
  shell: kubeadm token create --print-join-command
  register: kubeadm_join_command
  when: inventory_hostname in groups['k8s_masters']

# The join command is registered on the master host only, so workers must
# read it via hostvars -- the bare variable was undefined on worker hosts.
# "creates" also keeps the join idempotent across replays.
- name: Join worker nodes to cluster
  shell: "{{ hostvars[groups['k8s_masters'][0]]['kubeadm_join_command']['stdout'] }}"
  args:
    creates: /etc/kubernetes/kubelet.conf
  when: inventory_hostname in groups['k8s_workers']

- name: Install Flannel CNI plugin (master only)
  shell: kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
  when: inventory_hostname in groups['k8s_masters']

# 5. Ingress Controller Role (roles/ingress-controller/tasks/main.yml)
- name: Add Helm repository
  shell: |
    helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
    helm repo update
  when: inventory_hostname in groups['k8s_masters']

- name: Create ingress-nginx namespace
  kubernetes.core.k8s:
    kubeconfig: "{{ kubectl_config_file }}"
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: ingress-nginx
  when: inventory_hostname in groups['k8s_masters']

- name: Install NGINX Ingress Controller
  shell: |
    helm install ingress-nginx ingress-nginx/ingress-nginx       --namespace ingress-nginx       --create-namespace       --set controller.replicaCount=2       --set controller.nodeSelector."kubernetes.io/os"=linux       --set defaultBackend.nodeSelector."kubernetes.io/os"=linux       --set controller.admissionWebhooks.patch.nodeSelector."kubernetes.io/os"=linux
  when: inventory_hostname in groups['k8s_masters']

# 6. Cert Manager Role (roles/cert-manager/tasks/main.yml)
- name: Add Jetstack Helm repository
  shell: |
    helm repo add jetstack https://charts.jetstack.io
    helm repo update
  when: inventory_hostname in groups['k8s_masters']

- name: Create cert-manager namespace
  kubernetes.core.k8s:
    kubeconfig: "{{ kubectl_config_file }}"
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: cert-manager
  when: inventory_hostname in groups['k8s_masters']

- name: Install cert-manager CRDs
  shell: |
    kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.0/cert-manager.crds.yaml
  when: inventory_hostname in groups['k8s_masters']

- name: Install cert-manager
  shell: |
    helm install cert-manager jetstack/cert-manager       --namespace cert-manager       --create-namespace       --version v1.13.0
  when: inventory_hostname in groups['k8s_masters']

- name: Create ClusterIssuer for Let's Encrypt
  kubernetes.core.k8s:
    kubeconfig: "{{ kubectl_config_file }}"
    state: present
    definition:
      apiVersion: cert-manager.io/v1
      kind: ClusterIssuer
      metadata:
        name: letsencrypt-prod
      spec:
        acme:
          server: https://acme-v02.api.letsencrypt.org/directory
          email: "{{ cert_issuer_email }}"
          privateKeySecretRef:
            name: letsencrypt-prod-private-key
          solvers:
            - http01:
                ingress:
                  class: nginx
  when: inventory_hostname in groups['k8s_masters']

# 7. Application Deployment Role (roles/application-deploy/tasks/main.yml)
- name: Create application namespace
  kubernetes.core.k8s:
    kubeconfig: "{{ kubectl_config_file }}"
    state: present
    definition:
      apiVersion: v1
      kind: Namespace
      metadata:
        name: "{{ app_namespace }}"

- name: Create image pull secret
  kubernetes.core.k8s:
    kubeconfig: "{{ kubectl_config_file }}"
    state: present
    definition:
      apiVersion: v1
      kind: Secret
      metadata:
        name: "{{ image_pull_secret }}"
        namespace: "{{ app_namespace }}"
      type: kubernetes.io/dockerconfigjson
      data:
        .dockerconfigjson: "{{ vault_docker_config }}"

# Deploy (or upgrade) the application from the local chart. The helm
# module is idempotent: first run installs, later runs upgrade in place
# when any value below changes.
- name: Deploy application using Helm
  kubernetes.core.helm:
    kubeconfig: "{{ kubectl_config_file }}"
    name: "{{ app_name }}"
    # Chart path is resolved relative to the playbook, not a chart repo.
    chart_ref: "./charts/{{ app_name }}"
    release_namespace: "{{ app_namespace }}"
    # Overrides for the chart defaults (see the values.yaml example below).
    values:
      image:
        repository: "{{ container_registry }}/{{ app_name }}"
        tag: "{{ app_version }}"
        # Must match the Secret created by the "image pull secret" task.
        pullSecret: "{{ image_pull_secret }}"
      replicaCount: "{{ app_replicas }}"
      service:
        type: ClusterIP
        port: 80
      ingress:
        enabled: "{{ ingress_enabled }}"
        className: "nginx"
        hosts:
          - host: "{{ ingress_host }}"
            paths:
              - path: /
                pathType: Prefix
        # TLS secret is expected to be issued by cert-manager for this host.
        tls:
          - secretName: "{{ app_name }}-tls"
            hosts:
              - "{{ ingress_host }}"
      resources:
        requests:
          cpu: "{{ app_cpu_request }}"
          memory: "{{ app_memory_request }}"
        limits:
          cpu: "{{ app_cpu_limit }}"
          memory: "{{ app_memory_limit }}"
      # Probes target the container's named "http" port.
      livenessProbe:
        httpGet:
          path: "{{ health_check_path }}"
          port: http
        initialDelaySeconds: 30
        periodSeconds: 10
      readinessProbe:
        httpGet:
          path: "{{ readiness_check_path }}"
          port: http
        initialDelaySeconds: 5
        periodSeconds: 5
      # HPA settings; only take effect when hpa_enabled is true.
      autoscaling:
        enabled: "{{ hpa_enabled }}"
        minReplicas: "{{ hpa_min_replicas }}"
        maxReplicas: "{{ hpa_max_replicas }}"
        targetCPUUtilizationPercentage: "{{ hpa_cpu_target }}"
        targetMemoryUtilizationPercentage: "{{ hpa_memory_target }}"

# 8. Prometheus Role (roles/prometheus/tasks/main.yml)
# Namespace that hosts the Prometheus/Grafana stack; the whole task is
# skipped when monitoring is disabled.
- name: Create monitoring namespace
  kubernetes.core.k8s:
    kubeconfig: "{{ kubectl_config_file }}"
    state: present
    api_version: v1
    kind: Namespace
    name: monitoring
  when: monitoring_enabled

# Add the chart repo and install kube-prometheus-stack declaratively.
# The previous raw `helm install` shell command failed on every rerun
# once the release existed; kubernetes.core.helm is idempotent.
- name: Add prometheus-community Helm repository
  kubernetes.core.helm_repository:
    name: prometheus-community
    repo_url: https://prometheus-community.github.io/helm-charts
  when: monitoring_enabled

- name: Install Prometheus Operator
  kubernetes.core.helm:
    kubeconfig: "{{ kubectl_config_file }}"
    name: prometheus
    chart_ref: prometheus-community/kube-prometheus-stack
    release_namespace: monitoring
    create_namespace: true
    values:
      prometheus:
        prometheusSpec:
          storageSpec:
            volumeClaimTemplate:
              spec:
                resources:
                  requests:
                    storage: 50Gi
      grafana:
        persistence:
          # `enabled: true` was missing from the original --set flags;
          # without it the storageClassName/size settings are ignored
          # and Grafana runs on ephemeral storage.
          enabled: true
          storageClassName: gp2
          size: 10Gi
  when: monitoring_enabled

# 9. Grafana Dashboard Configuration (roles/monitoring/tasks/grafana.yml)
# Ship the application dashboard as a ConfigMap. The grafana_dashboard
# label is the discovery convention used by the kube-prometheus-stack
# Grafana dashboard sidecar — presumably that sidecar is enabled;
# verify against the Prometheus role's chart values.
- name: Create Grafana dashboard for application
  kubernetes.core.k8s:
    kubeconfig: "{{ kubectl_config_file }}"
    state: present
    definition:
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: "{{ app_name }}-dashboard"
        namespace: monitoring
        labels:
          grafana_dashboard: "1"
      data:
        # NOTE: every double-brace expression in the JSON below (including
        # "legendFormat") is expanded by Ansible/Jinja at task time. A
        # literal Grafana label template would need a raw/endraw guard.
        dashboard.json: |
          {
            "dashboard": {
              "id": null,
              "title": "{{ app_name }} Application Metrics",
              "tags": ["{{ app_name }}", "application"],
              "timezone": "browser",
              "panels": [
                {
                  "title": "Request Rate",
                  "type": "graph",
                  "targets": [
                    {
                      "expr": "rate(http_requests_total[5m])",
                      "legendFormat": "{{ app_name }} requests"
                    }
                  ],
                  "gridPos": {"h": 8, "w": 12, "x": 0, "y": 0}
                }
              ],
              "time": {"from": "now-1h", "to": "now"},
              "refresh": "30s"
            }
          }
  when: monitoring_enabled

# 10. CI/CD Integration Role (roles/ci-cd/tasks/main.yml)
# Namespace for the Argo CD control plane components.
- name: Create Argo CD namespace
  kubernetes.core.k8s:
    kubeconfig: "{{ kubectl_config_file }}"
    state: present
    api_version: v1
    kind: Namespace
    name: argocd
  when: ci_cd_enabled

# Apply the upstream Argo CD manifests. The namespace is already ensured
# by the previous task; the original `kubectl create namespace argocd`
# duplicated it and failed with AlreadyExists on every rerun, aborting
# the play. `kubectl apply` itself is safe to repeat.
- name: Install Argo CD
  command: kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
  when: ci_cd_enabled

# Register the application with Argo CD for GitOps delivery: manifests
# under k8s/ in the app repository are continuously synced to the cluster.
- name: Create Argo CD application
  kubernetes.core.k8s:
    kubeconfig: "{{ kubectl_config_file }}"
    state: present
    definition:
      apiVersion: argoproj.io/v1alpha1
      kind: Application
      metadata:
        name: "{{ app_name }}-app"
        # By default Argo CD only watches Application CRs in its own
        # namespace.
        namespace: argocd
      spec:
        project: default
        source:
          repoURL: "https://github.com/company/{{ app_name }}.git"
          targetRevision: main
          path: k8s
        destination:
          # In-cluster API endpoint: deploy to the same cluster Argo CD
          # runs in.
          server: https://kubernetes.default.svc
          namespace: "{{ app_namespace }}"
        syncPolicy:
          automated:
            # prune: delete resources removed from git;
            # selfHeal: revert manual, out-of-band cluster edits.
            prune: true
            selfHeal: true
  when: ci_cd_enabled

# 11. Additional Configuration Files

# Docker Daemon Template (roles/kubernetes-setup/templates/daemon.json.j2)
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "insecure-registries": ["{{ container_registry }}"]
}

# Cluster Configuration Template (roles/kubernetes-setup/templates/cluster-config.yaml.j2)
# Presumably rendered by Ansible's template module on the control-plane
# host — all double-brace expressions are Jinja, expanded before kubeadm
# reads the file.
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  # Advertise the node's primary IPv4 address to the rest of the cluster.
  advertiseAddress: "{{ ansible_default_ipv4.address }}"
  bindPort: 6443
nodeRegistration:
  # containerd CRI socket (Docker-shim-free runtime).
  criSocket: unix:///var/run/containerd/containerd.sock
  kubeletExtraArgs:
    node-ip: "{{ ansible_default_ipv4.address }}"
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: "v{{ kubernetes_version }}"
clusterName: "{{ cluster_name }}"
controlPlaneEndpoint: "{{ ansible_default_ipv4.address }}:6443"
networking:
  # Pod CIDR must match the CNI plugin's configuration (10.244.0.0/16 is
  # the usual flannel default — confirm against the CNI role).
  podSubnet: "10.244.0.0/16"
  serviceSubnet: "10.96.0.0/12"
etcd:
  external:
    endpoints:
    # NOTE(review): plain-HTTP etcd with no caFile/certFile/keyFile means
    # an unauthenticated, unencrypted datastore — confirm this is
    # intentional; production clusters should use TLS-secured etcd.
    - http://{{ ansible_default_ipv4.address }}:2379
apiServer:
  extraArgs:
    # Default NodePort range, stated explicitly.
    service-node-port-range: "30000-32767"
  # Extra SANs so the API server cert is valid for hostname, FQDN, IP,
  # and the logical cluster name.
  certSANs:
    - "{{ ansible_fqdn }}"
    - "{{ ansible_hostname }}"
    - "{{ ansible_default_ipv4.address }}"
    - "{{ cluster_name }}"

# Helm Chart Structure (charts/microservice-app/)
# Chart.yaml
apiVersion: v2
name: microservice-app
description: A Helm chart for Microservice Application
type: application
# Chart (packaging) version — bump whenever the chart itself changes.
version: 1.0.0
# NOTE(review): Helm does not template Chart.yaml — the Jinja expression
# below only works if Ansible renders this file (e.g. via the template
# module) before the chart is packaged/installed. Confirm that render
# step exists in the pipeline.
appVersion: "{{ app_version }}"

# values.yaml
# Default chart values, overridden at deploy time by the helm task's
# `values:` block. NOTE(review): the Jinja expressions below require an
# Ansible render pass — plain `helm install` will not expand them.
replicaCount: 1
image:
  repository: "{{ app_image }}"
  pullPolicy: IfNotPresent
  tag: "{{ app_version }}"

service:
  type: ClusterIP
  port: 80
  # Port the container process listens on.
  targetPort: 3000

ingress:
  enabled: false
  className: "nginx"
  annotations: {}
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: Prefix
  tls: []

# Empty by default: no CPU/memory requests or limits are applied.
resources: {}
autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80

# templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "chart.fullname" . }}
  labels:
    {{- include "chart.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      {{- include "chart.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        {{- include "chart.selectorLabels" . | nindent 8 }}
    spec:
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              # Was hard-coded to 80, contradicting service.targetPort
              # (3000) in values.yaml; derive it from values instead.
              containerPort: {{ .Values.service.targetPort | default 80 }}
              protocol: TCP
          livenessProbe:
            httpGet:
              # `.Values.healthCheck.path` aborts rendering with a
              # nil-pointer error when `healthCheck` is absent from
              # values (it is) — default the parent map first.
              path: {{ ((.Values.healthCheck | default (dict)).path) | default "/health" }}
              port: http
          readinessProbe:
            httpGet:
              path: {{ ((.Values.readinessCheck | default (dict)).path) | default "/ready" }}
              port: http
          resources:
            {{- toYaml .Values.resources | nindent 12 }}