🎯 Recommended Collections
Balanced collections of code examples from various categories for you to explore
Docker and Kubernetes Examples
Core container orchestration examples, including Dockerfiles, Docker Compose files, and Kubernetes manifests for modern application deployment
💻 Dockerfile for a Node.js Application (dockerfile)
Production-ready Dockerfile for Node.js applications with a multi-stage build and security best practices
# Multi-stage Dockerfile for Node.js Application
# Stage 1: Build stage
FROM node:18-alpine AS builder
# Set working directory
WORKDIR /app
# Copy package files
COPY package*.json ./
# Install dependencies
RUN npm ci --omit=dev && npm cache clean --force
# Copy source code
COPY . .
# Create a non-root user and hand ownership of the app directory to it
RUN addgroup -g 1001 -S nodejs && \
    adduser -S nodejs -u 1001 && \
    chown -R nodejs:nodejs /app
USER nodejs
# Expose port
EXPOSE 3000
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 CMD node healthcheck.js
# Start the application
CMD ["node", "server.js"]
# Stage 2: Production stage (if needed for smaller image)
# FROM node:18-alpine AS production
# WORKDIR /app
# COPY --from=builder --chown=nodejs:nodejs /app/node_modules ./node_modules
# COPY --from=builder --chown=nodejs:nodejs /app ./
# USER nodejs
# EXPOSE 3000
# CMD ["node", "server.js"]
💻 Docker Compose: Web Application Stack (yaml)
Complete Docker Compose setup for a web application with a database, a Redis cache, and an Nginx reverse proxy
version: '3.8'

services:
  # Web Application
  app:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: myapp
    restart: unless-stopped
    env_file:
      - .env
    environment:
      - NODE_ENV=production
      - DB_HOST=database
      - REDIS_HOST=redis
      - DB_PORT=5432
      - REDIS_PORT=6379
    depends_on:
      database:
        condition: service_healthy
      redis:
        condition: service_started
    networks:
      - app-network
    volumes:
      - app-logs:/app/logs
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s

  # PostgreSQL Database
  database:
    image: postgres:15-alpine
    container_name: myapp-db
    restart: unless-stopped
    environment:
      POSTGRES_DB: ${DB_NAME:-myapp}
      POSTGRES_USER: ${DB_USER:-postgres}
      POSTGRES_PASSWORD: ${DB_PASSWORD}
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./database/init:/docker-entrypoint-initdb.d
    networks:
      - app-network
    ports:
      - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER:-postgres} -d ${DB_NAME:-myapp}"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Redis Cache
  redis:
    image: redis:7-alpine
    container_name: myapp-redis
    restart: unless-stopped
    command: redis-server --appendonly yes --requirepass ${REDIS_PASSWORD}
    volumes:
      - redis_data:/data
    networks:
      - app-network
    ports:
      - "6379:6379"
    healthcheck:
      # requirepass is enabled, so the healthcheck must authenticate before pinging
      test: ["CMD", "redis-cli", "-a", "${REDIS_PASSWORD}", "ping"]
      interval: 10s
      timeout: 3s
      retries: 5

  # Nginx Reverse Proxy
  nginx:
    image: nginx:alpine
    container_name: myapp-nginx
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/conf.d:/etc/nginx/conf.d:ro
      - ./ssl:/etc/nginx/ssl:ro
      - nginx_logs:/var/log/nginx
    depends_on:
      - app
    networks:
      - app-network
    healthcheck:
      test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost/health"]
      interval: 30s
      timeout: 10s
      retries: 3

volumes:
  postgres_data:
    driver: local
  redis_data:
    driver: local
  app-logs:
    driver: local
  nginx_logs:
    driver: local

networks:
  app-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16
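The `app` service reads its runtime settings from a `.env` file (`env_file: .env`), and Compose uses the same file to substitute `${DB_NAME}`, `${DB_USER}`, `${DB_PASSWORD}`, and `${REDIS_PASSWORD}` when parsing the stack. A minimal illustrative `.env` with placeholder values might look like the sketch below; keep the real file out of version control.
# .env (placeholder values for illustration only)
DB_NAME=myapp
DB_USER=postgres
DB_PASSWORD=change-me-db-password
REDIS_PASSWORD=change-me-redis-password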
💻 Kubernetes Deployment Manifest (yaml)
Production-ready Kubernetes Deployment with a Service, ConfigMap, and Ingress for containerized applications
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deployment
  namespace: production
  labels:
    app: myapp
    version: v1
    environment: production
spec:
  replicas: 3
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  selector:
    matchLabels:
      app: myapp
  template:
    metadata:
      labels:
        app: myapp
        version: v1
        environment: production
    spec:
      containers:
        - name: myapp
          image: myregistry/myapp:1.0.0
          imagePullPolicy: Always
          ports:
            - containerPort: 3000
              protocol: TCP
              name: http
          env:
            - name: NODE_ENV
              value: "production"
            - name: DB_HOST
              valueFrom:
                configMapKeyRef:
                  name: myapp-config
                  key: database_host
            - name: DB_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: myapp-secrets
                  key: db_password
            - name: REDIS_HOST
              valueFrom:
                configMapKeyRef:
                  name: myapp-config
                  key: redis_host
          resources:
            requests:
              memory: "256Mi"
              cpu: "250m"
            limits:
              memory: "512Mi"
              cpu: "500m"
          livenessProbe:
            httpGet:
              path: /health
              port: 3000
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            httpGet:
              path: /ready
              port: 3000
            initialDelaySeconds: 5
            periodSeconds: 5
            timeoutSeconds: 3
            failureThreshold: 3
          volumeMounts:
            - name: config-volume
              mountPath: /app/config
              readOnly: true
            - name: logs-volume
              mountPath: /app/logs
      volumes:
        - name: config-volume
          configMap:
            name: myapp-config
        - name: logs-volume
          emptyDir: {}
      imagePullSecrets:
        - name: registry-secret
      nodeSelector:
        node-type: application
      tolerations:
        - key: "workload"
          operator: "Equal"
          value: "application"
          effect: "NoSchedule"
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - weight: 100
              podAffinityTerm:
                labelSelector:
                  matchExpressions:
                    - key: app
                      operator: In
                      values:
                        - myapp
                topologyKey: kubernetes.io/hostname
---
apiVersion: v1
kind: Service
metadata:
  name: myapp-service
  namespace: production
  labels:
    app: myapp
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: 3000
      protocol: TCP
      name: http
  selector:
    app: myapp
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: myapp-config
  namespace: production
data:
  database_host: "database-service.production.svc.cluster.local"
  redis_host: "redis-service.production.svc.cluster.local"
  app_config.json: |
    {
      "port": 3000,
      "logLevel": "info",
      "features": {
        "enableCache": true,
        "enableMetrics": true
      }
    }
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp-ingress
  namespace: production
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    cert-manager.io/cluster-issuer: letsencrypt-prod
    # Limit each client to roughly 100 requests per minute
    nginx.ingress.kubernetes.io/limit-rpm: "100"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - myapp.example.com
      secretName: myapp-tls
  rules:
    - host: myapp.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: myapp-service
                port:
                  number: 80
---
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: myapp-hpa
  namespace: production
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: myapp-deployment
  minReplicas: 3
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
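The Deployment above references a Secret named `myapp-secrets` (key `db_password`) and an image-pull Secret named `registry-secret`, neither of which is defined in the manifest. A minimal sketch of the application Secret might look like the following; the value is a placeholder, and in practice you would create the Secret with kubectl or a secrets operator rather than committing it to a repository.
apiVersion: v1
kind: Secret
metadata:
  name: myapp-secrets
  namespace: production
type: Opaque
stringData:
  db_password: "change-me"   # placeholder; supply the real password out of band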
💻 Advanced Multi-Stage Docker Build (dockerfile)
An advanced multi-stage Docker build for Go applications with separate build, test, and production stages
# Multi-stage Dockerfile for Go Application with security and optimization
# Stage 1: Base dependencies layer
FROM golang:1.21-alpine AS base
WORKDIR /app
# Install build dependencies
RUN apk add --no-cache git ca-certificates tzdata
# Create non-root user
RUN addgroup -g 1001 -S appgroup && adduser -u 1001 -S appuser -G appgroup
# Stage 2: Dependencies layer
FROM base AS deps
COPY go.mod go.sum ./
RUN --mount=type=cache,target=/go/pkg/mod \
    --mount=type=cache,target=/root/.cache/go-build \
    go mod download && go mod verify
# Stage 3: Build stage
FROM base AS builder
ARG VERSION=dev
ARG COMMIT=unknown
ARG BUILD_TIME
COPY --from=deps /go/pkg/mod /go/pkg/mod
COPY . .
# Build the application
RUN --mount=type=cache,target=/go/pkg/mod \
    --mount=type=cache,target=/root/.cache/go-build \
    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \
    -ldflags="-w -s -X main.version=${VERSION} -X main.commit=${COMMIT} -X main.buildTime=${BUILD_TIME}" \
    -a -installsuffix cgo -o main ./cmd/server
# Stage 4: Test stage
FROM base AS tester
COPY --from=deps /go/pkg/mod /go/pkg/mod
COPY . .
# Run tests with coverage
RUN --mount=type=cache,target=/go/pkg/mod \
    --mount=type=cache,target=/root/.cache/go-build \
    go test -v -race -coverprofile=coverage.out ./... && \
    go tool cover -html=coverage.out -o coverage.html
# Stage 5: Security scanning stage
FROM base AS security
COPY --from=deps /go/pkg/mod /go/pkg/mod
COPY . .
# Run static security analysis with gosec over the source tree
RUN --mount=type=cache,target=/go/pkg/mod \
    --mount=type=cache,target=/root/.cache/go-build \
    go install github.com/securego/gosec/v2/cmd/gosec@latest && \
    gosec ./...
# Stage 6: Production stage
FROM scratch AS production
# Import CA certificates from base stage
COPY --from=base /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
# Import timezone data
COPY --from=base /usr/share/zoneinfo /usr/share/zoneinfo
# Import user and group from base stage
COPY --from=base /etc/passwd /etc/passwd
COPY --from=base /etc/group /etc/group
# Import the binary from builder stage
COPY --from=builder /app/main /app/main
# Use non-root user
USER appuser:appgroup
# Expose port
EXPOSE 8080
# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 CMD ["/app/main", "--health-check"]
# Set entrypoint
ENTRYPOINT ["/app/main"]
# Default command
CMD ["--config", "/config/config.yaml"]
# Stage 7: Development stage (with debugging)
FROM base AS development
RUN go install github.com/air-verse/air@latest
COPY --from=deps /go/pkg/mod /go/pkg/mod
COPY . .
EXPOSE 8080 4000
CMD ["air", "-c", ".air.toml"]
💻 Kubernetes StatefulSet for a Database (yaml)
StatefulSet configuration for stateful applications such as databases, with persistent storage and stable network identity
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: postgres-statefulset
  namespace: database
  labels:
    app: postgres
    tier: database
spec:
  serviceName: postgres-service
  replicas: 3
  selector:
    matchLabels:
      app: postgres
  template:
    metadata:
      labels:
        app: postgres
        tier: database
    spec:
      securityContext:
        runAsUser: 999
        runAsGroup: 999
        fsGroup: 999
      containers:
        - name: postgres
          image: postgres:15-alpine
          imagePullPolicy: IfNotPresent
          # Point postgres at the mounted configuration file
          args: ["-c", "config_file=/etc/postgresql/postgresql.conf"]
          ports:
            - name: postgres
              containerPort: 5432
              protocol: TCP
          env:
            - name: POSTGRES_DB
              value: "myapp"
            - name: POSTGRES_USER
              value: "postgres"
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: postgres-secret
                  key: password
            - name: PGDATA
              value: /var/lib/postgresql/data/pgdata
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: REPLICATION_MODE
              value: "master"
          volumeMounts:
            - name: postgres-storage
              mountPath: /var/lib/postgresql/data
            - name: postgres-config
              mountPath: /etc/postgresql/postgresql.conf
              subPath: postgresql.conf
              readOnly: true
            - name: postgres-init
              mountPath: /docker-entrypoint-initdb.d
              readOnly: true
          resources:
            requests:
              memory: "1Gi"
              cpu: "500m"
            limits:
              memory: "2Gi"
              cpu: "1000m"
          livenessProbe:
            exec:
              command:
                - pg_isready
                - -U
                - postgres
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            failureThreshold: 3
          readinessProbe:
            exec:
              command:
                - pg_isready
                - -U
                - postgres
            initialDelaySeconds: 5
            periodSeconds: 5
            timeoutSeconds: 3
            failureThreshold: 3
          lifecycle:
            preStop:
              exec:
                command: ["/bin/sh", "-c", "pg_ctl stop -m fast"]
      # Pod-level volumes live in the pod template spec
      volumes:
        - name: postgres-config
          configMap:
            name: postgres-config
        - name: postgres-init
          configMap:
            name: postgres-init
  volumeClaimTemplates:
    - metadata:
        name: postgres-storage
        labels:
          app: postgres
      spec:
        accessModes: ["ReadWriteOnce"]
        storageClassName: fast-ssd
        resources:
          requests:
            storage: 20Gi
---
apiVersion: v1
kind: Service
metadata:
  name: postgres-service
  namespace: database
  labels:
    app: postgres
spec:
  ports:
    - port: 5432
      targetPort: 5432
      name: postgres
  clusterIP: None
  selector:
    app: postgres
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-config
  namespace: database
data:
  postgresql.conf: |
    # PostgreSQL configuration file
    # Connection settings
    listen_addresses = '*'
    port = 5432
    max_connections = 200
    # Memory settings
    shared_buffers = 256MB
    effective_cache_size = 1GB
    work_mem = 4MB
    maintenance_work_mem = 64MB
    # WAL settings
    wal_level = replica
    max_wal_size = 1GB
    min_wal_size = 80MB
    checkpoint_completion_target = 0.9
    # Replication settings
    max_wal_senders = 3
    # wal_keep_segments was removed in PostgreSQL 13; use wal_keep_size instead
    wal_keep_size = 512MB
    hot_standby = on
    # Logging
    log_destination = 'stderr'
    logging_collector = on
    log_directory = 'pg_log'
    log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
    log_min_duration_statement = 1000
    log_checkpoints = on
    log_connections = on
    log_disconnections = on
    log_lock_waits = on
---
apiVersion: v1
kind: Secret
metadata:
  name: postgres-secret
  namespace: database
type: Opaque
data:
  password: c3VwZXJfc2VjcmV0X3Bhc3N3b3Jk  # base64 encoded
  replication-password: cmVwbGljYXRpb25fcGFzc3dvcmQ=  # base64 encoded
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-init
  namespace: database
data:
  01-init.sql: |
    -- Initialize database schema
    CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
    CREATE EXTENSION IF NOT EXISTS "pg_stat_statements";
    -- Create application user
    CREATE USER app_user WITH PASSWORD 'app_password';
    GRANT CONNECT ON DATABASE myapp TO app_user;
    -- Create tables
    CREATE TABLE IF NOT EXISTS users (
        id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
        username VARCHAR(50) UNIQUE NOT NULL,
        email VARCHAR(100) UNIQUE NOT NULL,
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
    );
    -- Create indexes
    CREATE INDEX IF NOT EXISTS idx_users_username ON users(username);
    CREATE INDEX IF NOT EXISTS idx_users_email ON users(email);
    CREATE INDEX IF NOT EXISTS idx_users_created_at ON users(created_at);
    -- Grant permissions (the primary key is a UUID, so there is no sequence to grant)
    GRANT SELECT, INSERT, UPDATE, DELETE ON users TO app_user;
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: postgres-pdb
  namespace: database
spec:
  minAvailable: 2
  selector:
    matchLabels:
      app: postgres
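The volumeClaimTemplates above request the storage class `fast-ssd`, which is not defined in this manifest and is cluster-specific. As an illustrative sketch only, on a cluster using the AWS EBS CSI driver it might be declared roughly like this (the provisioner and parameters are assumptions, not part of the original example):
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: fast-ssd
provisioner: ebs.csi.aws.com   # assumption: AWS EBS CSI driver; substitute your cluster's provisioner
parameters:
  type: gp3                    # assumption: gp3 SSD volumes
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
reclaimPolicy: Retain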