Логирование, мониторинг, CI/CD, развёртывание в production
Развёртывание в production требует особого подхода. Изучите логирование, мониторинг, CI/CD и best practices для production-среды.
```python
# ✅ Хорошо — структурированные логи
import logging

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

logger = logging.getLogger(__name__)
logger.info("Application started", extra={"version": "1.0.0"})
```

Такая запись в структурированном JSON-формате выглядит, например, так:

```json
{"timestamp": "2024-01-15T10:00:00Z", "level": "INFO", "service": "api", "message": "Application started", "version": "1.0.0"}
```
```bash
# json-file (по умолчанию)
docker run -d \
  --log-driver=json-file \
  --log-opt max-size=10m \
  --log-opt max-file=3 \
  myapp:latest

# syslog
docker run -d \
  --log-driver=syslog \
  --log-opt syslog-address=udp://logserver:514 \
  myapp:latest

# journald (systemd)
docker run -d \
  --log-driver=journald \
  myapp:latest

# fluentd (ELK stack)
docker run -d \
  --log-driver=fluentd \
  --log-opt fluentd-address=tcp://fluentd:24224 \
  myapp:latest
```
```json
// /etc/docker/daemon.json
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3",
    "compress": "true"
  }
}
```
```bash
# Применить настройки
sudo systemctl restart docker
```
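Настройки из daemon.json применяются только к контейнерам, созданным после перезапуска демона. Проверить действующий драйвер можно так (имя контейнера myapp — условное):

```bash
# Драйвер логирования по умолчанию на уровне демона
docker info --format '{{.LoggingDriver}}'

# Драйвер и его опции у конкретного контейнера
docker inspect --format '{{.HostConfig.LogConfig.Type}}' myapp
docker inspect --format '{{json .HostConfig.LogConfig.Config}}' myapp
```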
```yaml
# docker-compose.yml
version: '3.8'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.11.0
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
    volumes:
      - es-data:/usr/share/elasticsearch/data

  logstash:
    image: docker.elastic.co/logstash/logstash:8.11.0
    volumes:
      - ./logstash/pipeline:/usr/share/logstash/pipeline

  kibana:
    image: docker.elastic.co/kibana/kibana:8.11.0
    ports:
      - "5601:5601"

  app:
    image: myapp:latest
    logging:
      driver: fluentd
      options:
        fluentd-address: localhost:24224
        tag: app

volumes:
  es-data:
```
```yaml
# docker-compose.yml
version: '3.8'
services:
  loki:
    image: grafana/loki:2.9.0
    ports:
      - "3100:3100"
    command: -config.file=/etc/loki/local-config.yaml

  promtail:
    image: grafana/promtail:2.9.0
    volumes:
      - /var/log:/var/log
      - /var/lib/docker/containers:/var/lib/docker/containers:ro
    command: -config.file=/etc/promtail/config.yml

  grafana:
    image: grafana/grafana:10.2.0
    ports:
      - "3000:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
```
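Убедиться, что Loki поднялся и принимает логи, можно прямыми запросами к его HTTP API (метка job зависит от конфигурации Promtail; job="varlogs" здесь — условный пример):

```bash
# Готовность Loki
curl -s http://localhost:3100/ready

# LogQL-запрос через HTTP API
curl -s -G http://localhost:3100/loki/api/v1/query_range \
  --data-urlencode 'query={job="varlogs"}' \
  --data-urlencode 'limit=10'
```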
```bash
# Статистика в реальном времени
docker stats

# Однократный снимок
docker stats --no-stream

# Статистика конкретного контейнера
docker stats myapp
```
```yaml
# docker-compose.yml
version: '3.8'
services:
  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    ports:
      - "8080:8080"
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:ro
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
      - /dev/disk/:/dev/disk:ro
    privileged: true

  prometheus:
    image: prom/prometheus:v2.47.0
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus-data:/prometheus

  grafana:
    image: grafana/grafana:10.2.0
    ports:
      - "3000:3000"
    volumes:
      - grafana-data:/var/lib/grafana
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin

volumes:
  prometheus-data:
  grafana-data:
```
```yaml
# prometheus.yml
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: 'cadvisor'
    static_configs:
      - targets: ['cadvisor:8080']
```
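Проверить, что Prometheus видит cAdvisor и собирает метрики, удобно через его HTTP API:

```bash
# Состояние целей (ожидаем health: "up" у cadvisor)
curl -s http://localhost:9090/api/v1/targets | \
  jq '.data.activeTargets[] | {job: .labels.job, health: .health}'

# Пример запроса: потребление CPU контейнерами за 5 минут
curl -s -G http://localhost:9090/api/v1/query \
  --data-urlencode 'query=rate(container_cpu_usage_seconds_total[5m])'
```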
```yaml
# alertmanager.yml
route:
  receiver: 'slack'

receivers:
  - name: 'slack'
    slack_configs:
      - api_url: 'https://hooks.slack.com/services/XXX/YYY/ZZZ'
        channel: '#alerts'
        title: 'Alert: {{ .GroupLabels.alertname }}'
        text: '{{ range .Alerts }}{{ .Annotations.description }}{{ end }}'
```
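Перед перезагрузкой Alertmanager конфигурацию стоит провалидировать утилитой amtool, которая входит в образ prom/alertmanager (набросок):

```bash
# Проверка синтаксиса alertmanager.yml без запуска сервиса
docker run --rm --entrypoint amtool \
  -v "$PWD/alertmanager.yml:/etc/alertmanager/alertmanager.yml:ro" \
  prom/alertmanager \
  check-config /etc/alertmanager/alertmanager.yml
```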
```yaml
# .github/workflows/docker.yml
name: Build and Deploy

on:
  push:
    branches: [main]
    tags:
      - 'v*'
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Run tests
        run: |
          docker compose -f docker-compose.test.yml up --abort-on-container-exit

  build:
    needs: test
    runs-on: ubuntu-latest
    if: github.event_name == 'push'
    steps:
      - uses: actions/checkout@v3
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}
      - name: Build and push
        uses: docker/build-push-action@v4
        with:
          push: true
          tags: |
            ${{ secrets.DOCKER_USERNAME }}/myapp:${{ github.sha }}
            ${{ startsWith(github.ref, 'refs/tags/v') && format('{0}/myapp:latest', secrets.DOCKER_USERNAME) || '' }}
          cache-from: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/myapp:buildcache
          cache-to: type=registry,ref=${{ secrets.DOCKER_USERNAME }}/myapp:buildcache,mode=max
      - name: Run Trivy scan
        uses: aquasecurity/trivy-action@master
        with:
          image-ref: ${{ secrets.DOCKER_USERNAME }}/myapp:${{ github.sha }}
          format: 'sarif'
          output: 'trivy-results.sarif'
          severity: 'CRITICAL,HIGH'
      - name: Upload Trivy results
        uses: github/codeql-action/upload-sarif@v2
        with:
          sarif_file: 'trivy-results.sarif'

  deploy:
    needs: build
    runs-on: ubuntu-latest
    if: startsWith(github.ref, 'refs/tags/v')
    steps:
      - uses: actions/checkout@v3
      - name: Deploy to production
        run: |
          ssh ${{ secrets.PROD_HOST }} << 'EOF'
          cd /opt/myapp
          docker compose pull
          docker compose up -d
          EOF
```
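Шаг деплоя в workflow выше намеренно упрощён. На сервере удобнее держать скрипт, который после обновления проверяет health и сигнализирует о необходимости отката; минимальный набросок (путь /opt/myapp, имя сервиса app и URL health-эндпоинта — условные):

```bash
#!/bin/bash
# deploy.sh — набросок: обновление с проверкой health
cd /opt/myapp

# Запомнить текущий образ сервиса app на случай отката
PREV_IMAGE=$(docker inspect --format '{{.Config.Image}}' "$(docker compose ps -q app)" 2>/dev/null || true)

docker compose pull
docker compose up -d

# Дать приложению до ~50 секунд на прогрев
for i in $(seq 1 10); do
  if curl -fs http://localhost:8000/health > /dev/null; then
    echo "Deploy OK"
    exit 0
  fi
  sleep 5
done

echo "Health check failed; previous image was: $PREV_IMAGE"
# Откат: поднять предыдущий образ (конкретная схема зависит от тегирования)
exit 1
```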
```yaml
# .gitlab-ci.yml
stages:
  - test
  - build
  - deploy

variables:
  DOCKER_IMAGE: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA

test:
  stage: test
  image: docker:24
  services:
    - docker:24-dind
  script:
    - docker compose -f docker-compose.test.yml up --abort-on-container-exit

build:
  stage: build
  image: docker:24
  services:
    - docker:24-dind
  before_script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
  script:
    - docker build -t $DOCKER_IMAGE .
    - docker push $DOCKER_IMAGE
    - docker tag $DOCKER_IMAGE $CI_REGISTRY_IMAGE:latest
    - docker push $CI_REGISTRY_IMAGE:latest

deploy:
  stage: deploy
  image: alpine:latest
  before_script:
    - apk add --no-cache openssh-client
    - eval $(ssh-agent -s)
    - echo "$SSH_PRIVATE_KEY" | ssh-add -
  script:
    - ssh -o StrictHostKeyChecking=no $PROD_HOST "
      cd /opt/myapp &&
      docker compose pull &&
      docker compose up -d
      "
  only:
    - tags
```
```bash
#!/bin/bash
# blue-green-deploy.sh
VERSION=$1
COLOR=$2  # blue или green

if [ "$COLOR" == "blue" ]; then
  OTHER_COLOR="green"
  PORT=8081
else
  OTHER_COLOR="blue"
  PORT=8082
fi

# Запуск новой версии: каждый "цвет" слушает свой host-порт,
# иначе порты blue и green конфликтовали бы между собой
docker run -d \
  --name myapp-$COLOR \
  -p $PORT:8000 \
  myapp:$VERSION

# Проверка health (ограничиваем число попыток, чтобы не зависнуть навсегда)
for i in $(seq 1 30); do
  if curl -fs http://localhost:$PORT/health > /dev/null; then
    break
  fi
  echo "Waiting for $COLOR to be healthy..."
  sleep 5
done

# Переключение трафика (через proxy — см. набросок ниже)
echo "Switching traffic to $COLOR"

# Остановка старой версии
docker stop myapp-$OTHER_COLOR
docker rm myapp-$OTHER_COLOR

echo "Deployment complete!"
```
```yaml
# docker-compose.yml
version: '3.8'
services:
  app:
    image: myapp:latest
    deploy:
      replicas: 3
      update_config:
        parallelism: 1
        delay: 10s
        failure_action: rollback
        monitor: 30s
        max_failure_ratio: 0.3
      rollback_config:
        parallelism: 1
        delay: 10s
```
```bash
# В Swarm mode
docker service create \
  --name myapp \
  --replicas 3 \
  --update-parallelism 1 \
  --update-delay 10s \
  myapp:latest

# Rolling update
docker service update \
  --image myapp:v2.0 \
  myapp
```
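Если раскатка пошла не так, Swarm может вернуть сервис к предыдущей спецификации одной командой:

```bash
# Откат сервиса к предыдущей версии
docker service rollback myapp

# Наблюдать за состоянием задач во время раскатки/отката
docker service ps myapp
```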
```bash
#!/bin/bash
# backup.sh
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_DIR=/backups

# Бэкап PostgreSQL
docker run --rm \
  -v postgres-data:/data:ro \
  -v $BACKUP_DIR:/backup \
  alpine \
  tar czf /backup/postgres-$DATE.tar.gz -C /data .

# Бэкап конфигурации
tar czf $BACKUP_DIR/config-$DATE.tar.gz /opt/myapp/config

# Удаление старых бэкапов (хранить 7 дней)
find $BACKUP_DIR -name "*.tar.gz" -mtime +7 -delete
```
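Tar-архив каталога данных, снятый с работающей PostgreSQL, может оказаться неконсистентным. Надёжнее дополнительно снимать логический дамп; набросок (имя контейнера postgres и база shop — условные, переменные те же, что в backup.sh):

```bash
# Логический дамп: консистентен даже на работающей БД
docker exec postgres pg_dump -U app -d shop | gzip > $BACKUP_DIR/shop-$DATE.sql.gz

# Быстрая проверка целостности tar-архива без распаковки
tar tzf $BACKUP_DIR/postgres-$DATE.tar.gz > /dev/null && echo "Archive OK"
```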
```bash
#!/bin/bash
# restore.sh
BACKUP_FILE=$1

# Остановка приложения
docker compose down

# Восстановление
docker run --rm \
  -v postgres-data:/data \
  -v $(dirname $BACKUP_FILE):/backup:ro \
  alpine \
  tar xzf /backup/$(basename $BACKUP_FILE) -C /data

# Запуск
docker compose up -d
```
```dockerfile
# Многоэтапная сборка
FROM node:18-alpine AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

# Production
FROM node:18-alpine

# Метаданные
LABEL maintainer="devops@example.com"
LABEL version="1.0.0"

# Безопасность
RUN addgroup -S appgroup && adduser -S appuser -G appgroup

# Зависимости (копируем из стадии builder)
WORKDIR /app
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app/package.json ./

# Health check
HEALTHCHECK \
  CMD node healthcheck.js

# Пользователь
USER appuser

# Порт
EXPOSE 3000

CMD ["node", "dist/index.js"]
```
```yaml
version: '3.8'
services:
  app:
    image: myapp:v1.0.0
    deploy:
      replicas: 3
      resources:
        limits:
          cpus: '1'
          memory: 512M
        reservations:
          cpus: '0.5'
          memory: 256M
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
      update_config:
        parallelism: 1
        delay: 10s
    environment:
      - NODE_ENV=production
      - LOG_LEVEL=info
    secrets:
      - db_password
      - api_key
    networks:
      - frontend
      - backend
    depends_on:
      db:
        condition: service_healthy
    logging:
      driver: json-file
      options:
        max-size: "10m"
        max-file: "3"
    healthcheck:
      test: ["CMD", "node", "healthcheck.js"]
      interval: 30s
      timeout: 3s
      retries: 3
      start_period: 10s

  db:
    image: postgres:15-alpine
    environment:
      - POSTGRES_USER=app
      - POSTGRES_PASSWORD_FILE=/run/secrets/db_password
    volumes:
      - postgres-data:/var/lib/postgresql/data
    networks:
      - backend
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U app"]
      interval: 10s
      timeout: 5s
      retries: 5

secrets:
  db_password:
    external: true
  api_key:
    external: true

networks:
  frontend:
    driver: bridge
  backend:
    driver: bridge
    internal: true

volumes:
  postgres-data:
```
```bash
# Проверить логи
docker logs myapp --tail=100

# Проверить статус
docker inspect --format='{{.State.Status}}' myapp
docker inspect --format='{{.State.ExitCode}}' myapp

# Проверить использование ресурсов
docker stats myapp --no-stream

# Проверить health
docker inspect --format='{{.State.Health.Status}}' myapp
```
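Частая причина циклических перезапусков — OOM-killer. Было ли убийство по памяти, видно в состоянии контейнера и в событиях демона:

```bash
# true, если контейнер был убит из-за превышения лимита памяти
docker inspect --format='{{.State.OOMKilled}}' myapp

# События по контейнеру за последний час (die, oom, restart)
docker events --since 1h --until 0s --filter container=myapp
```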
```bash
# Мониторинг
docker stats --no-stream

# Ограничить память
docker update --memory=512m myapp

# Перезапустить с ограничениями
docker compose up -d --force-recreate
```
```bash
# Проверить размер логов
du -sh /var/lib/docker/containers/*/

# Очистить логи
truncate -s 0 /var/lib/docker/containers/*/*-json.log

# Перезапустить с новыми настройками логирования
docker compose up -d --force-recreate
```

Контекст: E-commerce платформа с монолитным Django-приложением (50 000 строк кода, PostgreSQL, Redis).
Проблема:
Решение:
```yaml
# docker-compose.prod.yml
version: '3.8'
services:
  web:
    build:
      context: .
      dockerfile: Dockerfile.prod
    restart: unless-stopped
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_started
    networks:
      - frontend
      - backend
    environment:
      - DATABASE_URL=postgresql://app:secret@db:5432/shop
      - REDIS_URL=redis://redis:6379/0
    healthcheck:
      test: ["CMD", "python", "manage.py", "check", "--deploy"]
      interval: 30s
      timeout: 10s
      retries: 3
    deploy:
      resources:
        limits:
          cpus: '2'
          memory: 2G

  db:
    image: postgres:15-alpine
    restart: always
    volumes:
      - postgres-data:/var/lib/postgresql/data
      - ./backups:/backups
    networks:
      - backend
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U app"]
      interval: 10s
      timeout: 5s
      retries: 5

  redis:
    image: redis:7-alpine
    restart: always
    networks:
      - backend

  nginx:
    image: nginx:alpine
    ports:
      - "443:443"
      - "80:80"
    volumes:
      - ./nginx.conf:/etc/nginx/nginx.conf:ro
      - ./ssl:/etc/ssl:ro
    depends_on:
      - web
    networks:
      - frontend

volumes:
  postgres-data:

networks:
  frontend:
  backend:
    internal: true
```

CI/CD пайплайн:
```yaml
# .github/workflows/deploy.yml
name: Deploy

on:
  push:
    branches: [main]

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Build and push
        run: |
          docker build -t registry.example.com/shop:${{ github.sha }} .
          docker push registry.example.com/shop:${{ github.sha }}
      - name: Deploy to production
        run: |
          ssh deploy@server "
            cd /opt/shop &&
            docker compose pull &&
            docker compose up -d --force-recreate
          "
      - name: Health check
        run: |
          curl -f https://shop.example.com/health || exit 1
```

Результат:
| Метрика | До | После | Улучшение |
|---|---|---|---|
| Время развёртывания | 4-6 часов | 15 минут | 16-24x |
| Частота релизов | 1/месяц | 5/неделю | 20x |
| Время отката | 2 часа | 2 минуты | 60x |
| Инциденты | 8/месяц | 1/месяц | 8x |
Контекст: Платёжный шлюз с требованиями 99.99% доступности.
Проблема:
Решение:
```bash
#!/bin/bash
# blue-green-deploy.sh
set -e

IMAGE=$1
VERSION=$2

# Цвета окружений
CURRENT=$(curl -s https://api.example.com/health | jq -r '.environment')
NEXT=$([ "$CURRENT" = "blue" ] && echo "green" || echo "blue")

echo "Current: $CURRENT, Next: $NEXT"

# Развёртывание в неактивное окружение
echo "Deploying $IMAGE:$VERSION to $NEXT..."
docker stack deploy \
  --compose-file docker-compose.$NEXT.yml \
  --prune \
  shop

# Ожидание готовности
echo "Waiting for health check..."
for i in {1..30}; do
  STATUS=$(docker service ps shop_$NEXT --format '{{.DesiredState}}' | tail -1)
  if [ "$STATUS" = "Running" ]; then
    HEALTH=$(curl -s https://$NEXT.shop.internal/health | jq -r '.status')
    if [ "$HEALTH" = "ok" ]; then
      echo "Health check passed!"
      break
    fi
  fi
  sleep 10
done

# Переключение трафика
echo "Switching traffic to $NEXT..."
docker service update \
  --label-add traefik.http.routers.shop.rule=Host\(\`shop.example.com\`\)\ \&\&\ TraefikService=shop_$NEXT \
  shop_proxy

# Проверка
echo "Verifying..."
curl -f https://shop.example.com/health || exit 1

echo "Deployment complete!"
```

Результат:
| Метрика | До | После | Улучшение |
|---|---|---|---|
| Доступность | 99.9% | 99.99% | 10x меньше инцидентов |
| Время развёртывания | 30 мин | 5 мин | 6x |
| Время отката | 15 мин | 30 сек | 30x |
| Тестирование в prod | Нет | Да (на green) | ✅ |
Контекст: Платформа с 50+ микросервисами, 200+ контейнеров.
Проблема:
Решение:
```yaml
# docker-compose.logging.yml
version: '3.8'
services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.11.0
    environment:
      - discovery.type=single-node
      - xpack.security.enabled=false
      - "ES_JAVA_OPTS=-Xms1g -Xmx1g"
    volumes:
      - es-data:/usr/share/elasticsearch/data
    networks:
      - logging

  kibana:
    image: docker.elastic.co/kibana/kibana:8.11.0
    ports:
      - "5601:5601"
    depends_on:
      - elasticsearch
    networks:
      - logging

  fluentd:
    image: fluent/fluentd:v1.16
    volumes:
      - ./fluentd/conf:/fluentd/etc
    ports:
      - "24224:24224"
    networks:
      - logging

# Объявления тома и сети, без которых compose-файл не поднимется
volumes:
  es-data:

networks:
  logging:

# Конфигурация логирования для сервисов:
# logging.driver: fluentd
# logging.options:
#   fluentd-address: localhost:24224
#   tag: "{{.ImageName}}/{{.Name}}"
```

Конфигурация Fluentd:
```
# fluentd.conf
<source>
  @type forward
  port 24224
</source>

<filter **>
  @type record_transformer
  <record>
    hostname "#{Socket.gethostname}"
    environment production
  </record>
</filter>

<match **>
  @type elasticsearch
  host elasticsearch
  port 9200
  logstash_format true
  logstash_prefix docker
</match>
```

Результат:
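Быстрый сквозной тест пайплайна: отправить сообщение через драйвер fluentd и поискать его в Elasticsearch (учтите, что выход elasticsearch не входит в базовый образ fluentd — нужен плагин fluent-plugin-elasticsearch):

```bash
# Тестовое сообщение через драйвер fluentd
docker run --rm --log-driver=fluentd \
  --log-opt fluentd-address=localhost:24224 \
  alpine echo "hello from docker"

# Проверить, что документ попал в индекс docker-*
curl -s 'http://localhost:9200/docker-*/_search?size=1&pretty'
```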
| Метрика | До | После | Улучшение |
|---|---|---|---|
| Поиск проблемы | 2-4 часа | 5-15 минут | 8-16x |
| Время на debugging | 40% времени | 10% времени | 4x |
| Tracing request | Невозможно | Kibana dashboards | ✅ |
Цель: Развернуть production-ready платформу с использованием всех изученных техник.
```
┌─────────────────────────────────────────────────────────────┐
│                        Load Balancer                        │
│                          (Traefik)                          │
│                       Ports: 80, 443                        │
└──────────────┬────────────────────────────────┬─────────────┘
               │                                │
    ┌──────────▼──────────┐          ┌──────────▼──────────┐
    │  Frontend Service   │          │     API Gateway     │
    │   (React + Nginx)   │          │      (FastAPI)      │
    │     Port: 3000      │          │     Port: 8000      │
    └─────────────────────┘          └──────────┬──────────┘
                                                │
               ┌────────────────────────────────┼────────────────────────────────┐
               │                                │                                │
    ┌──────────▼──────────┐          ┌──────────▼──────────┐          ┌──────────▼──────────┐
    │    Auth Service     │          │    Order Service    │          │  Notification Svc   │
    │      (Node.js)      │          │      (Python)       │          │        (Go)         │
    └──────────┬──────────┘          └──────────┬──────────┘          └──────────┬──────────┘
               │                                │                                │
               └────────────────────────────────┼────────────────────────────────┘
                                                │
               ┌────────────────────────────────┼────────────────────────────────┐
               │                                │                                │
    ┌──────────▼──────────┐          ┌──────────▼──────────┐          ┌──────────▼──────────┐
    │     PostgreSQL      │          │        Redis        │          │      RabbitMQ       │
    │   (Users, Orders)   │          │       (Cache)       │          │    (Message Bus)    │
    └─────────────────────┘          └─────────────────────┘          └─────────────────────┘
```
```
capstone-project/
├── docker-compose.yml              # Базовая конфигурация
├── docker-compose.prod.yml         # Production-переопределение
├── docker-compose.monitoring.yml   # Monitoring-профиль
├── .env                            # Переменные окружения (в .gitignore)
├── .env.example                    # Пример для копирования
├── traefik/
│   ├── traefik.yml
│   └── dynamic/
│       └── middlewares.yml
├── services/
│   ├── frontend/
│   │   ├── Dockerfile
│   │   ├── package.json
│   │   └── src/
│   ├── api-gateway/
│   │   ├── Dockerfile
│   │   ├── requirements.txt
│   │   └── app/
│   ├── auth-service/
│   │   ├── Dockerfile
│   │   ├── package.json
│   │   └── src/
│   ├── order-service/
│   │   ├── Dockerfile
│   │   ├── requirements.txt
│   │   └── app/
│   └── notification-service/
│       ├── Dockerfile
│       ├── go.mod
│       └── main.go
├── monitoring/
│   ├── prometheus.yml
│   ├── grafana/
│   │   └── dashboards/
│   └── alertmanager/
└── scripts/
    ├── backup.sh
    ├── deploy.sh
    └── health-check.sh
```
1. Инфраструктура:
2. Безопасность:
3. Надёжность:
4. Мониторинг:
5. CI/CD:
```yaml
version: '3.8'

services:
  # Reverse Proxy
  traefik:
    image: traefik:v2.10
    command:
      - "--api.dashboard=true"
      - "--providers.docker=true"
      - "--providers.docker.exposedbydefault=false"
      - "--entrypoints.web.address=:80"
      - "--entrypoints.websecure.address=:443"
    ports:
      - "80:80"
      - "443:443"
      - "8080:8080"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - ./traefik/traefik.yml:/etc/traefik/traefik.yml:ro
    networks:
      - frontend
    labels:
      - "traefik.enable=true"
    restart: unless-stopped

  # Frontend
  frontend:
    build: ./services/frontend
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.frontend.rule=Host(`example.com`)"
      - "traefik.http.routers.frontend.entrypoints=websecure"
    depends_on:
      - api-gateway
    networks:
      - frontend
    restart: unless-stopped

  # API Gateway
  api-gateway:
    build: ./services/api-gateway
    environment:
      - AUTH_SERVICE_URL=http://auth-service:3001
      - ORDER_SERVICE_URL=http://order-service:3002
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.api.rule=Host(`api.example.com`)"
    depends_on:
      auth-service:
        condition: service_healthy
      order-service:
        condition: service_healthy
    networks:
      - frontend
      - backend
    healthcheck:
      test: ["CMD", "python", "-c", "import urllib.request; urllib.request.urlopen('http://localhost:8000/health')"]
      interval: 30s
      timeout: 10s
      retries: 3
    restart: unless-stopped

  # Auth Service
  auth-service:
    build: ./services/auth-service
    environment:
      - DATABASE_URL=postgresql://auth:secret@postgres:5432/auth
      - REDIS_URL=redis://redis:6379/0
    depends_on:
      postgres:
        condition: service_healthy
      redis:
        condition: service_started
    networks:
      - backend
    healthcheck:
      test: ["CMD", "node", "healthcheck.js"]
      interval: 30s
      timeout: 5s
      retries: 3
    restart: unless-stopped

  # Order Service
  order-service:
    build: ./services/order-service
    environment:
      - DATABASE_URL=postgresql://orders:secret@postgres:5432/orders
      - RABBITMQ_URL=amqp://guest:guest@rabbitmq:5672
    depends_on:
      postgres:
        condition: service_healthy
      rabbitmq:
        condition: service_healthy
    networks:
      - backend
    healthcheck:
      test: ["CMD", "python", "-c", "import psycopg2; psycopg2.connect('dbname=orders user=orders')"]
      interval: 30s
      timeout: 10s
      retries: 3
    restart: unless-stopped

  # Notification Service
  notification-service:
    build: ./services/notification-service
    environment:
      - RABBITMQ_URL=amqp://guest:guest@rabbitmq:5672
    depends_on:
      rabbitmq:
        condition: service_healthy
    networks:
      - backend
    restart: unless-stopped

  # PostgreSQL
  postgres:
    image: postgres:15-alpine
    environment:
      POSTGRES_USER: app
      POSTGRES_PASSWORD_FILE: /run/secrets/postgres_password
    secrets:
      # без этого секрет не попадёт в /run/secrets внутри контейнера
      - postgres_password
    volumes:
      - postgres-data:/var/lib/postgresql/data
    networks:
      - backend
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U app"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: always

  # Redis
  redis:
    image: redis:7-alpine
    command: redis-server --appendonly yes
    volumes:
      - redis-data:/data
    networks:
      - backend
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 5s
      retries: 5
    restart: always

  # RabbitMQ
  rabbitmq:
    image: rabbitmq:3-management-alpine
    environment:
      RABBITMQ_DEFAULT_USER: guest
      RABBITMQ_DEFAULT_PASS: guest
    volumes:
      - rabbitmq-data:/var/lib/rabbitmq
    networks:
      - backend
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
      interval: 30s
      timeout: 10s
      retries: 5
    restart: always

volumes:
  postgres-data:
  redis-data:
  rabbitmq-data:

networks:
  frontend:
    driver: bridge
  backend:
    driver: bridge
    internal: true

secrets:
  postgres_password:
    external: true
```
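К compose-файлу выше полезно приложить scripts/health-check.sh из дерева проекта; минимальный набросок, опирающийся на статусы HEALTHCHECK:

```bash
#!/bin/bash
# scripts/health-check.sh — набросок: сводка health по всем контейнерам проекта
FAILED=0

for cid in $(docker compose ps -q); do
  name=$(docker inspect --format '{{.Name}}' "$cid")
  # У контейнеров без HEALTHCHECK поля Health нет — подставляем n/a
  health=$(docker inspect --format '{{if .State.Health}}{{.State.Health.Status}}{{else}}n/a{{end}}' "$cid")
  echo "${name#/}: $health"
  [ "$health" = "unhealthy" ] && FAILED=1
done

exit $FAILED
```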
Базовые (обязательны):

Весь стек поднимается одной командой:

```bash
docker compose up -d
```

Продвинутые:
Expert:
| Критерий | Баллы | Описание |
|---|---|---|
| Функциональность | 30 | Все сервисы работают корректно |
| Безопасность | 25 | Secrets, network isolation, non-root user |
| Надёжность | 20 | Health checks, restart policies, depends_on |
| Оптимизация | 15 | Размер образов, multi-stage, кэширование |
| Мониторинг | 10 | Logging, metrics, alerting |
Итого: 100 баллов
Проходной балл: 70/100
В следующей теме вы изучите основы оркестрации: Kubernetes и Docker Swarm для управления кластерами контейнеров.