HAProxy в Docker, ingress controller, helm charts, auto-discovery.
FROM haproxy:2.8

# Copy the HAProxy configuration
COPY haproxy.cfg /usr/local/etc/haproxy/haproxy.cfg

# Copy the SSL certificates
COPY certs/ /etc/haproxy/certs/

# Validate the configuration at build time (fail fast on syntax errors)
RUN haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg

# 80/443 — traffic; 8404 — stats page / metrics
EXPOSE 80 443 8404

CMD ["haproxy", "-f", "/usr/local/etc/haproxy/haproxy.cfg"]

version: '3.8'
# docker-compose: HAProxy plus two demo backends labelled for discovery
services:
  haproxy:
    build: .
    ports:
      # Quoted host:container mappings avoid YAML's sexagesimal-number trap
      - "80:80"
      - "443:443"
      - "8404:8404"
    volumes:
      - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
      - ./certs:/etc/haproxy/certs:ro
    restart: unless-stopped
    healthcheck:
      # Only validates config syntax; it does not probe live traffic
      test: ["CMD", "haproxy", "-c", "-f", "/usr/local/etc/haproxy/haproxy.cfg"]
      interval: 30s
      timeout: 10s
      retries: 3

  web1:
    image: nginx:alpine
    labels:
      - "haproxy.backend=true"

  web2:
    image: nginx:alpine
    labels:
      - "haproxy.backend=true"

docker run -d \
--name haproxy \
-p 80:80 \
-p 443:443 \
-p 8404:8404 \
-v $(pwd)/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
-v /var/run/docker.sock:/var/run/docker.sock:ro \
haproxy:2.8# Добавить репозиторий
# NOTE(review): the repo URL is the chart-repo root; the original had the
# chart name ("haproxy-ingress") wrongly appended to it
helm repo add haproxy https://haproxy-ingress.github.io/charts
helm repo update

# Install
# NOTE(review): namespace "ingress-nginx" is kept for consistency with the
# other examples, but a name such as "ingress-haproxy" would be less confusing
helm install haproxy-ingress haproxy/haproxy-ingress \
  --namespace ingress-nginx \
  --create-namespace \
  --set controller.replicaCount=2 \
  --set controller.service.type=LoadBalancer

# values.yaml
controller:
  replicaCount: 3
  service:
    type: LoadBalancer
    annotations:
      service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
  resources:
    limits:
      cpu: 1000m
      memory: 1024Mi
    requests:
      cpu: 100m
      memory: 256Mi
  config:
    # Controller config values must be strings, hence the quoting
    ssl-redirect: "true"
    use-forwarded-headers: "true"
    timeout-connect: "5s"
    timeout-server: "30s"
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
      namespace: monitoring
  affinity:
    # Spread replicas across nodes: no two controller pods on one host
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: app.kubernetes.io/name
                operator: In
                values:
                  - haproxy-ingress
          topologyKey: kubernetes.io/hostname

defaultBackend:
  replicaCount: 2

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-ingress
  annotations:
    haproxy.org/ssl-redirect: "true"
    haproxy.org/balance-algorithm: roundrobin
    haproxy.org/health-check-path: /health
spec:
  # networking.k8s.io/v1: ingressClassName replaces the deprecated
  # kubernetes.io/ingress.class annotation
  ingressClassName: haproxy
  rules:
    - host: example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: web-service
                port:
                  number: 80
          - path: /api
            pathType: Prefix
            backend:
              service:
                name: api-service
                port:
                  number: 8080
  tls:
    - hosts:
        - example.com
      secretName: example-tls

apiVersion: v1
kind: ConfigMap
metadata:
  name: haproxy-ingress-tcp-services
  namespace: ingress-nginx
data:
  # "<external port>": "<namespace>/<service>:<service port>"
  # Keys quoted: all-digit keys would otherwise parse as integers
  "3306": "default/mysql:3306"
  "5432": "default/postgresql:5432"
  "6379": "default/redis:6379"

# Automatic discovery of services via annotations
apiVersion: v1
kind: Service
metadata:
  name: web-service
  annotations:
    haproxy.org/enabled: "true"
    haproxy.org/balance-algorithm: leastconn
    # Quoted for consistency with the other annotation values
    haproxy.org/health-check-interval: "5s"
spec:
  selector:
    app: web
  ports:
    - port: 80
      targetPort: 8080

# Stable version
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-stable
  annotations:
    haproxy.org/balance-algorithm: roundrobin
spec:
  # ingressClassName replaces the deprecated kubernetes.io/ingress.class annotation
  ingressClassName: haproxy
  rules:
    - host: example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: web-stable
                port:
                  number: 80
---
# Canary version (10% of traffic)
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: web-canary
  annotations:
    haproxy.org/canary: "true"
    haproxy.org/canary-weight: "10"
spec:
  ingressClassName: haproxy
  rules:
    - host: example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: web-canary
                port:
                  number: 80

apiVersion: apps/v1
kind: Deployment
metadata:
  name: haproxy-dataplane
spec:
  replicas: 1
  selector:
    matchLabels:
      app: haproxy-dataplane
  template:
    metadata:
      labels:
        app: haproxy-dataplane
    spec:
      containers:
        - name: dataplane
          # NOTE(review): pin a concrete tag in production — ":latest" is
          # not reproducible and defeats rollback
          image: haproxytech/dataplane-api:latest
          args:
            - --config-file
            - /etc/haproxy/haproxy.cfg
            - --port
            - "5555"
          volumeMounts:
            - name: haproxy-config
              mountPath: /etc/haproxy
          ports:
            - containerPort: 5555
      volumes:
        - name: haproxy-config
          configMap:
            name: haproxy-config

// Example Go code for auto-discovery
package main
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
func discoverServices() {
config, _ := rest.InClusterConfig()
clientset, _ := kubernetes.NewForConfig(config)
services, _ := clientset.CoreV1().
Services("default").
List(context.TODO(), metav1.ListOptions{
LabelSelector: "haproxy.backend=true",
})
for _, svc := range services.Items {
// Обновление конфигурации HAProxy через Dataplane API
updateBackend(svc.Name, svc.Spec.ClusterIP)
}
}apiVersion: monitoring.coreos.com/v1
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: haproxy-ingress
  namespace: monitoring
  labels:
    app: haproxy-ingress
spec:
  selector:
    matchLabels:
      app: haproxy-ingress
  endpoints:
    - port: metrics
      # NOTE(review): 5s is an aggressive scrape interval; 15–30s is the
      # usual default — confirm Prometheus capacity before keeping this
      interval: 5s
      path: /metrics

apiVersion: v1
kind: ConfigMap
metadata:
  name: haproxy-dashboard
  namespace: monitoring
data:
  # Grafana dashboard definition embedded as a literal block scalar
  haproxy-dashboard.json: |
    {
      "dashboard": {
        "title": "HAProxy Ingress",
        "panels": [
          {
            "title": "Requests per Second",
            "targets": [
              {
                "expr": "rate(haproxy_frontend_requests_total[1m])"
              }
            ]
          }
        ]
      }
    }

# values.yaml
controller:
  replicaCount: 3
  image:
    repository: haproxytech/kubernetes-ingress
    # Quoted: an unquoted 1.9.0-style value is safe, but quoting version
    # strings uniformly avoids float surprises (e.g. "1.90")
    tag: "1.9.0"
  service:
    type: LoadBalancer
    annotations:
      service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
      service.beta.kubernetes.io/aws-load-balancer-internal: "false"
  resources:
    limits:
      cpu: "2"
      memory: 2Gi
    requests:
      cpu: 100m
      memory: 256Mi
  config:
    ssl-redirect: "true"
    use-forwarded-headers: "true"
    timeout-connect: "5s"
    timeout-server: "30s"
    max-connections: "50000"
  affinity:
    # Spread replicas across nodes
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchExpressions:
              - key: app.kubernetes.io/name
                operator: In
                values:
                  - haproxy-ingress
          topologyKey: kubernetes.io/hostname
  podDisruptionBudget:
    enabled: true
    minAvailable: 2
  metrics:
    enabled: true
    serviceMonitor:
      enabled: true
      namespace: monitoring
  logging:
    level: info
    format: json

defaultBackend:
  replicaCount: 2
  image:
    repository: haproxytech/kubernetes-ingress-defaultbackend

apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: api-ingress
  annotations:
    haproxy.org/ssl-redirect: "true"
    # Rate limiting: 100 requests per 10-second window, excess gets 429
    haproxy.org/rate-limit-requests: "100"
    haproxy.org/rate-limit-window: "10s"
    haproxy.org/rate-limit-status-code: "429"
spec:
  # ingressClassName replaces the deprecated kubernetes.io/ingress.class annotation
  ingressClassName: haproxy
  rules:
    - host: api.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: api-service
                port:
                  number: 8080

# Pod status
kubectl get pods -n ingress-nginx

# Logs
kubectl logs -n ingress-nginx -l app.kubernetes.io/name=haproxy-ingress

# Describe the ingress
kubectl describe ingress web-ingress -n default

# Validate the HAProxy configuration inside the controller pod
kubectl exec -n ingress-nginx haproxy-ingress-controller-xxx -- \
  haproxy -c -f /etc/haproxy/haproxy.cfg

# Service endpoints
kubectl get endpoints web-service

# Connectivity check from the controller pod
kubectl exec -n ingress-nginx haproxy-ingress-controller-xxx -- \
  curl http://web-service.default.svc.cluster.local/health

# ✅ Good
resources:
  limits:
    cpu: 1000m
    memory: 1024Mi
  requests:
    cpu: 100m
    memory: 256Mi
# ❌ Bad (no limits)
# resources: {}

# ✅ Good
podDisruptionBudget:
  enabled: true
  minAvailable: 2
# ❌ Bad (downtime possible during upgrades)
# podDisruptionBudget:
#   enabled: false

# ✅ Good (spread across nodes)
affinity:
  podAntiAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      - topologyKey: kubernetes.io/hostname
# ❌ Bad (all pods may land on a single node)
# affinity: {}

Изучим безопасность: WAF, rate limiting, DDoS защита.
Вопросы для этой подтемы ещё не добавлены.