Nginx Reverse Proxy Configuration
Setting up Nginx as a Reverse Proxy
A reverse proxy accepts incoming requests and forwards them to one or more backend servers. Nginx is ideal for this: fast, stable, and easy to configure. Perfect for Docker, Node.js apps, or load balancing.
What does a reverse proxy do?
- SSL termination: HTTPS at the proxy, HTTP to the backend
- Load balancing: distribute requests across several servers
- Caching: serve static content from a cache
- Security: hide the backend servers
- Compression: gzip/Brotli at the proxy (see the sketch below)
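Compression can be handled directly at the proxy. A minimal gzip sketch (these directives belong in the http block, e.g. /etc/nginx/nginx.conf; Brotli requires the third-party ngx_brotli module and is not covered here):
gzip on;
gzip_types text/plain text/css application/json application/javascript image/svg+xml;
gzip_min_length 1024;
gzip_comp_level 5;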
Installing Nginx
sudo apt update
sudo apt install nginx -y
sudo systemctl enable nginx
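A quick check that the service is installed and running:
sudo systemctl status nginx
nginx -v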
A simple reverse proxy
sudo nano /etc/nginx/sites-available/app.example.com
server {
    listen 80;
    server_name app.example.com;

    location / {
        proxy_pass http://127.0.0.1:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_cache_bypass $http_upgrade;
    }
}
# Enable the site
sudo ln -s /etc/nginx/sites-available/app.example.com /etc/nginx/sites-enabled/
sudo nginx -t
sudo systemctl reload nginx
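A quick test from the server itself (assuming the backend on port 3000 is already running):
curl -H "Host: app.example.com" http://127.0.0.1/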
With SSL (Let's Encrypt)
server {
    listen 80;
    server_name app.example.com;
    return 301 https://$server_name$request_uri;
}

server {
    listen 443 ssl http2;
    server_name app.example.com;

    ssl_certificate /etc/letsencrypt/live/app.example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/app.example.com/privkey.pem;

    # SSL settings
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_prefer_server_ciphers off;
    ssl_session_cache shared:SSL:10m;

    location / {
        proxy_pass http://127.0.0.1:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
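The certificate paths above assume a Let's Encrypt certificate issued with Certbot; one way to obtain it on Debian/Ubuntu (the nginx plugin validates the domain through the running web server):
sudo apt install certbot python3-certbot-nginx -y
sudo certbot certonly --nginx -d app.example.com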
Load Balancing
upstream backend {
    # Round robin (default)
    server 192.168.1.101:3000;
    server 192.168.1.102:3000;
    server 192.168.1.103:3000;
}

# Or with weighting
upstream backend_weighted {
    server 192.168.1.101:3000 weight=3;
    server 192.168.1.102:3000 weight=2;
    server 192.168.1.103:3000 weight=1;
}

# Or IP hash (sticky sessions)
upstream backend_sticky {
    ip_hash;
    server 192.168.1.101:3000;
    server 192.168.1.102:3000;
}

# Or least connections
upstream backend_least {
    least_conn;
    server 192.168.1.101:3000;
    server 192.168.1.102:3000;
}

server {
    listen 80;
    server_name app.example.com;

    location / {
        proxy_pass http://backend;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
    }
}
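A backend can also be taken out of rotation by hand, e.g. for maintenance, without deleting it from the upstream block:
upstream backend {
    server 192.168.1.101:3000;
    server 192.168.1.102:3000 down;   # temporarily excluded from load balancing
}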
Proxy for Docker containers
# docker-compose.yml starts the app on port 8080
server {
    listen 80;
    server_name docker-app.example.com;

    location / {
        proxy_pass http://127.0.0.1:8080;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }
}
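For this to work, the container port has to be published on the host. One way with plain docker run (my-app is just a placeholder image; binding to 127.0.0.1 keeps the port reachable only through the proxy):
docker run -d --name my-app -p 127.0.0.1:8080:3000 my-app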
WebSocket support
location /socket.io/ {
    proxy_pass http://127.0.0.1:3000;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $host;
    proxy_read_timeout 86400;
}
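The nginx documentation also suggests a map so that ordinary (non-WebSocket) requests keep a normal Connection header; the map goes in the http block:
map $http_upgrade $connection_upgrade {
    default upgrade;
    ''      close;
}
# and inside the location block:
#     proxy_set_header Connection $connection_upgrade;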
Multiple apps on one server
# App 1
server {
    listen 80;
    server_name app1.example.com;

    location / {
        proxy_pass http://127.0.0.1:3001;
    }
}

# App 2
server {
    listen 80;
    server_name app2.example.com;

    location / {
        proxy_pass http://127.0.0.1:3002;
    }
}

# App 3 - path-based
server {
    listen 80;
    server_name example.com;

    location /api/ {
        proxy_pass http://127.0.0.1:4000/;
    }

    location /admin/ {
        proxy_pass http://127.0.0.1:5000/;
    }

    location / {
        proxy_pass http://127.0.0.1:3000;
    }
}
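Note the trailing slash on the path-based proxy_pass targets: when proxy_pass contains a URI, the part of the request that matched the location prefix is replaced by that URI, so the prefix is stripped before the request reaches the backend:
# Request:  /api/users
# Backend:  http://127.0.0.1:4000/users   (the /api/ prefix is stripped)
# Without the trailing slash, /api/users would be forwarded unchanged.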
Caching at the proxy
# Define the cache zone (in the http block or nginx.conf)
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m max_size=1g inactive=60m;

server {
    listen 80;
    server_name cached.example.com;

    location / {
        proxy_pass http://127.0.0.1:3000;
        proxy_cache my_cache;
        proxy_cache_valid 200 60m;
        proxy_cache_valid 404 1m;
        add_header X-Cache-Status $upstream_cache_status;
    }
}
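Whether the cache is actually being used can be checked via the header added above; the first request should typically report a MISS, a repeated one a HIT:
curl -I http://cached.example.com/
curl -I http://cached.example.com/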
Configuring timeouts
location / {
    proxy_pass http://127.0.0.1:3000;
    proxy_connect_timeout 60s;
    proxy_send_timeout 60s;
    proxy_read_timeout 60s;

    # For large uploads
    client_max_body_size 100M;
}
Health Checks
upstream backend {
    server 192.168.1.101:3000 max_fails=3 fail_timeout=30s;
    server 192.168.1.102:3000 max_fails=3 fail_timeout=30s;
    server 192.168.1.103:3000 backup; # only used when the others fail
}
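These are passive checks: failed real requests mark a server as unavailable for fail_timeout (active checks via the health_check directive are an Nginx Plus feature). Which responses count as a failure and trigger a retry on the next server is controlled by proxy_next_upstream, e.g.:
location / {
    proxy_pass http://backend;
    proxy_next_upstream error timeout http_502 http_503;
    proxy_next_upstream_tries 2;
}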
Debugging
# Which backend server answered?
add_header X-Upstream-Server $upstream_addr;

# Logs with upstream info
log_format proxy '$remote_addr - $upstream_addr - $request';

# Error log for proxy problems
error_log /var/log/nginx/proxy-error.log debug;
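log_format must be defined in the http block and is then referenced from an access_log directive; note also that debug-level error logging only produces output if Nginx was built with --with-debug:
access_log /var/log/nginx/proxy-access.log proxy;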
Further help
- 📖 Nginx proxy documentation
- 📧 Email: support@enjyn.de