Rate Limiting Implementierung
Rate Limiting: APIs vor Überlastung schützen
Rate Limiting schützt Ihre API vor Missbrauch und Überlastung. Lernen Sie verschiedene Algorithmen und deren Implementierung.
Warum Rate Limiting?
Ohne Rate Limiting:
User → 10.000 Requests/s → API → 💥 Überlastet
Mit Rate Limiting:
User → 10.000 Requests/s → Rate Limiter → 100 Requests/s → API ✓
↓
429 Too Many Requests (Rest)
Algorithmen
| Algorithmus | Beschreibung | Burst |
|---|---|---|
| Fixed Window | Festes Zeitfenster | 2x am Fensterrand |
| Sliding Window | Gleitendes Fenster | Gleichmäßiger |
| Token Bucket | Tokens regenerieren | Kontrolliert |
| Leaky Bucket | Konstanter Abfluss | Nein |
Token Bucket (empfohlen)
# Konzept:
# - Bucket hat Kapazität (z.B. 100 Tokens)
# - Tokens werden mit Rate R nachgefüllt
# - Jeder Request verbraucht 1 Token
# - Kein Token → Request abgelehnt

┌─────────────────────────┐
│ Bucket (100 Tokens)     │
│ ████████████░░░░░░░░░   │  70 Tokens verfügbar
│                         │
│ ↑ +10/s (Refill)        │
│ ↓ -1/Request            │
└─────────────────────────┘
Implementierung (Node.js)
/**
 * Token-bucket rate limiter.
 *
 * The bucket holds at most `capacity` tokens and is topped up continuously
 * at `refillRate` tokens per second. Each request consumes one token (or
 * more, for weighted requests); an empty bucket rejects the request.
 */
class TokenBucket {
  /**
   * @param {number} capacity - Maximum number of tokens the bucket holds.
   * @param {number} refillRate - Tokens regenerated per second.
   */
  constructor(capacity, refillRate) {
    this.capacity = capacity;
    this.tokens = capacity;
    this.refillRate = refillRate; // tokens per second
    this.lastRefill = Date.now();
  }

  /** Top up the bucket based on wall-clock time elapsed since the last refill. */
  refill() {
    const now = Date.now();
    const elapsedSec = (now - this.lastRefill) / 1000;
    const replenished = this.tokens + elapsedSec * this.refillRate;
    // Never exceed capacity, no matter how long the bucket sat idle.
    this.tokens = replenished > this.capacity ? this.capacity : replenished;
    this.lastRefill = now;
  }

  /**
   * Attempt to take `tokens` tokens out of the bucket.
   * @param {number} [tokens=1] - Cost of the request.
   * @returns {boolean} true if the request is allowed, false if rejected.
   */
  tryConsume(tokens = 1) {
    this.refill();
    if (this.tokens < tokens) {
      return false;
    }
    this.tokens -= tokens;
    return true;
  }
}
// Usage: one TokenBucket per caller, created lazily on first request.
// NOTE(review): this Map grows without bound — one entry per userId/IP is
// never evicted. Fine for a demo; production code needs a TTL/LRU eviction
// or the Redis-based limiter below.
const buckets = new Map();
// Returns true if the request for `userId` is allowed, false if rate-limited.
function rateLimiter(userId) {
if (!buckets.has(userId)) {
buckets.set(userId, new TokenBucket(100, 10)); // burst of 100 max, refills 10/s
}
return buckets.get(userId).tryConsume();
}
// Express middleware: rejects over-limit requests with HTTP 429 before they
// reach any route handler.
app.use((req, res, next) => {
// Prefer the authenticated user id; fall back to the client IP for
// anonymous traffic. NOTE(review): behind a proxy, req.ip needs
// `trust proxy` configured or all clients share one bucket — verify.
const userId = req.user?.id || req.ip;
if (!rateLimiter(userId)) {
return res.status(429).json({
error: 'Too Many Requests',
// NOTE(review): hardcoded hint; 100 tokens at 10/s refill suggests
// ~10s worst case, but the actual wait depends on bucket state.
retryAfter: 10
});
}
next();
});
Redis Rate Limiter
const Redis = require('ioredis');
const redis = new Redis();
/**
 * Fixed-window rate limiter backed by Redis INCR.
 *
 * @param {string} key - Redis key identifying the client + window.
 * @param {number} limit - Maximum requests allowed per window.
 * @param {number} windowSec - Window length in seconds.
 * @returns {Promise<{allowed: boolean, remaining?: number, retryAfter?: number}>}
 */
async function rateLimiter(key, limit, windowSec) {
  const current = await redis.incr(key);
  // Set the TTL on the first hit of a fresh window. Also repair keys left
  // without a TTL (ttl === -1): if the process dies between INCR and
  // EXPIRE, the counter would otherwise never reset and the client would
  // be rate-limited forever — a known pitfall of this pattern.
  if (current === 1 || (await redis.ttl(key)) === -1) {
    await redis.expire(key, windowSec);
  }
  if (current > limit) {
    // Tell the client how long until the window resets.
    const ttl = await redis.ttl(key);
    return { allowed: false, retryAfter: ttl };
  }
  return { allowed: true, remaining: limit - current };
}
/**
 * Sliding-window rate limiter using a Redis sorted set.
 *
 * Every request is stored as a sorted-set member scored by its timestamp;
 * members older than the window are pruned before counting, so the limit
 * applies to a true rolling window instead of fixed buckets.
 *
 * @param {string} key - Redis key (one sorted set per client).
 * @param {number} limit - Maximum requests inside the window.
 * @param {number} windowMs - Window length in milliseconds.
 * @returns {Promise<{allowed: boolean, count?: number, remaining?: number}>}
 * @throws If the Redis pipeline reports an error for the count step.
 */
async function slidingWindowLimiter(key, limit, windowMs) {
  const now = Date.now();
  const windowStart = now - windowMs;
  const pipeline = redis.pipeline();
  // Drop entries that have slid out of the window.
  pipeline.zremrangebyscore(key, 0, windowStart);
  // Record the current request; the random suffix keeps members unique
  // when two requests land in the same millisecond.
  pipeline.zadd(key, now, `${now}-${Math.random()}`);
  // Count requests (including this one) still inside the window.
  pipeline.zcard(key);
  // Let idle keys expire on their own.
  pipeline.expire(key, Math.ceil(windowMs / 1000));
  const results = await pipeline.exec();
  // ioredis exec() yields [err, value] pairs. The original code read
  // results[2][1] without checking the error slot: on a pipeline error
  // `count` was undefined, `count > limit` was false, and the request was
  // silently allowed with `remaining: NaN`. Fail loudly instead.
  const [countErr, count] = results[2];
  if (countErr) {
    throw countErr;
  }
  if (count > limit) {
    return { allowed: false, count };
  }
  return { allowed: true, remaining: limit - count };
}
// Express middleware: 100 requests per rolling minute, keyed by client IP.
app.use(async (req, res, next) => {
  const key = `ratelimit:${req.ip}`;
  let result;
  try {
    result = await slidingWindowLimiter(key, 100, 60000); // 100/min
  } catch (err) {
    // Express 4 does not catch rejected promises from async middleware;
    // without this try/catch a Redis outage becomes an unhandled rejection
    // and the request hangs. Forward to the error handler instead.
    return next(err);
  }
  // Informational headers so well-behaved clients can self-throttle.
  res.set('X-RateLimit-Limit', '100');
  // `remaining` is absent on rejected requests — report 0 then.
  res.set('X-RateLimit-Remaining', String(result.remaining ?? 0));
  if (!result.allowed) {
    res.set('Retry-After', '60');
    return res.status(429).json({ error: 'Rate limit exceeded' });
  }
  next();
});
Nginx Rate Limiting
# nginx.conf — rate limiting at the reverse-proxy layer, before requests
# ever reach the application.
http {
# Define shared-memory zones keyed by client IP.
# $binary_remote_addr is the compact binary form of the address;
# "10m" is the zone size, "rate" the sustained request rate.
limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s;
limit_req_zone $binary_remote_addr zone=login:10m rate=1r/s;
server {
# API: 10 req/s sustained; "burst=20" queues up to 20 excess requests,
# "nodelay" serves the burst immediately instead of throttling it.
location /api/ {
limit_req zone=api burst=20 nodelay;
limit_req_status 429;
proxy_pass http://backend;
}
# Login: much stricter (brute-force protection). Without "nodelay",
# burst requests are delayed to match the 1 r/s rate.
location /login {
limit_req zone=login burst=5;
limit_req_status 429;
proxy_pass http://backend;
}
}
}
Express Rate Limit Package
npm install express-rate-limit rate-limit-redis
const rateLimit = require('express-rate-limit');
// rate-limit-redis v3+ (the API that accepts `sendCommand`) uses a *named*
// export. The original default-style require belongs to v2 (whose option
// was `client`), so `new RedisStore({ sendCommand })` would fail at startup.
const { RedisStore } = require('rate-limit-redis');
const Redis = require('ioredis');
const redis = new Redis();

// Standard limiter: 100 requests per 15-minute window per client,
// with state shared across processes via Redis.
const limiter = rateLimit({
  windowMs: 15 * 60 * 1000, // 15 minutes
  max: 100, // 100 requests per window
  standardHeaders: true, // emit standard RateLimit-* headers
  legacyHeaders: false, // suppress legacy X-RateLimit-* headers
  // Redis store so the limit holds across multiple app instances.
  store: new RedisStore({
    sendCommand: (...args) => redis.call(...args),
  }),
  // Body returned with 429 responses.
  message: {
    error: 'Too many requests',
    retryAfter: 900
  }
});
app.use('/api/', limiter);
// Different limits per route: auth endpoints get a much stricter budget
// to slow down credential-stuffing / brute-force attempts.
const authLimiter = rateLimit({
windowMs: 60 * 60 * 1000, // 1 hour
max: 5, // 5 attempts
// Only failed requests count toward the limit, so legitimate users who
// log in successfully are not penalized.
skipSuccessfulRequests: true
});
app.post('/login', authLimiter, loginHandler);
app.post('/register', authLimiter, registerHandler);
Response Headers
HTTP/1.1 200 OK
X-RateLimit-Limit: 100
X-RateLimit-Remaining: 95
X-RateLimit-Reset: 1699999999
HTTP/1.1 429 Too Many Requests
Retry-After: 60
X-RateLimit-Limit: 100
X-RateLimit-Remaining: 0
X-RateLimit-Reset: 1699999999
{
"error": {
"code": "RATE_LIMIT_EXCEEDED",
"message": "Too many requests. Please retry after 60 seconds."
}
}
💡 Best Practices:
- Unterschiedliche Limits für verschiedene Endpoints
- Strenge Limits für Auth-Endpoints (Brute Force)
- Header zur Info für Clients
- Redis für verteilte Systeme