Remove unnecessary rate limiters and optimize LLM limit polling

Problems fixed:
- Polling /api/llm/limit every 3 s (1,200 requests/hour per user)
- Per-IP rate limiters were blocking legitimate translation requests
- globalLimiter (200/15 min) and translationLimiter (10/min) were redundant

Changes:
- Removed the 3 s setInterval in index.html
- LLM counter is now refreshed only after translations (sketched below)
- Removed globalLimiter and translationLimiter
- Kept only checkLLMLimit() (per API key, 20/day)
- Display fix: the counter now uses data.remaining returned by the API

Result: a simple, clear, working quota system

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
StillHammer 2025-12-02 20:24:50 +08:00
parent f2143bb10b
commit 272a05b3fe
3 changed files with 20 additions and 54 deletions
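A minimal sketch of the client-side flow described above, assuming the /api/llm/limit response carries used, limit and remaining (the fields read in the first hunk below) and that the API key travels in an X-API-Key header (the header name does not appear in this commit):

// Hypothetical sketch of updateLLMLimit(); the real implementation is not part of this diff.
async function updateLLMLimit() {
  const apiKey = getApiKey();                // helper already present in index.html
  if (!apiKey) return;                       // nothing to show for anonymous users
  const res = await fetch('/api/llm/limit', {
    headers: { 'X-API-Key': apiKey }         // header name assumed
  });
  const data = await res.json();             // expected shape: { used, limit, remaining }
  // The real function then updates the counter element shown in the first hunk below.
  console.log(`LLM requests remaining: ${data.remaining}/${data.limit}`);
}

// Called after each translation instead of on a 3-second interval:
updateLLMLimit().catch(err => console.error('Error updating LLM limit:', err));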

File: index.html

@@ -884,9 +884,8 @@
 } else {
 // User with limited requests
 counter.style.display = 'block';
-const used = data.used || 0;
 const limit = data.limit || 20;
-const remaining = limit - used;
+const remaining = data.remaining !== undefined ? data.remaining : (limit - (data.used || 0));
 text.textContent = `Requêtes LLM restantes: ${remaining}/${limit}`;
 }
 } catch (error) {
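The explicit !== undefined test matters because remaining can legitimately be 0 once the quota is exhausted, and 0 would be discarded by a || fallback. If the supported runtimes allow it, nullish coalescing expresses the same intent more tersely (a sketch, not what the commit ships):

// Falls back to the local computation only when the API omits remaining entirely
const remaining = data.remaining ?? (limit - (data.used || 0));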
@@ -953,23 +952,8 @@
 }
 });
-// Refresh LLM limit counter every 3 seconds if logged in
-let limitErrorCount = 0;
-const limitInterval = setInterval(() => {
-if (getApiKey()) {
-updateLLMLimit().catch(err => {
-limitErrorCount++;
-// Stop polling after 5 consecutive errors
-if (limitErrorCount >= 5) {
-console.warn('Too many errors loading LLM limit, stopping auto-refresh');
-clearInterval(limitInterval);
-}
-});
-} else {
-// Reset error count if not logged in
-limitErrorCount = 0;
-}
-}, 3000);
+// Note: LLM limit counter is updated after each translation
+// No need for automatic polling every few seconds
 });
 // Authenticated fetch wrapper with auto-logout on 401/403
@@ -1421,6 +1405,10 @@
 } else {
 document.getElementById('layer1-content').innerHTML = `<span class="error">Erreur: ${data.error}</span>`;
 }
+// Update LLM limit counter after translation
+updateLLMLimit().catch(err => console.error('Error updating LLM limit:', err));
 } catch (error) {
 document.getElementById('layer1-content').innerHTML = `<span class="error">Erreur: ${error.message}</span>`;
 } finally {
@@ -1533,6 +1521,10 @@
 } else {
 document.getElementById('cf2fr-layer1-content').innerHTML = `<span class="error">Erreur LLM: ${llmData.error}</span>`;
 }
+// Update LLM limit counter after translation
+updateLLMLimit().catch(err => console.error('Error updating LLM limit:', err));
 } catch (error) {
 document.getElementById('cf2fr-layer1-content').innerHTML = `<span class="error">Erreur: ${error.message}</span>`;
 } finally {

File: rateLimiter.js

@@ -1,32 +1,8 @@
 const rateLimit = require('express-rate-limit');
-// Rate limiter global par IP
-const globalLimiter = rateLimit({
-windowMs: 15 * 60 * 1000, // 15 minutes
-max: 200, // max 200 requêtes par IP
-standardHeaders: true,
-legacyHeaders: false,
-skip: (req) => {
-// Skip pour les endpoints qui doivent être appelés très fréquemment
-return req.path === '/api/llm/limit';
-},
-message: { error: 'Too many requests from this IP, please try again later.' }
-});
-// Rate limiter pour les traductions (plus strict)
-const translationLimiter = rateLimit({
-windowMs: 60 * 1000, // 1 minute
-max: 10, // max 10 traductions par minute
-standardHeaders: true,
-legacyHeaders: false,
-skip: (req) => {
-// Skip si l'utilisateur est admin
-return req.user && req.user.role === 'admin';
-},
-message: { error: 'Too many translation requests. Please wait a moment.' }
-});
 // Rate limiter pour les endpoints sensibles (admin)
+// Note: Pour les traductions et requêtes LLM, on utilise checkLLMLimit() dans auth.js
+// qui gère les limites par API key (plus flexible et précis que les rate limiters par IP)
 const adminLimiter = rateLimit({
 windowMs: 5 * 60 * 1000, // 5 minutes
 max: 50,
@@ -36,7 +12,5 @@ const adminLimiter = rateLimit({
 });
 module.exports = {
-globalLimiter,
-translationLimiter,
 adminLimiter
 };
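After this cleanup the module reduces to roughly the sketch below; the adminLimiter options hidden by the collapsed hunk (standardHeaders, legacyHeaders, the exact message) are assumptions:

// rateLimiter.js after the change (sketch; some adminLimiter options assumed)
const rateLimit = require('express-rate-limit');

// Translation and LLM endpoints rely on checkLLMLimit() in auth.js instead,
// which enforces per-API-key limits rather than per-IP rate limits.

// Rate limiter for sensitive (admin) endpoints
const adminLimiter = rateLimit({
  windowMs: 5 * 60 * 1000, // 5 minutes
  max: 50,
  standardHeaders: true,   // assumed, hidden by the collapsed hunk
  legacyHeaders: false,    // assumed, hidden by the collapsed hunk
  message: { error: 'Too many admin requests, please try again later.' } // wording assumed
});

module.exports = { adminLimiter };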

File: main Express server

@@ -17,7 +17,7 @@ const { translateConfluentToFrench, translateConfluentDetailed } = require('./co
 // Security modules
 const { authenticate, requireAdmin, createToken, listTokens, disableToken, enableToken, deleteToken, getGlobalStats, trackLLMUsage, checkLLMLimit } = require('./auth');
-const { globalLimiter, translationLimiter, adminLimiter } = require('./rateLimiter');
+const { adminLimiter } = require('./rateLimiter');
 const { requestLogger, getLogs, getLogStats } = require('./logger');
 const app = express();
@@ -26,7 +26,7 @@ const PORT = process.env.PORT || 3000;
 // Middlewares
 app.use(express.json());
 app.use(requestLogger); // Log toutes les requêtes
-app.use(globalLimiter); // Rate limiting global
+// Rate limiting: on utilise uniquement checkLLMLimit() par API key, pas de rate limit global par IP
 // Route protégée pour admin.html (AVANT express.static)
 // Vérifie l'auth seulement si API key présente, sinon laisse passer (le JS client vérifiera)
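checkLLMLimit() itself lives in auth.js and is not part of this diff. Based on the commit message (per API key, 20 requests/day) it presumably works along the lines of the sketch below; everything beyond the 20/day rule, including the signatures and the in-memory store, is an assumption:

// Hypothetical per-API-key daily quota check (the real auth.js implementation is not shown).
const DAILY_LLM_LIMIT = 20;
const usageByKey = new Map(); // apiKey -> { day: 'YYYY-MM-DD', used: number }

function checkLLMLimit(apiKey) {
  const today = new Date().toISOString().slice(0, 10);
  const entry = usageByKey.get(apiKey);
  const used = entry && entry.day === today ? entry.used : 0;
  return {
    allowed: used < DAILY_LLM_LIMIT,
    used,
    limit: DAILY_LLM_LIMIT,
    remaining: Math.max(0, DAILY_LLM_LIMIT - used),
  };
}

function trackLLMUsage(apiKey) {
  const today = new Date().toISOString().slice(0, 10);
  const entry = usageByKey.get(apiKey);
  if (entry && entry.day === today) entry.used += 1;
  else usageByKey.set(apiKey, { day: today, used: 1 });
}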
@@ -378,7 +378,7 @@ app.post('/api/analyze/coverage', authenticate, (req, res) => {
 });
 // Translation endpoint (NOUVEAU SYSTÈME CONTEXTUEL)
-app.post('/translate', authenticate, translationLimiter, async (req, res) => {
+app.post('/translate', authenticate, async (req, res) => {
 const { text, target, provider, model, temperature = 1.0, useLexique = true, customAnthropicKey, customOpenAIKey } = req.body;
 if (!text || !target || !provider || !model) {
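With translationLimiter gone, a call to /translate only needs a valid API key; the daily quota is enforced per key by checkLLMLimit(). A usage sketch, with the X-API-Key header name assumed and the body fields taken from the destructuring above:

// Example client call to the /translate endpoint (header name is an assumption)
const res = await fetch('/translate', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json', 'X-API-Key': getApiKey() },
  body: JSON.stringify({
    text: 'Bonjour le monde',
    target: 'ancien',
    provider: 'anthropic',
    model: 'claude-sonnet-4-20250514',
    temperature: 1.0,
    useLexique: true,
  }),
});
const data = await res.json(); // once the 20/day quota is used up, checkLLMLimit() presumably rejects the request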
@@ -586,7 +586,7 @@ function parseTranslationResponse(response) {
 }
 // Raw translation endpoint (for debugging - returns unprocessed LLM output) - SECURED
-app.post('/api/translate/raw', authenticate, translationLimiter, async (req, res) => {
+app.post('/api/translate/raw', authenticate, async (req, res) => {
 const { text, target, provider, model, useLexique = true, customAnthropicKey, customOpenAIKey } = req.body;
 if (!text || !target || !provider || !model) {
@@ -700,7 +700,7 @@ app.post('/api/translate/raw', authenticate, translationLimiter, async (req, res) => {
 });
 // Batch translation endpoint - SECURED
-app.post('/api/translate/batch', authenticate, translationLimiter, async (req, res) => {
+app.post('/api/translate/batch', authenticate, async (req, res) => {
 const { words, target = 'ancien' } = req.body;
 if (!words || !Array.isArray(words)) {
@@ -727,7 +727,7 @@ app.post('/api/translate/batch', authenticate, translationLimiter, async (req, res) => {
 });
 // Confluent → French translation endpoint (traduction brute) - SECURED
-app.post('/api/translate/conf2fr', authenticate, translationLimiter, (req, res) => {
+app.post('/api/translate/conf2fr', authenticate, (req, res) => {
 const { text, variant = 'ancien', detailed = false } = req.body;
 if (!text) {
@@ -755,7 +755,7 @@ app.post('/api/translate/conf2fr', authenticate, translationLimiter, (req, res) => {
 });
 // NEW: Confluent → French with LLM refinement
-app.post('/api/translate/conf2fr/llm', authenticate, translationLimiter, async (req, res) => {
+app.post('/api/translate/conf2fr/llm', authenticate, async (req, res) => {
 const { text, variant = 'ancien', provider = 'anthropic', model = 'claude-sonnet-4-20250514', customAnthropicKey, customOpenAIKey } = req.body;
 if (!text) {