// ========================================
// FILE: LLMManager.js
// Description: Central hub for all LLM calls (Node.js version)
// Supports: Claude, OpenAI, Gemini, Deepseek, Moonshot, Mistral
// ========================================

// Node 18+ exposes fetch globally (via undici); bind it for standalone calls.
const fetch = globalThis.fetch.bind(globalThis);
const { logSh } = require('./ErrorReporting');

// ============= CENTRALIZED CONFIGURATION =============

const LLM_CONFIG = {
  openai: {
    apiKey: process.env.OPENAI_API_KEY,
    endpoint: 'https://api.openai.com/v1/chat/completions',
    model: 'gpt-4o-mini',
    headers: {
      'Authorization': 'Bearer {API_KEY}',
      'Content-Type': 'application/json'
    },
    temperature: 0.7,
    timeout: 300000, // 5 minutes
    retries: 3
  },

  claude: {
    apiKey: process.env.ANTHROPIC_API_KEY,
    endpoint: 'https://api.anthropic.com/v1/messages',
    model: 'claude-sonnet-4-20250514',
    headers: {
      'x-api-key': '{API_KEY}',
      'Content-Type': 'application/json',
      'anthropic-version': '2023-06-01'
    },
    temperature: 0.7,
    timeout: 300000, // 5 minutes
    retries: 6
  },

  gemini: {
    apiKey: process.env.GOOGLE_API_KEY,
    // Note: the model name is embedded in the endpoint; keep both in sync.
    endpoint: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent',
    model: 'gemini-2.5-flash',
    headers: {
      'Content-Type': 'application/json'
    },
    temperature: 0.7,
    maxTokens: 6000,
    timeout: 300000, // 5 minutes
    retries: 3
  },

  deepseek: {
    apiKey: process.env.DEEPSEEK_API_KEY,
    endpoint: 'https://api.deepseek.com/v1/chat/completions',
    model: 'deepseek-chat',
    headers: {
      'Authorization': 'Bearer {API_KEY}',
      'Content-Type': 'application/json'
    },
    temperature: 0.7,
    timeout: 300000, // 5 minutes
    retries: 3
  },

  moonshot: {
    apiKey: process.env.MOONSHOT_API_KEY,
    endpoint: 'https://api.moonshot.ai/v1/chat/completions',
    model: 'moonshot-v1-32k',
    headers: {
      'Authorization': 'Bearer {API_KEY}',
      'Content-Type': 'application/json'
    },
    temperature: 0.7,
    timeout: 300000, // 5 minutes
    retries: 3
  },

  mistral: {
    apiKey: process.env.MISTRAL_API_KEY,
    endpoint: 'https://api.mistral.ai/v1/chat/completions',
    model: 'mistral-small-latest',
    headers: {
      'Authorization': 'Bearer {API_KEY}',
      'Content-Type': 'application/json'
    },
    // Renamed from `max_tokens`: buildRequestData() reads `config.maxTokens`,
    // so the old key was silently ignored.
    maxTokens: 5000,
    temperature: 0.7,
    timeout: 300000, // 5 minutes
    retries: 3
  }
};

// Alias for compatibility with existing code
LLM_CONFIG.gpt4 = LLM_CONFIG.openai;

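// Adding another OpenAI-compatible provider is just one more entry.
// Hypothetical sketch (Groq is not part of the original configuration); the
// new key must also be added to the OpenAI-compatible case lists in
// buildRequestData() and parseResponse() below:
//
//   LLM_CONFIG.groq = {
//     apiKey: process.env.GROQ_API_KEY,
//     endpoint: 'https://api.groq.com/openai/v1/chat/completions',
//     model: 'llama-3.1-8b-instant',
//     headers: {
//       'Authorization': 'Bearer {API_KEY}',
//       'Content-Type': 'application/json'
//     },
//     temperature: 0.7,
//     timeout: 300000,
//     retries: 3
//   };
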
// ============= HELPER FUNCTIONS =============

const sleep = (ms) => new Promise(resolve => setTimeout(resolve, ms));

// ============= UNIVERSAL INTERFACE =============

/**
 * Main entry point for calling any supported LLM.
 * @param {string} llmProvider - claude|openai|gemini|deepseek|moonshot|mistral
 * @param {string} prompt - The prompt to send
 * @param {object} options - Custom options (temperature, tokens, etc.)
 * @param {object} personality - Personality used for the system context
 * @returns {Promise<string>} - Generated response
 */
async function callLLM(llmProvider, prompt, options = {}, personality = null) {
  const startTime = Date.now();

  try {
    // Check that the provider exists
    if (!LLM_CONFIG[llmProvider]) {
      throw new Error(`Unknown LLM provider: ${llmProvider}`);
    }

    // Check that the API key is configured
    // ('VOTRE_CLE_' -- "YOUR_KEY_" -- is the placeholder prefix for unset keys)
    const config = LLM_CONFIG[llmProvider];
    if (!config.apiKey || config.apiKey.startsWith('VOTRE_CLE_')) {
      throw new Error(`Missing API key for ${llmProvider}`);
    }

    logSh(`🤖 LLM call: ${llmProvider.toUpperCase()} (${config.model}) | Personality: ${personality?.nom || 'none'}`, 'DEBUG');

    // 📢 FULL PROMPT DUMP FOR DEBUGGING, WITH PROVIDER INFO
    logSh(`\n🔍 ===== PROMPT SENT TO ${llmProvider.toUpperCase()} (${config.model}) | PERSONALITY: ${personality?.nom || 'NONE'} =====`, 'PROMPT');
    logSh(prompt, 'PROMPT');

    // 📤 FULL LLM REQUEST LOG
    logSh(`📤 LLM REQUEST [${llmProvider.toUpperCase()}] (${config.model}) | Personality: ${personality?.nom || 'NONE'}`, 'LLM');
    logSh(prompt, 'LLM');

    // Build the request payload for the given provider
    const requestData = buildRequestData(llmProvider, prompt, options, personality);

    // Perform the call with retry logic
    const response = await callWithRetry(llmProvider, requestData, config);

    // Parse the response according to the provider's format
    const content = parseResponse(llmProvider, response);

    // 📥 FULL LLM RESPONSE LOG
    logSh(`📥 LLM RESPONSE [${llmProvider.toUpperCase()}] (${config.model}) | Duration: ${Date.now() - startTime}ms`, 'LLM');
    logSh(content, 'LLM');

    const duration = Date.now() - startTime;
    logSh(`✅ ${llmProvider.toUpperCase()} (${personality?.nom || 'no personality'}) responded in ${duration}ms`, 'INFO');

    // Record usage stats (character counts serve as rough token proxies)
    await recordUsageStats(llmProvider, prompt.length, content.length, duration);

    return content;

  } catch (error) {
    const duration = Date.now() - startTime;
    logSh(`❌ ${llmProvider.toUpperCase()} error (${personality?.nom || 'no personality'}): ${error.toString()}`, 'ERROR');

    // Record the failure
    await recordUsageStats(llmProvider, prompt.length, 0, duration, error.toString());

    throw error;
  }
}

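// Illustrative usage sketch (defined but never called, so module behavior is
// unchanged): the `personality` shape (nom/description/style) mirrors the
// fields callLLM reads; the provider name, prompt, and options are placeholders.
async function exampleCallLLMUsage() {
  const personality = {
    nom: 'Claire',
    description: 'Tech journalist specialized in web infrastructure',
    style: 'concise and factual'
  };
  // Any configured provider key from LLM_CONFIG works here.
  return await callLLM('claude', 'Summarize HTTP/3 in two sentences.',
    { temperature: 0.4, maxTokens: 300 }, personality);
}
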
// ============= REQUEST BUILDING =============

function buildRequestData(provider, prompt, options, personality) {
  const config = LLM_CONFIG[provider];
  const temperature = options.temperature || config.temperature;
  const maxTokens = options.maxTokens || config.maxTokens;

  // Build the system prompt when a personality is provided
  const systemPrompt = personality ?
    `You are ${personality.nom}. ${personality.description}. Style: ${personality.style}` :
    'You are an expert assistant.';

  switch (provider) {
    // These providers share the OpenAI-compatible chat completions payload
    case 'openai':
    case 'deepseek':
    case 'moonshot':
    case 'mistral':
      return {
        model: config.model,
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: prompt }
        ],
        // If maxTokens is undefined, JSON.stringify drops the key and the
        // provider default applies.
        max_tokens: maxTokens,
        temperature: temperature,
        stream: false
      };

    case 'claude':
      return {
        model: config.model,
        // The Anthropic Messages API requires max_tokens; 4096 is an assumed
        // fallback for callers that pass no limit.
        max_tokens: maxTokens || 4096,
        temperature: temperature,
        system: systemPrompt,
        messages: [
          { role: 'user', content: prompt }
        ]
      };

    case 'gemini':
      // Gemini has no separate system role here; prepend it to the user text
      return {
        contents: [{
          parts: [{
            text: `${systemPrompt}\n\n${prompt}`
          }]
        }],
        generationConfig: {
          temperature: temperature,
          maxOutputTokens: maxTokens
        }
      };

    default:
      throw new Error(`Unsupported request format for ${provider}`);
  }
}

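// For reference (illustrative values only), the Claude branch above yields a
// payload shaped like:
//
//   {
//     "model": "claude-sonnet-4-20250514",
//     "max_tokens": 4096,
//     "temperature": 0.7,
//     "system": "You are Claire. Tech journalist. Style: concise and factual",
//     "messages": [{ "role": "user", "content": "..." }]
//   }
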
// ============= CALLS WITH RETRY =============

async function callWithRetry(provider, requestData, config) {
  let lastError;

  for (let attempt = 1; attempt <= config.retries; attempt++) {
    try {
      logSh(`🔄 Attempt ${attempt}/${config.retries} for ${provider.toUpperCase()}`, 'DEBUG');

      // Prepare headers, substituting the API key placeholder
      const headers = {};
      Object.keys(config.headers).forEach(key => {
        headers[key] = config.headers[key].replace('{API_KEY}', config.apiKey);
      });

      // Gemini is a special case: the API key goes in the URL
      let url = config.endpoint;
      if (provider === 'gemini') {
        url += `?key=${config.apiKey}`;
      }

      const options = {
        method: 'POST',
        headers: headers,
        body: JSON.stringify(requestData),
        // Native fetch silently ignores a `timeout` property;
        // AbortSignal.timeout() (Node 17.3+) is the supported way to enforce one.
        signal: AbortSignal.timeout(config.timeout)
      };

      const response = await fetch(url, options);
      const responseText = await response.text();

      if (response.ok) {
        return JSON.parse(responseText);
      } else if (response.status === 429) {
        // Rate limited: back off exponentially before retrying
        const waitTime = Math.pow(2, attempt) * 1000;
        logSh(`⏳ Rate limit ${provider.toUpperCase()}, waiting ${waitTime}ms`, 'WARNING');
        // Keep a cause so the final error is meaningful if retries run out
        lastError = new Error(`HTTP 429: ${responseText}`);
        await sleep(waitTime);
        continue;
      } else {
        throw new Error(`HTTP ${response.status}: ${responseText}`);
      }

    } catch (error) {
      lastError = error;

      if (attempt < config.retries) {
        const waitTime = 1000 * attempt;
        logSh(`⚠ Error on attempt ${attempt}: ${error.toString()}, retrying in ${waitTime}ms`, 'WARNING');
        await sleep(waitTime);
      }
    }
  }

  throw new Error(`Failed after ${config.retries} attempts: ${lastError.toString()}`);
}

// ============= RESPONSE PARSING =============

function parseResponse(provider, responseData) {
  try {
    switch (provider) {
      case 'openai':
      case 'deepseek':
      case 'moonshot':
      case 'mistral':
        return responseData.choices[0].message.content.trim();

      case 'claude':
        return responseData.content[0].text.trim();

      case 'gemini': {
        const candidate = responseData.candidates[0];

        // Multiple fallback checks for Gemini 2.5 response variants
        if (candidate && candidate.content && candidate.content.parts && candidate.content.parts[0] && candidate.content.parts[0].text) {
          return candidate.content.parts[0].text.trim();
        } else if (candidate && candidate.text) {
          return candidate.text.trim();
        } else if (candidate && candidate.content && candidate.content.text) {
          return candidate.content.text.trim();
        } else {
          // Debug: log the full structure
          logSh('Full Gemini structure: ' + JSON.stringify(responseData), 'DEBUG');
          return '[Gemini: no text generated - model issue]';
        }
      }

      default:
        throw new Error(`Unsupported parser for ${provider}`);
    }
  } catch (error) {
    logSh(`❌ Parsing error for ${provider}: ${error.toString()}`, 'ERROR');
    logSh(`Raw response: ${JSON.stringify(responseData)}`, 'DEBUG');
    throw new Error(`Unable to parse ${provider} response: ${error.toString()}`);
  }
}

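// For reference, a successful Gemini generateContent response looks like this
// (abridged), which is why the primary branch above reads
// responseData.candidates[0].content.parts[0].text:
//
//   {
//     "candidates": [{
//       "content": { "parts": [{ "text": "..." }], "role": "model" },
//       "finishReason": "STOP"
//     }]
//   }
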
// ============= USAGE STATISTICS =============

async function recordUsageStats(provider, promptTokens, responseTokens, duration, error = null) {
  try {
    // TODO: Adapt to your Node.js storage layer
    // (database, flat file, MongoDB, etc.)
    const statsData = {
      timestamp: new Date(),
      provider: provider,
      model: LLM_CONFIG[provider].model,
      promptTokens: promptTokens,
      responseTokens: responseTokens,
      duration: duration,
      error: error || ''
    };

    // Example: log to console or file
    logSh(`📊 Stats: ${JSON.stringify(statsData)}`, 'DEBUG');

    // TODO: Implement real persistence (DB, file, etc.)

  } catch (statsError) {
    // Never crash the workflow because stats recording failed
    logSh(`⚠ Failed to record stats: ${statsError.toString()}`, 'WARNING');
  }
}

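// Minimal sketch for the persistence TODO above (an assumed approach, not the
// original storage layer): append each record as one line of JSON (JSONL),
// which is cheap to write and easy to aggregate later. Not wired in anywhere;
// recordUsageStats could call it where it currently only logs.
const fs = require('fs/promises');

async function appendStatsToFile(statsData, file = 'llm-stats.jsonl') {
  // One self-contained JSON object per line keeps appends simple and cheap
  await fs.appendFile(file, JSON.stringify(statsData) + '\n');
}
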
// ============= UTILITY FUNCTIONS =============

/**
 * Test connectivity of every LLM
 */
async function testAllLLMs() {
  const testPrompt = "Say hello in at most 5 words.";
  const results = {};

  // Note: this includes the gpt4 alias, which shares openai's config
  const allProviders = Object.keys(LLM_CONFIG);

  for (const provider of allProviders) {
    try {
      logSh(`🧪 Testing ${provider}...`, 'INFO');

      const response = await callLLM(provider, testPrompt);
      results[provider] = {
        status: 'SUCCESS',
        response: response,
        model: LLM_CONFIG[provider].model
      };

    } catch (error) {
      results[provider] = {
        status: 'ERROR',
        error: error.toString(),
        model: LLM_CONFIG[provider].model
      };
    }

    // Short delay between tests
    await sleep(500);
  }

  logSh(`📊 Tests finished: ${JSON.stringify(results, null, 2)}`, 'INFO');
  return results;
}

/**
 * Get the available providers (those with valid API keys)
 */
function getAvailableProviders() {
  const available = [];

  Object.keys(LLM_CONFIG).forEach(provider => {
    const config = LLM_CONFIG[provider];
    if (config.apiKey && !config.apiKey.startsWith('VOTRE_CLE_')) {
      available.push(provider);
    }
  });

  return available;
}

/**
 * Get usage statistics per provider
 */
async function getUsageStats() {
  try {
    // TODO: Adapt to your storage layer
    // For now, return a default message
    return { message: 'Statistics not implemented in Node.js yet' };

  } catch (error) {
    return { error: error.toString() };
  }
}

// ============= MIGRATION FROM LEGACY CODE =============

/**
 * Compatibility wrapper replacing the legacy callOpenAI().
 * Keeps the same signature so existing callers don't break.
 */
async function callOpenAI(prompt, personality) {
  return await callLLM('openai', prompt, {}, personality);
}

// ============= EXPORTS FOR TESTS =============

/**
 * Quick smoke test
 */
async function testLLMManager() {
  logSh('🚀 Testing the Node.js LLM Manager...', 'INFO');

  // List available providers (the total was hardcoded to 6; derive it instead,
  // noting that the count includes the gpt4 alias)
  const available = getAvailableProviders();
  logSh('Available providers: ' + available.join(', ') + ' (' + available.length + '/' + Object.keys(LLM_CONFIG).length + ')', 'INFO');

  // Simple call test on each available provider
  for (const provider of available) {
    try {
      logSh(`🧪 Testing ${provider}...`, 'DEBUG');
      const startTime = Date.now();

      const response = await callLLM(provider, 'Just say "Test OK"');
      const duration = Date.now() - startTime;

      logSh(`✅ ${provider} test passed: "${response}" (${duration}ms)`, 'INFO');

    } catch (error) {
      logSh(`❌ ${provider} test failed: ${error.toString()}`, 'ERROR');
    }

    // Short delay to avoid rate limits
    await sleep(500);
  }

  // OpenAI-specific test (compatibility with legacy code)
  try {
    logSh('🎯 OpenAI-specific test (compatibility)...', 'DEBUG');
    const response = await callLLM('openai', 'Just say "Test OK"');
    logSh('✅ OpenAI compatibility test: ' + response, 'INFO');
  } catch (error) {
    logSh('❌ OpenAI compatibility test failed: ' + error.toString(), 'ERROR');
  }

  // Display usage stats
  try {
    logSh('📊 Fetching usage statistics...', 'DEBUG');
    const stats = await getUsageStats();

    if (stats.error) {
      logSh('⚠ Failed to fetch stats: ' + stats.error, 'WARNING');
    } else if (stats.message) {
      logSh('📊 Stats: ' + stats.message, 'INFO');
    } else {
      // Format per-provider stats for the logs
      Object.keys(stats).forEach(provider => {
        const s = stats[provider];
        logSh(`📈 ${provider}: ${s.calls} calls, ${s.successRate}% success, ${s.avgDuration}ms average`, 'INFO');
      });
    }
  } catch (error) {
    logSh('❌ Error while fetching stats: ' + error.toString(), 'ERROR');
  }

  // Final summary
  const workingCount = available.length;
  const totalProviders = Object.keys(LLM_CONFIG).length;

  if (workingCount === totalProviders) {
    logSh(`✅ LLM Manager test COMPLETE: ${workingCount}/${totalProviders} providers operational`, 'INFO');
  } else if (workingCount >= 2) {
    logSh(`✅ LLM Manager test PARTIAL: ${workingCount}/${totalProviders} providers operational (enough for DNA Mixing)`, 'INFO');
  } else {
    logSh(`❌ LLM Manager test INSUFFICIENT: ${workingCount}/${totalProviders} providers operational (minimum 2 required)`, 'ERROR');
  }

  logSh('🏁 LLM Manager test finished', 'INFO');
}

/**
 * Full version that tests every provider (even unconfigured ones)
 */
async function testLLMManagerComplete() {
  logSh('🚀 FULL LLM Manager test (all providers)...', 'INFO');

  const allProviders = Object.keys(LLM_CONFIG);
  logSh(`Defined providers: ${allProviders.join(', ')}`, 'INFO');

  const results = {
    configured: 0,
    working: 0,
    failed: 0
  };

  for (const provider of allProviders) {
    const config = LLM_CONFIG[provider];

    // Skip unconfigured providers
    if (!config.apiKey || config.apiKey.startsWith('VOTRE_CLE_')) {
      logSh(`⚙️ ${provider}: NOT CONFIGURED (missing API key)`, 'WARNING');
      continue;
    }

    results.configured++;

    try {
      logSh(`🧪 Testing ${provider} (${config.model})...`, 'DEBUG');
      const startTime = Date.now();

      const response = await callLLM(provider, 'Reply "OK" only.', { maxTokens: 100 });
      const duration = Date.now() - startTime;

      results.working++;
      logSh(`✅ ${provider}: "${response.trim()}" (${duration}ms)`, 'INFO');

    } catch (error) {
      results.failed++;
      logSh(`❌ ${provider}: ${error.toString()}`, 'ERROR');
    }

    // Delay between tests
    await sleep(700);
  }

  // Full final summary
  logSh(`📊 FINAL SUMMARY:`, 'INFO');
  logSh(`  • Total providers: ${allProviders.length}`, 'INFO');
  logSh(`  • Configured: ${results.configured}`, 'INFO');
  logSh(`  • Working: ${results.working}`, 'INFO');
  logSh(`  • Failed: ${results.failed}`, 'INFO');

  const status = results.working >= 4 ? 'EXCELLENT' :
                 results.working >= 2 ? 'GOOD' : 'INSUFFICIENT';

  logSh(`🏆 STATUS: ${status} (${results.working} LLMs operational)`,
        status === 'INSUFFICIENT' ? 'ERROR' : 'INFO');

  logSh('🏁 FULL LLM Manager test finished', 'INFO');

  return {
    total: allProviders.length,
    configured: results.configured,
    working: results.working,
    failed: results.failed,
    status: status
  };
}

// ============= MODULE EXPORTS =============

module.exports = {
  callLLM,
  callOpenAI,
  testAllLLMs,
  getAvailableProviders,
  getUsageStats,
  testLLMManager,
  testLLMManagerComplete,
  LLM_CONFIG
};