// ========================================
// TRANSITION LAYER - MODULAR TRANSITIONS LAYER
// Responsibility: reusable, modular fluidity enhancement
// LLM: Gemini (optimal linguistic fluidity)
// ========================================

const { callLLM } = require('../LLMManager');
const { logSh } = require('../ErrorReporting');
const { tracer } = require('../trace');
const { chunkArray, sleep } = require('./SelectiveUtils');

/**
 * MODULAR TRANSITIONS LAYER
 */
class TransitionLayer {
  constructor() {
    this.name = 'TransitionEnhancement';
    this.defaultLLM = 'gemini';
    this.priority = 2; // Medium priority - applied after the technical layer
  }

  /**
   * MAIN METHOD - Apply transition enhancement
   */
  async apply(content, config = {}) {
    return await tracer.run('TransitionLayer.apply()', async () => {
      const {
        llmProvider = this.defaultLLM,
        intensity = 1.0,        // 0.0-2.0 enhancement intensity
        analysisMode = true,    // Analyze before applying
        csvData = null,
        preserveStructure = true,
        targetIssues = null     // Specific issues to fix
      } = config;

      await tracer.annotate({
        transitionLayer: true,
        llmProvider,
        intensity,
        elementsCount: Object.keys(content).length,
        mc0: csvData?.mc0
      });

      const startTime = Date.now();
      logSh(`🔗 TRANSITION LAYER: Amélioration fluidité (${llmProvider})`, 'INFO');
      logSh(` 📊 ${Object.keys(content).length} éléments | Intensité: ${intensity}`, 'INFO');

      try {
        let enhancedContent = {};
        let elementsProcessed = 0;
        let elementsEnhanced = 0;

        if (analysisMode) {
          // 1. Analyze which elements need transition improvements
          const analysis = await this.analyzeTransitionNeeds(content, csvData, targetIssues);

          logSh(` 📋 Analyse: ${analysis.candidates.length}/${Object.keys(content).length} éléments candidats`, 'DEBUG');

          if (analysis.candidates.length === 0) {
            logSh(`✅ TRANSITION LAYER: Fluidité déjà optimale`, 'INFO');
            return {
              content,
              stats: {
                processed: Object.keys(content).length,
                enhanced: 0,
                analysisSkipped: true,
                duration: Date.now() - startTime
              }
            };
          }

          // 2. Enhance the selected elements
          const improvedResults = await this.enhanceTransitionElements(
            analysis.candidates,
            csvData,
            { llmProvider, intensity, preserveStructure }
          );

          // 3. Merge with the original content
          enhancedContent = { ...content };
          Object.keys(improvedResults).forEach(tag => {
            if (improvedResults[tag] !== content[tag]) {
              enhancedContent[tag] = improvedResults[tag];
              elementsEnhanced++;
            }
          });

          elementsProcessed = analysis.candidates.length;

        } else {
          // Direct mode: enhance every long element
          const longElements = Object.entries(content)
            .filter(([tag, text]) => text.length > 150)
            .map(([tag, text]) => ({ tag, content: text, issues: ['amélioration_générale'] }));

          if (longElements.length === 0) {
            return { content, stats: { processed: 0, enhanced: 0, duration: Date.now() - startTime } };
          }

          const improvedResults = await this.enhanceTransitionElements(
            longElements,
            csvData,
            { llmProvider, intensity, preserveStructure }
          );

          enhancedContent = { ...content };
          Object.keys(improvedResults).forEach(tag => {
            if (improvedResults[tag] !== content[tag]) {
              enhancedContent[tag] = improvedResults[tag];
              elementsEnhanced++;
            }
          });

          elementsProcessed = longElements.length;
        }

        const duration = Date.now() - startTime;
        const stats = {
          processed: elementsProcessed,
          enhanced: elementsEnhanced,
          total: Object.keys(content).length,
          enhancementRate: (elementsEnhanced / Math.max(elementsProcessed, 1)) * 100,
          duration,
          llmProvider,
          intensity
        };

        logSh(`✅ TRANSITION LAYER TERMINÉE: ${elementsEnhanced}/${elementsProcessed} fluidifiés (${duration}ms)`, 'INFO');

        await tracer.event('Transition layer appliquée', stats);

        return { content: enhancedContent, stats };

      } catch (error) {
        const duration = Date.now() - startTime;
        logSh(`❌ TRANSITION LAYER ÉCHOUÉE après ${duration}ms: ${error.message}`, 'ERROR');

        // Graceful fallback: return the original content
        logSh(`🔄 Fallback: contenu original préservé`, 'WARNING');
        return {
          content,
          stats: { fallback: true, duration },
          error: error.message
        };
      }
    }, { content: Object.keys(content), config });
  }
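
  /*
   * Usage sketch (illustrative only): how this layer might be invoked from the
   * surrounding pipeline. The content map and csvData values shown here are
   * assumptions made for the example, not taken from the real pipeline.
   *
   *   const layer = new TransitionLayer();
   *   const { content: smoothed, stats } = await layer.apply(
   *     { intro: 'Premier paragraphe…', avantages: 'Deuxième paragraphe…' },
   *     { llmProvider: 'gemini', intensity: 1.0, analysisMode: true, csvData: { mc0: 'enseigne lumineuse' } }
   *   );
   *   // stats.enhanced / stats.processed = share of candidate elements actually rewritten;
   *   // on any error, apply() returns the original content with stats.fallback = true.
   */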

  /**
   * ANALYZE TRANSITION NEEDS
   */
  async analyzeTransitionNeeds(content, csvData, targetIssues = null) {
    logSh(`🔍 Analyse besoins transitions`, 'DEBUG');

    const analysis = {
      candidates: [],
      globalScore: 0,
      issuesFound: {
        repetitiveConnectors: 0,
        abruptTransitions: 0,
        uniformSentences: 0,
        formalityImbalance: 0
      }
    };

    // Analyze each element
    Object.entries(content).forEach(([tag, text]) => {
      const elementAnalysis = this.analyzeTransitionElement(text, csvData);

      if (elementAnalysis.needsImprovement) {
        analysis.candidates.push({
          tag,
          content: text,
          issues: elementAnalysis.issues,
          score: elementAnalysis.score,
          improvements: elementAnalysis.improvements
        });

        analysis.globalScore += elementAnalysis.score;

        // Count issue types
        elementAnalysis.issues.forEach(issue => {
          if (analysis.issuesFound.hasOwnProperty(issue)) {
            analysis.issuesFound[issue]++;
          }
        });
      }
    });

    analysis.globalScore = analysis.globalScore / Math.max(Object.keys(content).length, 1);

    logSh(` 📊 Score global transitions: ${analysis.globalScore.toFixed(2)}`, 'DEBUG');
    logSh(` 🔍 Issues trouvées: ${JSON.stringify(analysis.issuesFound)}`, 'DEBUG');

    return analysis;
  }
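
  /*
   * Example of the shape returned by analyzeTransitionNeeds() (values are
   * illustrative, the structure follows the code above):
   *
   *   {
   *     candidates: [
   *       { tag: 'intro', content: '…', issues: ['repetitiveConnectors', 'abruptTransitions'],
   *         score: 0.7, improvements: ['varier_connecteurs', 'ajouter_transitions_fluides'] }
   *     ],
   *     globalScore: 0.23,   // sum of candidate scores divided by ALL elements, not just candidates
   *     issuesFound: { repetitiveConnectors: 1, abruptTransitions: 1, uniformSentences: 0, formalityImbalance: 0 }
   *   }
   */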

  /**
   * ENHANCE SELECTED TRANSITION ELEMENTS
   */
  async enhanceTransitionElements(candidates, csvData, config) {
    logSh(`🔄 Amélioration ${candidates.length} éléments transitions`, 'DEBUG');

    const results = {};
    const chunks = chunkArray(candidates, 6); // Smaller chunks for Gemini

    for (let chunkIndex = 0; chunkIndex < chunks.length; chunkIndex++) {
      const chunk = chunks[chunkIndex];

      try {
        logSh(` 📦 Chunk transitions ${chunkIndex + 1}/${chunks.length}: ${chunk.length} éléments`, 'DEBUG');

        const enhancementPrompt = this.createTransitionEnhancementPrompt(chunk, csvData, config);

        const response = await callLLM(config.llmProvider, enhancementPrompt, {
          temperature: 0.6, // Moderate creativity for fluidity
          maxTokens: 2500
        }, csvData?.personality);

        const chunkResults = this.parseTransitionResponse(response, chunk);
        Object.assign(results, chunkResults);

        logSh(` ✅ Chunk transitions ${chunkIndex + 1}: ${Object.keys(chunkResults).length} fluidifiés`, 'DEBUG');

        // Pause between chunks
        if (chunkIndex < chunks.length - 1) {
          await sleep(1500);
        }

      } catch (error) {
        logSh(` ❌ Chunk transitions ${chunkIndex + 1} échoué: ${error.message}`, 'ERROR');

        // Fallback: keep the original content for this chunk
        chunk.forEach(element => {
          results[element.tag] = element.content;
        });
      }
    }

    return results;
  }
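
  /*
   * Pacing example (assuming chunkArray splits the list into consecutive groups
   * of at most 6, which is what its usage here implies): 14 candidates become
   * chunks of 6 + 6 + 2, processed sequentially with one LLM call per chunk and
   * a 1.5 s pause between chunks, so roughly 3 s of deliberate delay on top of
   * LLM latency. A failed chunk only reverts its own elements to the original
   * text; the other chunks are unaffected.
   */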

  // ============= HELPER METHODS =============

  /**
   * Analyze a single element's transitions
   */
  analyzeTransitionElement(text, csvData) {
    const sentences = text.split(/[.!?]+/).filter(s => s.trim().length > 10);

    if (sentences.length < 2) {
      return { needsImprovement: false, score: 0, issues: [], improvements: [] };
    }

    let score = 0;
    const issues = [];
    const improvements = [];

    // 1. Check for repetitive connectors
    const repetitiveScore = this.analyzeRepetitiveConnectors(text);
    if (repetitiveScore > 0.3) {
      score += 0.3;
      issues.push('repetitiveConnectors');
      improvements.push('varier_connecteurs');
    }

    // 2. Check for abrupt transitions
    const abruptScore = this.analyzeAbruptTransitions(sentences);
    if (abruptScore > 0.4) {
      score += 0.4;
      issues.push('abruptTransitions');
      improvements.push('ajouter_transitions_fluides');
    }

    // 3. Check sentence-length uniformity
    const uniformityScore = this.analyzeSentenceUniformity(sentences);
    if (uniformityScore < 0.3) {
      score += 0.2;
      issues.push('uniformSentences');
      improvements.push('varier_longueurs_phrases');
    }

    // 4. Check formality balance
    const formalityScore = this.analyzeFormalityBalance(text);
    if (formalityScore > 0.5) {
      score += 0.1;
      issues.push('formalityImbalance');
      improvements.push('équilibrer_registre_langue');
    }

    return {
      needsImprovement: score > 0.3,
      score,
      issues,
      improvements
    };
  }
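
  /*
   * Scoring example, derived from the thresholds above: repetitive connectors
   * add 0.3, abrupt transitions 0.4, uniform sentence lengths 0.2, formality
   * imbalance 0.1, and an element is flagged only when the total is strictly
   * greater than 0.3. So abrupt transitions alone (0.4) are enough, while
   * repetitive connectors alone (0.3) or uniformity plus formality (0.2 + 0.1)
   * are not: at least two issues, or the abrupt-transitions issue on its own,
   * are required.
   */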

  /**
   * Analyze repetitive connectors
   */
  analyzeRepetitiveConnectors(text) {
    const commonConnectors = ['par ailleurs', 'en effet', 'de plus', 'cependant', 'ainsi', 'donc'];
    let totalConnectors = 0;
    let repetitions = 0;

    commonConnectors.forEach(connector => {
      const matches = (text.match(new RegExp(`\\b${connector}\\b`, 'gi')) || []);
      totalConnectors += matches.length;
      if (matches.length > 1) repetitions += matches.length - 1;
    });

    return totalConnectors > 0 ? repetitions / totalConnectors : 0;
  }
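
  /*
   * Worked example: a text using "en effet" twice and "de plus" once gives
   * totalConnectors = 3 and repetitions = 1, so the ratio is 1/3 ≈ 0.33, just
   * above the 0.3 threshold used in analyzeTransitionElement(), and the element
   * is marked 'repetitiveConnectors'. A text where each connector appears at
   * most once always scores 0.
   */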

  /**
   * Analyze abrupt transitions
   */
  analyzeAbruptTransitions(sentences) {
    if (sentences.length < 2) return 0;

    let abruptCount = 0;

    for (let i = 1; i < sentences.length; i++) {
      const current = sentences[i].trim().toLowerCase();
      const hasConnector = this.hasTransitionWord(current);

      if (!hasConnector && current.length > 30) {
        abruptCount++;
      }
    }

    return abruptCount / (sentences.length - 1);
  }
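
  /*
   * Worked example: with 3 sentences where the 2nd has no transition word and
   * is longer than 30 characters while the 3rd starts with "cependant",
   * abruptCount = 1 and the ratio is 1 / (3 - 1) = 0.5, above the 0.4
   * threshold, so 'abruptTransitions' is flagged. The first sentence is never
   * counted: only follow-on sentences are expected to carry a connector.
   */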

  /**
   * Analyze sentence-length uniformity
   */
  analyzeSentenceUniformity(sentences) {
    if (sentences.length < 2) return 1;

    const lengths = sentences.map(s => s.trim().length);
    const avgLength = lengths.reduce((a, b) => a + b, 0) / lengths.length;
    const variance = lengths.reduce((acc, len) => acc + Math.pow(len - avgLength, 2), 0) / lengths.length;
    const stdDev = Math.sqrt(variance);

    return Math.min(1, stdDev / avgLength);
  }
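
  /*
   * This is the coefficient of variation (stdDev / mean) of sentence lengths,
   * capped at 1. Worked example: lengths [40, 42, 44] give a mean of 42 and a
   * stdDev of about 1.6, so the score is about 0.04, below the 0.3 threshold,
   * hence flagged as 'uniformSentences'. Lengths [20, 60, 100] give about 0.54
   * and are considered sufficiently varied.
   */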

  /**
   * Analyze formality balance
   */
  analyzeFormalityBalance(text) {
    const formalIndicators = ['il convient de', 'par conséquent', 'néanmoins', 'toutefois', 'cependant'];
    // Note: includes() matches substrings, so 'bon' also matches words like 'bonne'.
    const casualIndicators = ['du coup', 'bon', 'franchement', 'nickel', 'sympa'];

    let formalCount = 0;
    let casualCount = 0;

    formalIndicators.forEach(indicator => {
      if (text.toLowerCase().includes(indicator)) formalCount++;
    });

    casualIndicators.forEach(indicator => {
      if (text.toLowerCase().includes(indicator)) casualCount++;
    });

    const total = formalCount + casualCount;
    if (total === 0) return 0;

    // Imbalanced when one register dominates
    return Math.abs(formalCount - casualCount) / total;
  }
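
  /*
   * Worked example: a text containing "par conséquent" and "toutefois" but no
   * casual marker gives formalCount = 2, casualCount = 0, so the imbalance is
   * |2 - 0| / 2 = 1.0, above the 0.5 threshold used in
   * analyzeTransitionElement(). One formal plus one casual marker gives 0
   * (balanced), and a text with no marker at all also returns 0.
   */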

  /**
   * Check for the presence of transition words
   */
  hasTransitionWord(sentence) {
    const transitionWords = [
      'par ailleurs', 'en effet', 'de plus', 'cependant', 'ainsi', 'donc',
      'ensuite', 'puis', 'également', 'aussi', 'néanmoins', 'toutefois',
      'd\'ailleurs', 'en outre', 'par contre', 'en revanche'
    ];

    return transitionWords.some(word => sentence.includes(word));
  }

  /**
   * Build the transition-enhancement prompt
   */
  createTransitionEnhancementPrompt(chunk, csvData, config) {
    const personality = csvData?.personality;

    let prompt = `MISSION: Améliore UNIQUEMENT les transitions et fluidité de ces contenus.

CONTEXTE: Article SEO ${csvData?.mc0 || 'signalétique personnalisée'}
${personality ? `PERSONNALITÉ: ${personality.nom} (${personality.style} web professionnel)` : ''}
${personality?.connecteursPref ? `CONNECTEURS PRÉFÉRÉS: ${personality.connecteursPref}` : ''}
INTENSITÉ: ${config.intensity} (0.5=léger, 1.0=standard, 1.5=intensif)

CONTENUS À FLUIDIFIER:

${chunk.map((item, i) => `[${i + 1}] TAG: ${item.tag}
PROBLÈMES: ${item.issues.join(', ')}
CONTENU: "${item.content}"`).join('\n\n')}

OBJECTIFS FLUIDITÉ:
- Connecteurs plus naturels et variés${personality?.connecteursPref ? `: ${personality.connecteursPref}` : ''}
- Transitions fluides entre idées et paragraphes
- Variation naturelle longueurs phrases
- ÉVITE répétitions excessives ("du coup", "par ailleurs", "en effet")
- Style ${personality?.style || 'professionnel'} mais naturel web

CONSIGNES STRICTES:
- NE CHANGE PAS le fond du message ni les informations
- GARDE même structure générale et longueur approximative (±20%)
- Améliore SEULEMENT la fluidité et les enchaînements
- RESPECTE le style ${personality?.nom || 'professionnel'}${personality?.style ? ` (${personality.style})` : ''}
- ÉVITE sur-correction qui rendrait artificiel

TECHNIQUES FLUIDITÉ:
- Varier connecteurs logiques sans répétition
- Alterner phrases courtes (8-12 mots) et moyennes (15-20 mots)
- Utiliser pronoms et reprises pour cohésion
- Ajouter transitions implicites par reformulation
- Équilibrer registre soutenu/accessible

FORMAT RÉPONSE:
[1] Contenu avec transitions améliorées
[2] Contenu avec transitions améliorées
etc...

IMPORTANT: Réponse DIRECTE par les contenus fluidifiés, pas d'explication.`;

    return prompt;
  }

  /**
   * Parse the transitions response
   */
  parseTransitionResponse(response, chunk) {
    const results = {};
    const regex = /\[(\d+)\]\s*([^[]*?)(?=\n\[\d+\]|$)/gs;
    let match;
    let index = 0;

    while ((match = regex.exec(response)) && index < chunk.length) {
      let fluidContent = match[2].trim();
      const element = chunk[index];

      // Clean the smoothed content
      fluidContent = this.cleanTransitionContent(fluidContent);

      if (fluidContent && fluidContent.length > 10) {
        results[element.tag] = fluidContent;
        logSh(`✅ Fluidifié [${element.tag}]: "${fluidContent.substring(0, 60)}..."`, 'DEBUG');
      } else {
        results[element.tag] = element.content; // Fallback
        logSh(`⚠️ Fallback transitions [${element.tag}]: amélioration invalide`, 'WARNING');
      }

      index++;
    }

    // Fill in any elements missing from the response
    while (index < chunk.length) {
      const element = chunk[index];
      results[element.tag] = element.content;
      index++;
    }

    return results;
  }
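
  /*
   * Parsing example (illustrative tags): for chunk = [{ tag: 'intro', … },
   * { tag: 'avantages', … }] and a response of the form
   *
   *   [1] Texte fluidifié pour l'intro…
   *   [2] Texte fluidifié pour les avantages…
   *
   * the result is { intro: 'Texte fluidifié…', avantages: 'Texte fluidifié…' }.
   * Note that blocks are mapped to chunk elements in order of appearance, not
   * by the number inside the brackets, and any block of 10 characters or fewer
   * (or missing entirely) falls back to the element's original content.
   */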

  /**
   * Clean generated transition content
   */
  cleanTransitionContent(content) {
    if (!content) return content;

    // Strip unwanted prefixes
    content = content.replace(/^(voici\s+)?le\s+contenu\s+(fluidifié|amélioré)\s*[:.]?\s*/gi, '');
    content = content.replace(/^(avec\s+)?transitions\s+améliorées\s*[:.]?\s*/gi, '');
    content = content.replace(/^(bon,?\s*)?(alors,?\s*)?/i, ''); // Filler openers ("Bon, alors…")

    // Clean up formatting
    content = content.replace(/\*\*[^*]+\*\*/g, ''); // Markdown bold (removes the markers and the text between them)
    content = content.replace(/\s{2,}/g, ' '); // Collapse multiple spaces
    content = content.trim();

    return content;
  }
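
  /*
   * Cleaning example: "Voici le contenu amélioré : Nos enseignes sur mesure…"
   * becomes "Nos enseignes sur mesure…" (the announcement prefix is stripped).
   * Note that the markdown rule removes bold segments entirely: "**mot**"
   * disappears rather than being unwrapped to "mot".
   */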

}

module.exports = { TransitionLayer };