- New modular pipeline architecture allowing custom workflow combinations
- Per-step LLM provider configuration (Claude, OpenAI, Gemini, DeepSeek, Moonshot, Mistral); see the example config below
- Visual pipeline builder and runner interfaces with drag-and-drop
- 10 predefined pipeline templates (minimal-test to originality-bypass)
- Pipeline CRUD operations via ConfigManager and REST API
- Fix variable resolution in instructions (HTML tags were breaking {{variables}})
- Fix hardcoded LLM providers in AdversarialCore
- Add TESTS_LLM_PROVIDER.md documentation with validation results
- Update dashboard to disable legacy config editor
API Endpoints:
- POST /api/pipeline/save, execute, validate, estimate
- GET /api/pipeline/list, modules, templates
Backward compatible with the legacy modular workflow system.
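
For illustration, a pipeline definition of the kind PipelineExecutor consumes (and that could be submitted via POST /api/pipeline/save) might look like the sketch below. The field names (`name`, `pipeline`, `step`, `module`, `mode`, `intensity`, `parameters.llmProvider`, `saveCheckpoint`) mirror what the executor code further down reads; the concrete step choices, modes, and provider assignments are illustrative assumptions, not one of the shipped templates.

```js
// Hypothetical three-step pipeline; values are illustrative only.
const examplePipeline = {
  name: 'example-three-step',
  pipeline: [
    { step: 1, module: 'generation', mode: 'simple', intensity: 1.0,
      parameters: { llmProvider: 'claude' } },
    { step: 2, module: 'selective', mode: 'lightEnhancement', intensity: 0.8,
      parameters: { llmProvider: 'openai' }, saveCheckpoint: true },
    { step: 3, module: 'adversarial', mode: 'standard', intensity: 1.0,
      parameters: { llmProvider: 'gemini', detector: 'general' } }
  ]
};
```

Steps with `enabled: false` are skipped, and `saveCheckpoint: true` snapshots the content after that step; both behaviours are visible in execute() below.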
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude <noreply@anthropic.com>
PipelineExecutor.js (473 lines, 15 KiB, JavaScript)
/**
 * PipelineExecutor.js
 *
 * Execution engine for the flexible modular pipelines.
 * Orchestrates sequential module execution with state management.
 */

const { logSh } = require('../ErrorReporting');
const { tracer } = require('../trace');
const { PipelineDefinition } = require('./PipelineDefinition');
const { getPersonalities, readInstructionsData, selectPersonalityWithAI } = require('../BrainConfig');
const { extractElements, buildSmartHierarchy } = require('../ElementExtraction');
const { generateMissingKeywords } = require('../MissingKeywords');

// Execution modules
const { generateSimple } = require('../selective-enhancement/SelectiveUtils');
const { applySelectiveLayer } = require('../selective-enhancement/SelectiveCore');
const { applyPredefinedStack: applySelectiveStack } = require('../selective-enhancement/SelectiveLayers');
const { applyAdversarialLayer } = require('../adversarial-generation/AdversarialCore');
const { applyPredefinedStack: applyAdversarialStack } = require('../adversarial-generation/AdversarialLayers');
const { applyHumanSimulationLayer } = require('../human-simulation/HumanSimulationCore');
const { applyPredefinedSimulation } = require('../human-simulation/HumanSimulationLayers');
const { applyPatternBreakingLayer } = require('../pattern-breaking/PatternBreakingCore');
const { applyPatternBreakingStack } = require('../pattern-breaking/PatternBreakingLayers');

/**
 * PipelineExecutor class
 */
class PipelineExecutor {
  constructor() {
    this.currentContent = null;
    this.executionLog = [];
    this.checkpoints = [];
    this.metadata = {
      startTime: null,
      endTime: null,
      totalDuration: 0,
      personality: null
    };
  }

  /**
   * Runs a complete pipeline
   */
  async execute(pipelineConfig, rowNumber, options = {}) {
    return tracer.run('PipelineExecutor.execute', async () => {
      logSh(`🚀 Démarrage pipeline "${pipelineConfig.name}" (${pipelineConfig.pipeline.length} étapes)`, 'INFO');

      // Validation
      const validation = PipelineDefinition.validate(pipelineConfig);
      if (!validation.valid) {
        throw new Error(`Pipeline invalide: ${validation.errors.join(', ')}`);
      }

      this.metadata.startTime = Date.now();
      this.executionLog = [];
      this.checkpoints = [];

      // Load the data
      const csvData = await this.loadData(rowNumber);

      // Run the steps
      const enabledSteps = pipelineConfig.pipeline.filter(s => s.enabled !== false);

      for (let i = 0; i < enabledSteps.length; i++) {
        const step = enabledSteps[i];

        try {
          logSh(`▶ Étape ${step.step}/${pipelineConfig.pipeline.length}: ${step.module} (${step.mode})`, 'INFO');

          const stepStartTime = Date.now();
          const result = await this.executeStep(step, csvData, options);
          const stepDuration = Date.now() - stepStartTime;

          // Log the step
          this.executionLog.push({
            step: step.step,
            module: step.module,
            mode: step.mode,
            intensity: step.intensity,
            duration: stepDuration,
            modifications: result.modifications || 0,
            success: true,
            timestamp: new Date().toISOString()
          });

          // Update the content
          if (result.content) {
            this.currentContent = result.content;
          }

          // Checkpoint if requested
          if (step.saveCheckpoint) {
            this.checkpoints.push({
              step: step.step,
              content: this.currentContent,
              timestamp: new Date().toISOString()
            });
            logSh(`💾 Checkpoint sauvegardé (étape ${step.step})`, 'DEBUG');
          }

          logSh(`✔ Étape ${step.step} terminée (${stepDuration}ms, ${result.modifications || 0} modifs)`, 'INFO');

        } catch (error) {
          logSh(`✖ Erreur étape ${step.step}: ${error.message}`, 'ERROR');

          this.executionLog.push({
            step: step.step,
            module: step.module,
            mode: step.mode,
            success: false,
            error: error.message,
            timestamp: new Date().toISOString()
          });

          // Propagate the error or continue, depending on the options
          if (options.stopOnError !== false) {
            throw error;
          }
        }
      }

      this.metadata.endTime = Date.now();
      this.metadata.totalDuration = this.metadata.endTime - this.metadata.startTime;

      logSh(`✅ Pipeline terminé: ${this.metadata.totalDuration}ms`, 'INFO');

      return {
        success: true,
        finalContent: this.currentContent,
        executionLog: this.executionLog,
        checkpoints: this.checkpoints,
        metadata: {
          ...this.metadata,
          pipelineName: pipelineConfig.name,
          totalSteps: enabledSteps.length,
          successfulSteps: this.executionLog.filter(l => l.success).length
        }
      };
    }, { pipelineName: pipelineConfig.name, rowNumber });
  }

  /**
   * Loads the data from Google Sheets
   */
  async loadData(rowNumber) {
    return tracer.run('PipelineExecutor.loadData', async () => {
      const csvData = await readInstructionsData(rowNumber);

      // Load the personality if needed
      const personalities = await getPersonalities();
      const personality = await selectPersonalityWithAI(
        csvData.mc0,
        csvData.t0,
        personalities
      );

      csvData.personality = personality;
      this.metadata.personality = personality.nom;

      logSh(`📊 Données chargées: ${csvData.mc0}, personnalité: ${personality.nom}`, 'DEBUG');

      return csvData;
    }, { rowNumber });
  }

  /**
   * Runs an individual step
   */
  async executeStep(step, csvData, options) {
    return tracer.run(`PipelineExecutor.executeStep.${step.module}`, async () => {

      switch (step.module) {
        case 'generation':
          return await this.runGeneration(step, csvData);

        case 'selective':
          return await this.runSelective(step, csvData);

        case 'adversarial':
          return await this.runAdversarial(step, csvData);

        case 'human':
          return await this.runHumanSimulation(step, csvData);

        case 'pattern':
          return await this.runPatternBreaking(step, csvData);

        default:
          throw new Error(`Module inconnu: ${step.module}`);
      }

    }, { step: step.step, module: step.module, mode: step.mode });
  }

  /**
   * Runs the initial generation
   */
  async runGeneration(step, csvData) {
    return tracer.run('PipelineExecutor.runGeneration', async () => {

      if (this.currentContent) {
        logSh('⚠️ Contenu déjà généré, génération ignorée', 'WARN');
        return { content: this.currentContent, modifications: 0 };
      }

      // Step 1: extract the elements from the XML template
      const elements = await extractElements(csvData.xmlTemplate, csvData);
      logSh(`✓ Extraction: ${elements.length} éléments extraits`, 'DEBUG');

      // Step 2: generate the missing keywords
      const finalElements = await generateMissingKeywords(elements, csvData);

      // Step 3: build the hierarchy
      const elementsArray = Array.isArray(finalElements) ? finalElements :
        (finalElements && typeof finalElements === 'object') ? Object.values(finalElements) : [];
      const hierarchy = await buildSmartHierarchy(elementsArray);
      logSh(`✓ Hiérarchie: ${Object.keys(hierarchy).length} sections`, 'DEBUG');

      // Step 4: simple generation with a configurable LLM
      const llmProvider = step.parameters?.llmProvider || 'claude';
      const result = await generateSimple(hierarchy, csvData, { llmProvider });

      logSh(`✓ Génération: ${Object.keys(result.content || {}).length} éléments créés avec ${llmProvider}`, 'DEBUG');

      return {
        content: result.content,
        modifications: Object.keys(result.content || {}).length
      };

    }, { mode: step.mode });
  }

  /**
   * Runs the selective enhancement
   */
  async runSelective(step, csvData) {
    return tracer.run('PipelineExecutor.runSelective', async () => {

      if (!this.currentContent) {
        throw new Error('Aucun contenu à améliorer. Génération requise avant selective enhancement');
      }

      // Layer configuration
      const llmProvider = step.parameters?.llmProvider || 'openai';
      const config = {
        csvData,
        personality: csvData.personality,
        intensity: step.intensity || 1.0,
        llmProvider: llmProvider,
        ...step.parameters
      };

      let result;

      // Use the stack if the mode is predefined
      const predefinedStacks = ['lightEnhancement', 'standardEnhancement', 'fullEnhancement', 'personalityFocus', 'fluidityFocus', 'adaptive'];

      if (predefinedStacks.includes(step.mode)) {
        result = await applySelectiveStack(this.currentContent, step.mode, config);
      } else {
        // Otherwise use the layer directly
        result = await applySelectiveLayer(this.currentContent, config);
      }

      logSh(`✓ Selective: modifications appliquées avec ${llmProvider}`, 'DEBUG');

      return {
        content: result.content || result,
        modifications: result.modificationsCount || 0
      };

    }, { mode: step.mode, intensity: step.intensity });
  }

  /**
   * Runs the adversarial generation
   */
  async runAdversarial(step, csvData) {
    return tracer.run('PipelineExecutor.runAdversarial', async () => {

      if (!this.currentContent) {
        throw new Error('Aucun contenu à traiter. Génération requise avant adversarial');
      }

      if (step.mode === 'none') {
        logSh('Adversarial mode = none, ignoré', 'DEBUG');
        return { content: this.currentContent, modifications: 0 };
      }

      const llmProvider = step.parameters?.llmProvider || 'gemini';
      const config = {
        csvData,
        detectorTarget: step.parameters?.detector || 'general',
        method: step.parameters?.method || 'regeneration',
        intensity: step.intensity || 1.0,
        llmProvider: llmProvider
      };

      let result;

      // Map the user-friendly names to the real stack names
      const stackMapping = {
        'light': 'lightDefense',
        'standard': 'standardDefense',
        'heavy': 'heavyDefense',
        'adaptive': 'adaptive'
      };

      // Use the stack if the mode is predefined
      if (stackMapping[step.mode]) {
        const stackName = stackMapping[step.mode];

        if (stackName === 'adaptive') {
          // The adaptive mode uses the layer directly
          result = await applyAdversarialLayer(this.currentContent, config);
        } else {
          result = await applyAdversarialStack(this.currentContent, stackName, config);
        }
      } else {
        // Otherwise use the layer directly
        result = await applyAdversarialLayer(this.currentContent, config);
      }

      logSh(`✓ Adversarial: modifications appliquées avec ${llmProvider}`, 'DEBUG');

      return {
        content: result.content || result,
        modifications: result.modificationsCount || 0
      };

    }, { mode: step.mode, detector: step.parameters?.detector });
  }

  /**
   * Runs the human simulation
   */
  async runHumanSimulation(step, csvData) {
    return tracer.run('PipelineExecutor.runHumanSimulation', async () => {

      if (!this.currentContent) {
        throw new Error('Aucun contenu à traiter. Génération requise avant human simulation');
      }

      if (step.mode === 'none') {
        logSh('Human simulation mode = none, ignoré', 'DEBUG');
        return { content: this.currentContent, modifications: 0 };
      }

      const llmProvider = step.parameters?.llmProvider || 'mistral';
      const config = {
        csvData,
        personality: csvData.personality,
        intensity: step.intensity || 1.0,
        fatigueLevel: step.parameters?.fatigueLevel || 0.5,
        errorRate: step.parameters?.errorRate || 0.3,
        llmProvider: llmProvider
      };

      let result;

      // Use the stack if the mode is predefined
      const predefinedModes = ['lightSimulation', 'standardSimulation', 'heavySimulation', 'adaptiveSimulation', 'personalityFocus', 'temporalFocus'];

      if (predefinedModes.includes(step.mode)) {
        result = await applyPredefinedSimulation(this.currentContent, step.mode, config);
      } else {
        // Otherwise use the layer directly
        result = await applyHumanSimulationLayer(this.currentContent, config);
      }

      logSh(`✓ Human Simulation: modifications appliquées avec ${llmProvider}`, 'DEBUG');

      return {
        content: result.content || result,
        modifications: result.modificationsCount || 0
      };

    }, { mode: step.mode, intensity: step.intensity });
  }

  /**
   * Runs the pattern breaking
   */
  async runPatternBreaking(step, csvData) {
    return tracer.run('PipelineExecutor.runPatternBreaking', async () => {

      if (!this.currentContent) {
        throw new Error('Aucun contenu à traiter. Génération requise avant pattern breaking');
      }

      if (step.mode === 'none') {
        logSh('Pattern breaking mode = none, ignoré', 'DEBUG');
        return { content: this.currentContent, modifications: 0 };
      }

      const llmProvider = step.parameters?.llmProvider || 'deepseek';
      const config = {
        csvData,
        personality: csvData.personality,
        intensity: step.intensity || 1.0,
        focus: step.parameters?.focus || 'both',
        llmProvider: llmProvider
      };

      let result;

      // Use the stack if the mode is predefined
      const predefinedModes = ['lightPatternBreaking', 'standardPatternBreaking', 'heavyPatternBreaking', 'adaptivePatternBreaking', 'syntaxFocus', 'connectorsFocus'];

      if (predefinedModes.includes(step.mode)) {
        result = await applyPatternBreakingStack(step.mode, this.currentContent, config);
      } else {
        // Otherwise use the layer directly
        result = await applyPatternBreakingLayer(this.currentContent, config);
      }

      logSh(`✓ Pattern Breaking: modifications appliquées avec ${llmProvider}`, 'DEBUG');

      return {
        content: result.content || result,
        modifications: result.modificationsCount || 0
      };

    }, { mode: step.mode, intensity: step.intensity });
  }

  /**
   * Returns the current content
   */
  getCurrentContent() {
    return this.currentContent;
  }

  /**
   * Returns the execution log
   */
  getExecutionLog() {
    return this.executionLog;
  }

  /**
   * Returns the saved checkpoints
   */
  getCheckpoints() {
    return this.checkpoints;
  }

  /**
   * Returns the execution metadata
   */
  getMetadata() {
    return this.metadata;
  }

  /**
   * Resets the executor state
   */
  reset() {
    this.currentContent = null;
    this.executionLog = [];
    this.checkpoints = [];
    this.metadata = {
      startTime: null,
      endTime: null,
      totalDuration: 0,
      personality: null
    };
  }
}

module.exports = { PipelineExecutor };
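
A minimal usage sketch, assuming the executor is required from a sibling module and that the referenced sheet row exists (the require path, row number, and function name are placeholders); the shape of the returned object matches what execute() builds above.

```js
const { PipelineExecutor } = require('./PipelineExecutor');

// Hypothetical helper: pipelineConfig is a definition like the example in the
// commit message; rowNumber 2 is a placeholder for an existing sheet row.
async function runExamplePipeline(pipelineConfig, rowNumber = 2) {
  const executor = new PipelineExecutor();

  // stopOnError defaults to true; pass false to keep executing after a failed step.
  const result = await executor.execute(pipelineConfig, rowNumber, { stopOnError: true });

  console.log(`${result.metadata.successfulSteps}/${result.metadata.totalSteps} steps in ${result.metadata.totalDuration}ms`);
  return result.finalContent;
}
```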