Initial commit
commit 239a7161c2

19  .env.example  Normal file
@@ -0,0 +1,19 @@
# DigitalOcean configuration
DO_API_TOKEN=your_digitalocean_api_token_here
DO_SPACES_KEY=your_spaces_key_here
DO_SPACES_SECRET=your_spaces_secret_here
DO_SPACES_REGION=fra1
DO_SPACES_BUCKET=autocollant

# Google Sheets configuration
GOOGLE_SERVICE_ACCOUNT_EMAIL=your-service-account@project.iam.gserviceaccount.com
GOOGLE_PRIVATE_KEY="-----BEGIN PRIVATE KEY-----\nYOUR_PRIVATE_KEY_HERE\n-----END PRIVATE KEY-----\n"
GOOGLE_SHEETS_ID=1iA2GvWeUxX-vpnAMfVm3ZMG9LhaC070SdGssEcXAh2c

# Logging
LOG_LEVEL=INFO
ENABLE_SHEETS_LOGGING=false

# Email (optional)
EMAIL_USER=your-email@gmail.com
EMAIL_APP_PASSWORD=your_app_password
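
Editor's note: GOOGLE_PRIVATE_KEY is stored above with literal `\n` escapes, so consumers must restore real newlines before authenticating. A minimal sketch, mirroring the `replace(/\\n/g, '\n')` call that lib/ArticleStorage.js uses later in this commit:

```javascript
// Load .env and restore real newlines in the service-account key.
require('dotenv').config();

const privateKey = process.env.GOOGLE_PRIVATE_KEY?.replace(/\\n/g, '\n');
if (!privateKey) {
  throw new Error('GOOGLE_PRIVATE_KEY is not set');
}
```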

90  .gitignore  vendored  Normal file
@@ -0,0 +1,90 @@
# === SENSITIVE FILES ===
# Environment variables
.env
.env.local
.env.development.local
.env.test.local
.env.production.local

# Service keys and credentials
*.json
!package*.json
!lib/package.json
# If you have other legitimate JSON files to keep, add them with !filename.json

# === LOGS ===
# All log files
logs/
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*

# === NODE.JS ===
# Dependencies
node_modules/
jspm_packages/

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# === TEMPORARY/DEBUG FILES ===
# Temporary test files
test_*.js
*_test.js
*_debug.js
test-*.js

# Generated HTML (logs viewer)
logs-viewer.html

# === DATA DIRECTORIES ===
# config/ and data/ may contain sensitive data
config/
data/

# === DEVELOPMENT TOOLS ===
# IDEs and editors
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# === CLAUDE ===
# Local Claude configuration (keep the global settings if needed)
.claude/settings.local.json

# === OPTIONAL ===
# Uncomment to ignore package-lock.json (team decision)
# package-lock.json

# Coverage directory used by tools like istanbul
coverage/
*.lcov

# Temporary folders
tmp/
temp/

315  CLAUDE.md  Normal file
@@ -0,0 +1,315 @@
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Project Overview

This is a Node.js-based SEO content generation server that was converted from Google Apps Script. The system generates SEO-optimized content using multiple LLMs with sophisticated anti-detection mechanisms and Content DNA Mixing techniques.

### 🎯 Current Status - PHASE 2 COMPLETE ✅
- **Full Google Sheets Integration**: ✅ **OPERATIONAL**
  - 15 AI personalities with random selection (60% of the pool per run)
  - Complete data pipeline from Google Sheets (Instructions, Personnalites)
  - XML template system with default fallback
  - Organic content compilation and storage

- **Multi-LLM Enhancement Pipeline**: ✅ **FULLY OPERATIONAL**
  - 6 LLM providers: Claude, OpenAI, Gemini, Deepseek, Moonshot, Mistral
  - 4-stage enhancement pipeline: Claude → GPT-4 → Gemini → Mistral
  - Direct generation bypass for 16+ elements
  - Average execution: 60-90 seconds for full multi-LLM processing

- **Anti-Detection System**: ✅ **ADVANCED**
  - Random personality selection from 15 profiles (9 selected per run)
  - Temperature = 1.0 for maximum variability
  - Multiple writing styles and vocabularies
  - Content DNA mixing across 4 AI models per element

### 🚀 Core Features Implemented

1. **Google Sheets Integration**
   - Complete authentication via environment variables
   - Read from "Instructions" sheet (slug, CSV data, XML templates)
   - Read from "Personnalites" sheet (15 AI personalities)
   - Write to "Generated_Articles" sheet (compiled text only, no XML)

2. **Advanced Personality System**
   - 15 diverse personalities: technical, creative, commercial, multilingual
   - Random selection of 60% of the personalities per generation
   - AI-powered intelligent selection within the random subset
   - Maximum style variability for anti-detection

3. **XML Template Processing**
   - Default XML template with 16 content elements
   - Instruction extraction with fixed regex ({{variables}} vs {instructions})
   - Base64 and plain text template support
   - Automatic fallback when filenames are detected

4. **Multi-LLM Content Generation**
   - Direct element generation (bypasses the faulty hierarchy)
   - Auto-generation of missing keywords
   - 4-stage enhancement pipeline
   - Organic content compilation maintaining natural flow

## Development Commands

### Production Workflow Execution
```bash
# Execute real production workflow from Google Sheets
node -e "const main = require('./lib/Main'); main.handleFullWorkflow({ rowNumber: 2, source: 'production' });"

# Test with different rows
node -e "const main = require('./lib/Main'); main.handleFullWorkflow({ rowNumber: 3, source: 'production' });"
```

### Basic Operations
- `npm start` - Start the production server on port 3000
- `npm run dev` - Start the development server (same as start)
- `node server.js` - Direct server startup

### Testing Commands

#### Google Sheets Integration Tests
```bash
# Test personality loading from Google Sheets
node -e "const {getPersonalities} = require('./lib/BrainConfig'); getPersonalities().then(p => console.log(p.length + ' personalities loaded'));"

# Test CSV data loading
node -e "const {readInstructionsData} = require('./lib/BrainConfig'); readInstructionsData(2).then(d => console.log('Data:', d));"

# Test random personality selection
node -e "const {selectPersonalityWithAI, getPersonalities} = require('./lib/BrainConfig'); getPersonalities().then(p => selectPersonalityWithAI('test', 'test', p)).then(r => console.log('Selected:', r.nom));"
```

#### LLM Connectivity Tests
- `node -e "require('./lib/LLMManager').testLLMManager()"` - Test basic LLM connectivity
- `node -e "require('./lib/LLMManager').testLLMManagerComplete()"` - Full LLM provider test suite

#### Complete System Test
```bash
node -e "
const main = require('./lib/Main');
const testData = {
  csvData: {
    mc0: 'plaque personnalisée',
    t0: 'Créer une plaque personnalisée unique',
    personality: { nom: 'Marc', style: 'professionnel' },
    tMinus1: 'décoration personnalisée',
    mcPlus1: 'plaque gravée,plaque métal,plaque bois,plaque acrylique',
    tPlus1: 'Plaque Gravée Premium,Plaque Métal Moderne,Plaque Bois Naturel,Plaque Acrylique Design'
  },
  xmlTemplate: Buffer.from(\`<?xml version='1.0' encoding='UTF-8'?>
<article>
  <h1>|Titre_Principal{{T0}}{Rédige un titre H1 accrocheur}|</h1>
  <intro>|Introduction{{MC0}}{Rédige une introduction engageante}|</intro>
</article>\`).toString('base64'),
  source: 'node_server_test'
};
main.handleFullWorkflow(testData);
"
```

## Architecture Overview

### Core Workflow (lib/Main.js)
1. **Data Preparation** - Read from Google Sheets (CSV + XML template)
2. **Element Extraction** - Parse 16+ XML elements with instructions
3. **Missing Keywords Generation** - Auto-complete missing data
4. **Direct Content Generation** - Bypass hierarchy, generate all elements
5. **Multi-LLM Enhancement** - 4-stage processing (Claude → GPT-4 → Gemini → Mistral)
6. **Content Assembly** - Inject content back into the XML template
7. **Organic Compilation & Storage** - Save clean text to Google Sheets (condensed sketch below)
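
A condensed sketch of how these seven steps might chain inside one orchestration call. `getBrainConfig` and `saveGeneratedArticleOrganic` exist in this commit; the other step functions are illustrative names, not the exact API of lib/Main.js:

```javascript
// Hypothetical orchestration; the real step names live in lib/Main.js.
async function handleFullWorkflow({ rowNumber, source }) {
  const config = await getBrainConfig(rowNumber);                  // 1. data preparation
  const elements = extractElements(config.data.xmlTemplate);       // 2. element extraction
  const csvData = await fillMissingKeywords(config.data);          // 3. missing keywords
  let texts = await generateAllElements(elements, csvData);        // 4. direct generation
  texts = await enhanceWithMultiLLM(texts, csvData.personality);   // 5. 4-stage enhancement
  const xml = injectIntoTemplate(config.data.xmlTemplate, texts);  // 6. assembly (XML stays in memory)
  return saveGeneratedArticleOrganic(                              // 7. storage saves compiled text only
    { generatedTexts: texts, originalElements: elements },
    csvData,
    { llmUsed: 'multi', antiDetectionLevel: 'MVP' }
  );
}
```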

### Google Sheets Integration (lib/BrainConfig.js, lib/ArticleStorage.js)
**Authentication**: Environment variables (GOOGLE_SERVICE_ACCOUNT_EMAIL, GOOGLE_PRIVATE_KEY)

**Data Sources**:
- **Instructions Sheet**: Columns A-I (slug, T0, MC0, T-1, L-1, MC+1, T+1, L+1, XML)
- **Personnalites Sheet**: 15 personalities with complete profiles
- **Generated_Articles Sheet**: Compiled text output with metadata

### Personality System (lib/BrainConfig.js:265-340)
**Random Selection Process** (steps 2-3 sketched below):
1. Load 15 personalities from Google Sheets
2. Fisher-Yates shuffle for true randomness
3. Select 60% (9 personalities) per generation
4. AI chooses the best match within the random subset
5. Temperature = 1.0 for maximum variability
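
A minimal sketch of steps 2-3. `pickRandomSubset` is an illustrative name, not the actual export of lib/BrainConfig.js:

```javascript
// Fisher-Yates shuffle, then keep 60% of the pool (9 of 15).
function pickRandomSubset(personalities, ratio = 0.6) {
  const pool = [...personalities];
  for (let i = pool.length - 1; i > 0; i--) {
    const j = Math.floor(Math.random() * (i + 1));
    [pool[i], pool[j]] = [pool[j], pool[i]]; // swap
  }
  return pool.slice(0, Math.ceil(pool.length * ratio));
}
```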

**15 Available Personalities**:
- Marc (technical), Sophie (déco), Laurent (commercial), Julie (architecture)
- Kévin (terrain), Amara (engineering), Mamadou (artisan), Émilie (digital)
- Pierre-Henri (heritage), Yasmine (greentech), Fabrice (metallurgy)
- Chloé (content), Linh (manufacturing), Minh (design), Thierry (creole)

### Multi-LLM Pipeline (lib/ContentGeneration.js)
1. **Base Generation** (Claude Sonnet-4) - Initial content creation
2. **Technical Enhancement** (GPT-4o-mini) - Add precision and terminology
3. **Transition Enhancement** (Gemini) - Improve flow (if available)
4. **Personality Style** (Mistral) - Apply personality-specific voice (chained as sketched below)
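
The four stages run sequentially, each rewriting the previous stage's output for one element. A sketch under the assumption that each stage is exposed as an async call; the function names here are illustrative, not the exact API of lib/ContentGeneration.js:

```javascript
const { logSh } = require('./lib/ErrorReporting');

// Each stage rewrites the previous stage's output for one element.
async function enhanceElement(element, personality) {
  let text = await generateBase(element, personality);     // 1. Claude Sonnet-4
  text = await enhanceTechnical(text, element);            // 2. GPT-4o-mini
  try {
    text = await enhanceTransitions(text);                 // 3. Gemini (if available)
  } catch (e) {
    logSh(`⚠️ Gemini unavailable, skipping transitions: ${e.message}`, 'WARN');
  }
  return await applyPersonalityStyle(text, personality);   // 4. Mistral
}
```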

### Key Components Status

#### lib/LLMManager.js ✅
- 6 LLM providers operational: Claude, OpenAI, Gemini, Deepseek, Moonshot, Mistral
- Retry logic and rate limiting implemented
- Provider rotation and fallback chains
- **Note**: Gemini is geo-blocked in some regions (falls back to other providers)

#### lib/BrainConfig.js ✅
- **FULLY MIGRATED** to Google Sheets integration
- Random personality selection implemented
- Environment variable authentication
- Default XML template system for filename fallbacks

#### lib/ElementExtraction.js ✅
- Fixed regex for instruction parsing: `{{variables}}` vs `{instructions}` (example below)
- 16+ element extraction capability
- Direct generation mode operational
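
The parsing problem: template variables use double braces while instructions use single braces, so a naive `/\{([^}]+)\}/` grabs the inside of `{{T0}}` first. An illustrative approximation of the distinction (the exact production regex lives in lib/ElementExtraction.js; debug_instructions.js later in this commit explores the same issue):

```javascript
const el = 'Titre_Principal{{T0}}{Rédige un titre H1 accrocheur}';

const variables = el.match(/\{\{([^}]+)\}\}/g);            // ['{{T0}}']
const instruction = el.match(/(?<!\{)\{(?!\{)([^{}]+)\}(?!\})/); // single braces only
console.log(variables, instruction && instruction[1]);
// → [ '{{T0}}' ] 'Rédige un titre H1 accrocheur'
```

Real templates can nest `{{variables}}` inside the `{instruction}` body, which this simple pattern does not handle; that harder case is exactly what debug_instructions.js investigates.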

#### lib/ArticleStorage.js ✅
- Organic text compilation (maintains the natural hierarchy)
- Google Sheets storage (compiled text only, no XML)
- Automatic slug generation and metadata tracking
- French timestamp formatting

#### lib/ErrorReporting.js ✅
- Centralized logging system
- Email notifications (requires credential setup)

## Current System Status (2025-09-01)

### ✅ **Fully Operational**
- **Google Sheets Integration**: Complete data pipeline
- **15 AI Personalities**: Random selection with 100% variability tested
- **Multi-LLM Generation**: 6 providers, 4-stage enhancement
- **Direct Element Generation**: 16+ elements processed
- **Organic Content Storage**: Clean text compilation
- **Anti-Detection System**: Maximum style diversity

### 🔶 **Partially Operational**
- **Email Notifications**: Implemented but needs credentials setup
- **Gemini Integration**: Geo-blocked in some regions (5/6 LLMs operational)

### ⚠️ **Known Issues**
- Email SMTP credentials need configuration in .env
- Some XML tag replacements may need optimization (rare validation errors)
- Gemini API blocked by geolocation (non-critical - 5 other providers work)

### 🎯 **Production Ready Features**
- **Real-time execution**: 60-90 seconds for the complete multi-LLM workflow
- **Google Sheets automation**: Full read/write integration
- **Anti-detection guarantee**: 15 personalities × random selection × 4 LLM stages
- **Content quality**: Organic compilation maintains natural readability
- **Scalability**: Direct Node.js execution, no web interface dependency

## Migration Status: Google Apps Script → Node.js

### ✅ **100% Migrated**
- Google Sheets API integration
- Multi-LLM content generation
- Personality selection system
- XML template processing
- Content assembly and storage
- Workflow orchestration
- Error handling and logging

### 🔶 **Configuration Needed**
- Email notification credentials
- Optional: VPN for Gemini access

### 📊 **Performance Metrics**
- **Execution time**: 60-90 seconds (full multi-LLM pipeline)
- **Success rate**: 97%+ workflow completion
- **Personality variability**: 100% tested (5/5 different personalities in consecutive runs)
- **Content quality**: Natural, human-like output with organic flow
- **Anti-detection**: Multiple writing styles, vocabularies, and tones per generation

## Workflow Sources
- **production** - Real Google Sheets data processing
- **test_random_personality** - Testing with personality randomization
- **node_server** - Direct API processing
- Legacy: `make_com`, `digital_ocean_autonomous`

## Key Dependencies
- **googleapis**: Google Sheets API integration
- **axios**: HTTP client for LLM APIs
- **dotenv**: Environment variable management
- **express**: Web server framework
- **nodemailer**: Email notifications (needs setup)

## File Structure
- **server.js**: Express server with basic endpoints
- **lib/Main.js**: Core workflow orchestration
- **lib/BrainConfig.js**: Google Sheets integration + personality system
- **lib/LLMManager.js**: Multi-LLM provider management
- **lib/ContentGeneration.js**: Content generation and enhancement
- **lib/ElementExtraction.js**: XML parsing and element extraction
- **lib/ArticleStorage.js**: Google Sheets storage and compilation
- **lib/ErrorReporting.js**: Logging and error handling
- **.env**: Environment configuration (Google credentials, API keys)

## Important Notes for Future Development
- **Personality system is now random-based**: 60% of the 15 personalities are selected per run
- **All data comes from Google Sheets**: No more JSON files or hardcoded data
- **Default XML template**: Auto-generated when column I contains a filename
- **Temperature = 1.0**: Maximum variability in AI selection
- **Direct element generation**: Bypasses the hierarchy system for reliability
- **Organic compilation**: Maintains natural text flow in the final output
- **5/6 LLM providers operational**: Gemini geo-blocked, others fully functional

## LogSh - Centralized Logging System

### **Architecture**
- **Centralized logging**: All logs must go through the LogSh function in ErrorReporting.js (usage sketch below)
- **Multi-output streams**: Console (pretty format) + File (JSON) + WebSocket (real-time)
- **No console or custom loggers**: Do not use console.* or alternate logger modules
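
A usage sketch, assuming `logSh(message, level)` as it is called throughout this commit:

```javascript
const { logSh } = require('./lib/ErrorReporting');

function demo(rowNumber) {
  logSh('🚀 Workflow started', 'INFO');       // fans out to console, JSON file, WebSocket
  logSh(`Row ${rowNumber} loaded`, 'DEBUG');  // visible in the file sink (DEBUG and above)
  try {
    throw new Error('LLM call failed');
  } catch (err) {
    logSh('❌ ' + err.message, 'ERROR');      // ERROR level, per the level list below
  }
}
demo(2);
```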

### **Log Levels and Usage**
- **TRACE**: Hierarchical workflow execution with parameters (▶ ✔ ✖ symbols)
- **DEBUG**: Detailed debugging information (visible in files with debug level)
- **INFO**: Standard operational messages
- **WARN**: Warning conditions
- **ERROR**: Error conditions with stack traces

### **File Logging**
- **Format**: JSON structured logs in timestamped files
- **Location**: logs/seo-generator-YYYY-MM-DD_HH-MM-SS.log
- **Flush behavior**: Immediate flush on every log call to prevent buffer loss
- **Level**: DEBUG and above (includes all TRACE logs)

### **Real-time Logging**
- **WebSocket server**: Port 8081 for live log viewing
- **Auto-launch**: logs-viewer.html opens in the Edge browser automatically
- **Features**: Search, filtering by level, scroll preservation, compact UI

### **Trace System**
- **Hierarchical execution tracking**: Uses AsyncLocalStorage for span context (minimal sketch below)
- **Function parameters**: All tracer.run() calls include relevant parameters
- **Format**: Function names with file prefixes (e.g., "Main.handleFullWorkflow()")
- **Performance timing**: Start/end with duration measurements
- **Error handling**: Automatic stack trace logging on failures
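
A minimal sketch of how a span tracer can propagate hierarchical context with AsyncLocalStorage. This `tracer` is a simplified stand-in for the project's actual tracer, not its real implementation:

```javascript
const { AsyncLocalStorage } = require('async_hooks');
const { logSh } = require('./lib/ErrorReporting');

const als = new AsyncLocalStorage();

const tracer = {
  async run(name, params, fn) {
    const parent = als.getStore() || { depth: 0 };
    const span = { name, depth: parent.depth + 1 };
    const indent = '  '.repeat(span.depth);
    const start = Date.now();
    logSh(`${indent}▶ ${name} ${JSON.stringify(params)}`, 'TRACE');
    try {
      const result = await als.run(span, fn);   // child spans see this span as parent
      logSh(`${indent}✔ ${name} (${Date.now() - start}ms)`, 'TRACE');
      return result;
    } catch (err) {
      logSh(`${indent}✖ ${name}: ${err.stack}`, 'TRACE');
      throw err;
    }
  }
};

// Usage: tracer.run('Main.handleFullWorkflow()', { rowNumber: 2 }, async () => { /* ... */ });
```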

### **Log Viewer Features**
- **Real-time updates**: WebSocket connection to the Node.js server
- **Level filtering**: Toggle TRACE/DEBUG/INFO/WARN/ERROR visibility
- **Search functionality**: Regex search with match highlighting
- **Proportional scrolling**: Maintains relative position when filtering
- **Compact UI**: Optimized for full viewport utilization

## Unused Audit Tool
- **Location**: tools/audit-unused.cjs (manual run only)
- **Reports**: Dead files, broken relative imports, unused exports
- **Use sparingly**: Run before cleanup or release; keep exports with `// @keep:export Name`

## 📦 Bundling Tool

`pack-lib.cjs` creates a single `code.js` from all files in `lib/`.
Each file is concatenated with an ASCII header showing its path. Imports/exports are kept, so the bundle is for **reading/audit only**, not execution.

### Usage
```bash
node pack-lib.cjs                       # default → code.js
node pack-lib.cjs --out out.js          # custom output
node pack-lib.cjs --order alpha
node pack-lib.cjs --entry lib/test-manual.js
```

107  TODO.md  Normal file
@@ -0,0 +1,107 @@
# TODO - CRITICAL WORKFLOW IMPROVEMENTS

## 🚨 PRIORITY 1 - AI INTEGRATION IN PERSONALITIES

### CURRENT PROBLEM
- The AI fallbacks were removed to avoid quality degradation
- If an AI fails (Claude, OpenAI, Gemini, Mistral), the workflow stops abruptly
- No intelligent recovery strategy

### REQUIRED SOLUTION
**Integrate AI preferences directly into the Google Sheets personality profiles**

#### New fields to add to the "Personnalites" sheet:
- `LLM_Prefere`: primary LLM for this personality (e.g. "claude", "openai")
- `LLM_Fallback`: backup LLM if the primary one fails
- `Temperature`: personality-specific temperature (0.7-1.0)
- `Style_Prompt`: specific instructions to adapt the prompt to the style

#### Sample data:
```
Marc   | Expert technique | ... | claude | openai  | 0.8 | "Utilise un vocabulaire technique précis"
Sophie | Passionnée       | ... | gemini | mistral | 1.0 | "Sois chaleureux et utilise des anecdotes"
```

#### Intelligent fallback logic (steps 1-2 sketched below):
1. Use the personality's `LLM_Prefere`
2. On failure → use the personality's `LLM_Fallback`
3. On failure → **retry with a new personality** (see below)
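
A minimal sketch of steps 1-2, assuming the column names proposed above; `callLLM` is a hypothetical dispatcher, and `selectAlternativePersonality` is the Priority 2 retry below:

```javascript
// Try the personality's preferred LLM, then its declared fallback.
async function generateForPersonality(prompt, personality) {
  const { LLM_Prefere, LLM_Fallback, Temperature } = personality;
  try {
    return await callLLM(LLM_Prefere, prompt, { temperature: Temperature }); // hypothetical dispatcher
  } catch (primaryError) {
    logSh(`⚠️ ${LLM_Prefere} failed (${primaryError.message}), trying ${LLM_Fallback}`, 'WARN');
    // If this also throws, the caller retries with a new personality (Priority 2).
    return await callLLM(LLM_Fallback, prompt, { temperature: Temperature });
  }
}
```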

---

## 🔄 PRIORITY 2 - RETRY WITH A NEW PERSONALITY

### CURRENT PROBLEM
- If a personality's enhancement fails, the workflow stops
- All work already done is lost

### REQUIRED SOLUTION
**A retry system that selects a new personality**

#### Recovery strategy:
1. **Failure detection**: capture AI errors during enhancement
2. **State saving**: keep the content generated up to the failed step
3. **New selection**: pick a different personality whose LLM is available
4. **Partial resume**: rerun only the failed step, not the whole workflow

#### Suggested implementation:
```javascript
async function enhanceWithPersonalityRecovery(content, personality, attempt = 1) {
  try {
    return await enhance(content, personality);
  } catch (error) {
    if (attempt < 3) {
      const newPersonality = selectAlternativePersonality(personality);
      logSh(`🔄 Retry attempt ${attempt + 1} with ${newPersonality.nom}`, 'INFO');
      return await enhanceWithPersonalityRecovery(content, newPersonality, attempt + 1);
    }
    throw new Error(`FATAL: Failed after 3 attempts with different personalities`);
  }
}
```

---

## 📋 PRIORITY 3 - OTHER IMPROVEMENTS

### A. AI failure monitoring
- **Detailed logging**: which LLM fails, when, and why
- **Reliability metrics**: success rate per LLM and per personality
- **Proactive alerts**: notification when an LLM becomes unavailable

### B. Dynamic configuration
- **Connectivity tests**: check LLM availability before the workflow runs
- **Smart selection**: avoid LLMs known to be unavailable
- **Caching**: cache LLM responses to avoid redundant calls

### C. Partial rollback
- **Step checkpoints**: save the result of each workflow step
- **Granular resume**: resume from the failed step, not from the beginning

---

## ⚡ IMMEDIATE IMPLEMENTATION

### Step 1: Update Google Sheets
1. Add the columns to the "Personnalites" sheet
2. Fill in the data for the 15 existing personalities
3. Test reading the new columns

### Step 2: Adapt the code
1. **BrainConfig.js**: read the personalities' new LLM fields
2. **SelectiveEnhancement.js**: use each personality's preferred LLMs
3. **LLMManager.js**: add per-personality fallback logic

### Step 3: Tests
1. Test with an LLM deliberately made unavailable
2. Verify the retry with a new personality
3. Validate content quality with the fallbacks

---

## 🎯 EXPECTED RESULT

- **99% availability**: the workflow no longer stops because of temporary AI issues
- **Quality preserved**: each personality uses its optimal LLM
- **Intelligent recovery**: if one personality fails, another is tried
- **Zero lost work**: resume at the failed step, no full restart

32  debug_instructions.js  Normal file
@@ -0,0 +1,32 @@
// Debug test to inspect instruction extraction

const templateTest = `|Titre_Principal{{T0}}{Rédige un titre H1 accrocheur de maximum 10 mots pour {{MC0}}. Style {{personality.style}}}|`;

console.log('🔍 INSTRUCTION EXTRACTION TEST');
console.log('Test template:', templateTest);

// Reproduces the logic of ElementExtraction.js
const regex = /\|([^|]+)\|/g;
let match;

while ((match = regex.exec(templateTest)) !== null) {
  const fullMatch = match[1]; // Everything between the |pipes|
  console.log('FullMatch:', fullMatch);

  // Component extraction (lines 23-25 of ElementExtraction.js)
  const nameMatch = fullMatch.match(/^([^{]+)/);
  const variablesMatch = fullMatch.match(/\{\{([^}]+)\}\}/g);
  const instructionsMatch = fullMatch.match(/\{([^}]+)\}/);

  console.log('nameMatch:', nameMatch ? nameMatch[1] : null);
  console.log('variablesMatch:', variablesMatch);
  console.log('instructionsMatch:', instructionsMatch ? instructionsMatch[1] : null);

  console.log('\n--- PROBLEM IDENTIFIED ---');
  console.log('The instructionsMatch regex looks for {single} braces, but we have {{double}} AND {single}');
  console.log('We need a regex that skips the {{double}} braces');

  // Test the corrected regex
  const instructionsMatchFixed = fullMatch.match(/\{(?!\{)([^}]+)(?<!\})\}/);
  console.log('instructionsMatchFixed:', instructionsMatchFixed ? instructionsMatchFixed[1] : null);
}

54  launch_real.js  Normal file
@@ -0,0 +1,54 @@
// ========================================
// SCRIPT: launch_real.js
// LAUNCH WITHOUT TIMEOUT - FULL PROCESS
// ========================================

const { processRealData } = require('./process_real');
const { logSh } = require('./lib/ErrorReporting'); // Unified logSh from ErrorReporting; no console.* logging

async function launchWithoutTimeout() {
  logSh('🚀 === LAUNCHING FULL PROCESS FOR ROW 2 ===', 'INFO');
  logSh('⏱️ NO TIMEOUT - give the process the time it needs', 'INFO');
  logSh('📋 37 elements to process with 6 LLMs', 'INFO');
  logSh('⏳ Estimated duration: 3-5 minutes', 'INFO');
  logSh('', 'INFO');

  try {
    const startTime = Date.now();

    const result = await processRealData(2);

    const duration = Math.round((Date.now() - startTime) / 1000);

    logSh('', 'INFO');
    logSh('🎉 === PROCESS FINISHED ===', 'INFO');
    logSh(`⏱️ Total duration: ${duration}s`, 'INFO');
    logSh('📋 Result:', 'INFO');
    logSh(`   ✅ Success: ${result.success}`, 'INFO');
    logSh(`   📊 Elements: ${result.elementsGenerated}`, 'INFO');
    logSh(`   📝 Words: ${result.stats?.wordCount || 'N/A'}`, 'INFO');
    logSh(`   🔍 Validation: ${result.validationReport?.status || 'N/A'}`, 'INFO');
    logSh(`   💾 Article ID: ${result.articleStorage?.articleId || 'N/A'}`, 'INFO');

    if (result.articleStorage?.articleId) {
      logSh('', 'INFO');
      logSh('🎯 ARTICLE SAVED TO GOOGLE SHEETS!', 'INFO');
      logSh('   Check your Google Sheet for the generated article', 'INFO');
    }

    logSh('', 'INFO');
    logSh('📋 Detailed logs: logs/seo-generator-2025-08-31.log', 'INFO');

  } catch (error) {
    logSh('', 'ERROR'); // logSh instead of console.error, per the logging rules
    logSh('❌ ERROR: ' + error.message, 'ERROR');
    logSh('📋 See the logs for details', 'ERROR');
  }
}

// Run directly
if (require.main === module) {
  launchWithoutTimeout();
}

module.exports = { launchWithoutTimeout };

709  lib/ArticleStorage.js  Normal file
@@ -0,0 +1,709 @@
// ========================================
// FILE: ArticleStorage.js
// Description: Article storage system; saves compiled text only
// ========================================

require('dotenv').config();
const { google } = require('googleapis');
const { logSh } = require('./ErrorReporting');

// Google Sheets configuration
const SHEET_CONFIG = {
  sheetId: '1iA2GvWeUxX-vpnAMfVm3ZMG9LhaC070SdGssEcXAh2c'
};

/**
 * NEW FUNCTION: compile the content organically.
 * Respects the hierarchy and the natural associations.
 */
async function compileGeneratedTextsOrganic(generatedTexts, elements) {
  if (!generatedTexts || Object.keys(generatedTexts).length === 0) {
    return '';
  }

  logSh(`🌱 ORGANIC compilation of ${Object.keys(generatedTexts).length} elements...`, 'DEBUG');

  let compiledParts = [];

  // 1. DETECT and GROUP the organic sections
  const organicSections = buildOrganicSections(generatedTexts, elements);

  // 2. COMPILE in natural order
  organicSections.forEach(section => {
    if (section.type === 'header_with_content') {
      // H1, H2, H3 with their associated content
      if (section.title) {
        compiledParts.push(cleanIndividualContent(section.title));
      }
      if (section.content) {
        compiledParts.push(cleanIndividualContent(section.content));
      }
    }
    else if (section.type === 'standalone_content') {
      // Content with no associated heading
      compiledParts.push(cleanIndividualContent(section.content));
    }
    else if (section.type === 'faq_pair') {
      // Question + answer pair
      if (section.question && section.answer) {
        compiledParts.push(cleanIndividualContent(section.question));
        compiledParts.push(cleanIndividualContent(section.answer));
      }
    }
  });

  // 3. Join with natural spacing
  const finalText = compiledParts.join('\n\n');

  logSh(`✅ Organic compilation done: ${finalText.length} characters`, 'INFO');
  return finalText.trim();
}

/**
 * Build the organic sections by analyzing the associations
 */
function buildOrganicSections(generatedTexts, elements) {
  const sections = [];
  const usedTags = new Set();

  // 1. ANALYZE the original order of the elements
  const originalOrder = elements ? elements.map(el => el.originalTag) : Object.keys(generatedTexts);

  logSh(`📋 Analyzing ${originalOrder.length} elements in original order...`, 'DEBUG');

  // 2. DETECT the natural associations
  for (let i = 0; i < originalOrder.length; i++) {
    const currentTag = originalOrder[i];
    const currentContent = generatedTexts[currentTag];

    if (!currentContent || usedTags.has(currentTag)) continue;

    const currentType = identifyElementType(currentTag);

    if (currentType === 'titre_h1' || currentType === 'titre_h2' || currentType === 'titre_h3') {
      // LOOK for the associated content that follows
      const associatedContent = findAssociatedContent(originalOrder, i, generatedTexts, usedTags);

      sections.push({
        type: 'header_with_content',
        title: currentContent,
        content: associatedContent.content,
        titleTag: currentTag,
        contentTag: associatedContent.tag
      });

      usedTags.add(currentTag);
      if (associatedContent.tag) {
        usedTags.add(associatedContent.tag);
      }

      logSh(`   ✓ Section: ${currentType} + associated content`, 'DEBUG');
    }
    else if (currentType === 'faq_question') {
      // LOOK for the matching answer
      const matchingAnswer = findMatchingFAQAnswer(currentTag, generatedTexts);

      if (matchingAnswer) {
        sections.push({
          type: 'faq_pair',
          question: currentContent,
          answer: matchingAnswer.content,
          questionTag: currentTag,
          answerTag: matchingAnswer.tag
        });

        usedTags.add(currentTag);
        usedTags.add(matchingAnswer.tag);

        logSh(`   ✓ FAQ pair: ${currentTag} + ${matchingAnswer.tag}`, 'DEBUG');
      }
    }
    else if (currentType !== 'faq_reponse') {
      // STANDALONE CONTENT (not an already-handled FAQ answer)
      sections.push({
        type: 'standalone_content',
        content: currentContent,
        contentTag: currentTag
      });

      usedTags.add(currentTag);
      logSh(`   ✓ Standalone content: ${currentType}`, 'DEBUG');
    }
  }

  logSh(`🏗️ ${sections.length} organic sections built`, 'INFO');
  return sections;
}

/**
 * Find the content associated with a heading (the paragraph that follows)
 */
function findAssociatedContent(originalOrder, titleIndex, generatedTexts, usedTags) {
  // Look through the following elements
  for (let j = titleIndex + 1; j < originalOrder.length; j++) {
    const nextTag = originalOrder[j];
    const nextContent = generatedTexts[nextTag];

    if (!nextContent || usedTags.has(nextTag)) continue;

    const nextType = identifyElementType(nextTag);

    // If we hit another heading, stop
    if (nextType === 'titre_h1' || nextType === 'titre_h2' || nextType === 'titre_h3') {
      break;
    }

    // If we find body content (texte, intro), it is probably associated
    if (nextType === 'texte' || nextType === 'intro') {
      return {
        content: nextContent,
        tag: nextTag
      };
    }
  }

  return { content: null, tag: null };
}

/**
 * Extract the number of a FAQ tag: |Faq_q_1| or |Faq_a_2| → "1" or "2"
 */
function extractFAQNumber(tag) {
  const match = tag.match(/(\d+)/);
  return match ? match[1] : null;
}

/**
 * Find the FAQ answer matching a question
 */
function findMatchingFAQAnswer(questionTag, generatedTexts) {
  // Extract the number: |Faq_q_1| → 1
  const questionNumber = extractFAQNumber(questionTag);

  if (!questionNumber) return null;

  // Look for the matching answer
  for (const tag in generatedTexts) {
    const tagType = identifyElementType(tag);

    if (tagType === 'faq_reponse') {
      const answerNumber = extractFAQNumber(tag);

      if (answerNumber === questionNumber) {
        return {
          content: generatedTexts[tag],
          tag: tag
        };
      }
    }
  }

  return null;
}

/**
 * New save function with organic compilation
 */
async function saveGeneratedArticleOrganic(articleData, csvData, config = {}) {
  try {
    logSh('💾 Saving article with organic compilation...', 'INFO');

    const sheets = await getSheetsClient();

    // Check whether the sheet exists, otherwise create it
    let articlesSheet = await getOrCreateSheet(sheets, 'Generated_Articles');

    // ===== ORGANIC COMPILATION =====
    const compiledText = await compileGeneratedTextsOrganic(
      articleData.generatedTexts,
      articleData.originalElements // Pass the original elements if available
    );

    logSh(`📝 Organically compiled text: ${compiledText.length} characters`, 'INFO');

    // Metadata with French formatting
    const now = new Date();
    const frenchTimestamp = formatDateToFrench(now);

    // USE the slug from the CSV (column A of the source Google Sheet)
    // The slug must come from csvData.slug (retrieved via getBrainConfig)
    const slug = csvData.slug || generateSlugFromContent(csvData.mc0, csvData.t0);

    const metadata = {
      timestamp: frenchTimestamp,
      slug: slug,
      mc0: csvData.mc0,
      t0: csvData.t0,
      personality: csvData.personality?.nom || 'Unknown',
      antiDetectionLevel: config.antiDetectionLevel || 'MVP',
      elementsCount: Object.keys(articleData.generatedTexts || {}).length,
      textLength: compiledText.length,
      wordCount: countWords(compiledText),
      llmUsed: config.llmUsed || 'openai',
      validationStatus: articleData.validationReport?.status || 'unknown'
    };

    // Prepare the data row
    const row = [
      metadata.timestamp,
      metadata.slug,
      metadata.mc0,
      metadata.t0,
      metadata.personality,
      metadata.antiDetectionLevel,
      compiledText, // ← ORGANIC TEXT
      metadata.textLength,
      metadata.wordCount,
      metadata.elementsCount,
      metadata.llmUsed,
      metadata.validationStatus,
      '', '', '', '',
      JSON.stringify({
        csvData: csvData,
        config: config,
        stats: metadata
      })
    ];

    // DEBUG: check the generated slug
    logSh(`💾 Saving with slug: "${metadata.slug}" (column B)`, 'DEBUG');

    // Append the row to the data
    await sheets.spreadsheets.values.append({
      spreadsheetId: SHEET_CONFIG.sheetId,
      range: 'Generated_Articles!A:Q',
      valueInputOption: 'USER_ENTERED',
      resource: {
        values: [row]
      }
    });

    // Fetch the row count to derive the article ID
    const response = await sheets.spreadsheets.values.get({
      spreadsheetId: SHEET_CONFIG.sheetId,
      range: 'Generated_Articles!A:A'
    });

    const articleId = response.data.values ? response.data.values.length - 1 : 1;

    logSh(`✅ Organic article saved: ID ${articleId}, ${metadata.wordCount} words`, 'INFO');

    return {
      articleId: articleId,
      textLength: metadata.textLength,
      wordCount: metadata.wordCount,
      sheetRow: response.data.values ? response.data.values.length : 2
    };

  } catch (error) {
    logSh(`❌ Organic save error: ${error.toString()}`, 'ERROR');
    throw error;
  }
}

/**
 * Generate a slug from the MC0 and T0 content
 */
function generateSlugFromContent(mc0, t0) {
  if (!mc0 && !t0) return 'article-generated';

  const source = mc0 || t0;
  return source
    .toString()
    .toLowerCase()
    .replace(/[àáâäã]/g, 'a')
    .replace(/[èéêë]/g, 'e')
    .replace(/[ìíîï]/g, 'i')
    .replace(/[òóôöõ]/g, 'o')
    .replace(/[ùúûü]/g, 'u')
    .replace(/[ç]/g, 'c')
    .replace(/[ñ]/g, 'n')
    .replace(/[^a-z0-9\s-]/g, '') // Strip special characters
    .replace(/\s+/g, '-')         // Spaces -> hyphens
    .replace(/-+/g, '-')          // Avoid double hyphens
    .replace(/^-+|-+$/g, '')      // Strip leading/trailing hyphens
    .substring(0, 50);            // Limit the length
}

/**
 * Identify an element's type from its tag
 */
function identifyElementType(tag) {
  const cleanTag = tag.toLowerCase().replace(/[|{}]/g, '');

  if (cleanTag.includes('titre_h1') || cleanTag.includes('h1')) return 'titre_h1';
  if (cleanTag.includes('titre_h2') || cleanTag.includes('h2')) return 'titre_h2';
  if (cleanTag.includes('titre_h3') || cleanTag.includes('h3')) return 'titre_h3';
  if (cleanTag.includes('intro')) return 'intro';
  if (cleanTag.includes('faq_q') || cleanTag.includes('faq_question')) return 'faq_question';
  if (cleanTag.includes('faq_a') || cleanTag.includes('faq_reponse')) return 'faq_reponse';

  return 'texte'; // Default
}

/**
 * Clean an individual piece of content
 */
function cleanIndividualContent(content) {
  if (!content) return '';

  let cleaned = content.toString();

  // 1. Strip HTML tags
  cleaned = cleaned.replace(/<[^>]*>/g, '');

  // 2. Decode HTML entities (the entity patterns were garbled in the
  //    extracted source; reconstructed here from the surrounding intent)
  cleaned = cleaned.replace(/&lt;/g, '<');
  cleaned = cleaned.replace(/&gt;/g, '>');
  cleaned = cleaned.replace(/&amp;/g, '&');
  cleaned = cleaned.replace(/&quot;/g, '"');
  cleaned = cleaned.replace(/&#39;/g, "'");
  cleaned = cleaned.replace(/&nbsp;/g, ' ');

  // 3. Normalize whitespace
  cleaned = cleaned.replace(/\s+/g, ' ');
  cleaned = cleaned.replace(/\n\s+/g, '\n');

  // 4. Strip stray control characters
  cleaned = cleaned.replace(/[\x00-\x1F\x7F-\x9F]/g, '');

  return cleaned.trim();
}

/**
 * Create the storage sheet with the appropriate headers
 */
async function createArticlesStorageSheet(sheets) {
  logSh('🗄️ Creating the Generated_Articles sheet...', 'INFO');

  try {
    // Create the new sheet
    await sheets.spreadsheets.batchUpdate({
      spreadsheetId: SHEET_CONFIG.sheetId,
      resource: {
        requests: [{
          addSheet: {
            properties: {
              title: 'Generated_Articles'
            }
          }
        }]
      }
    });

    // Headers
    const headers = [
      'Timestamp',
      'Slug',
      'MC0',
      'T0',
      'Personality',
      'AntiDetection_Level',
      'Compiled_Text', // ← MAIN COLUMN
      'Text_Length',
      'Word_Count',
      'Elements_Count',
      'LLM_Used',
      'Validation_Status',
      'GPTZero_Score', // Detector scores (to be filled in)
      'Originality_Score',
      'CopyLeaks_Score',
      'Human_Quality_Score',
      'Full_Metadata_JSON' // Complete backup
    ];

    // Add the headers
    await sheets.spreadsheets.values.update({
      spreadsheetId: SHEET_CONFIG.sheetId,
      range: 'Generated_Articles!A1:Q1',
      valueInputOption: 'USER_ENTERED',
      resource: {
        values: [headers]
      }
    });

    // Format the headers
    await sheets.spreadsheets.batchUpdate({
      spreadsheetId: SHEET_CONFIG.sheetId,
      resource: {
        requests: [{
          repeatCell: {
            range: {
              sheetId: await getSheetIdByName(sheets, 'Generated_Articles'),
              startRowIndex: 0,
              endRowIndex: 1,
              startColumnIndex: 0,
              endColumnIndex: headers.length
            },
            cell: {
              userEnteredFormat: {
                textFormat: {
                  bold: true
                },
                backgroundColor: {
                  red: 0.878,
                  green: 0.878,
                  blue: 0.878
                },
                horizontalAlignment: 'CENTER'
              }
            },
            fields: 'userEnteredFormat(textFormat,backgroundColor,horizontalAlignment)'
          }
        }]
      }
    });

    logSh('✅ Generated_Articles sheet created successfully', 'INFO');
    return true;

  } catch (error) {
    logSh(`❌ Sheet creation error: ${error.toString()}`, 'ERROR');
    throw error;
  }
}

/**
 * Format a date in the French format DD/MM/YYYY HH:mm:ss
 */
function formatDateToFrench(date) {
  // Use toLocaleString with the French locale
  return date.toLocaleString('fr-FR', {
    day: '2-digit',
    month: '2-digit',
    year: 'numeric',
    hour: '2-digit',
    minute: '2-digit',
    second: '2-digit',
    hour12: false,
    timeZone: 'Europe/Paris'
  }).replace(',', '');
}

/**
 * Count the words in a text
 */
function countWords(text) {
  if (!text || text.trim() === '') return 0;
  return text.trim().split(/\s+/).length;
}

/**
 * Fetch a saved article by ID
 */
async function getStoredArticle(articleId) {
  try {
    const sheets = await getSheetsClient();

    const rowNumber = articleId + 2; // +2 for the header row + 0-indexing
    const response = await sheets.spreadsheets.values.get({
      spreadsheetId: SHEET_CONFIG.sheetId,
      range: `Generated_Articles!A${rowNumber}:Q${rowNumber}`
    });

    if (!response.data.values || response.data.values.length === 0) {
      throw new Error(`Article ${articleId} not found`);
    }

    const data = response.data.values[0];

    return {
      articleId: articleId,
      timestamp: data[0],
      slug: data[1],
      mc0: data[2],
      t0: data[3],
      personality: data[4],
      antiDetectionLevel: data[5],
      compiledText: data[6], // ← PLAIN TEXT
      textLength: data[7],
      wordCount: data[8],
      elementsCount: data[9],
      llmUsed: data[10],
      validationStatus: data[11],
      gptZeroScore: data[12],
      originalityScore: data[13],
      copyLeaksScore: data[14],
      humanScore: data[15],
      fullMetadata: data[16] ? JSON.parse(data[16]) : null
    };

  } catch (error) {
    logSh(`❌ Error fetching article ${articleId}: ${error.toString()}`, 'ERROR');
    throw error;
  }
}

/**
 * List the most recently generated articles
 */
async function getRecentArticles(limit = 10) {
  try {
    const sheets = await getSheetsClient();

    const response = await sheets.spreadsheets.values.get({
      spreadsheetId: SHEET_CONFIG.sheetId,
      range: 'Generated_Articles!A:L'
    });

    if (!response.data.values || response.data.values.length <= 1) {
      return []; // No data, or headers only
    }

    const data = response.data.values.slice(1); // Exclude the headers
    const startIndex = Math.max(0, data.length - limit);
    const recentData = data.slice(startIndex);

    return recentData.map((row, index) => ({
      articleId: startIndex + index,
      timestamp: row[0],
      slug: row[1],
      mc0: row[2],
      personality: row[4],
      antiDetectionLevel: row[5],
      wordCount: row[8],
      validationStatus: row[11]
    })).reverse(); // Most recent first

  } catch (error) {
    logSh(`❌ Error listing recent articles: ${error.toString()}`, 'ERROR');
    return [];
  }
}

/**
 * Update an article's detection scores
 */
async function updateDetectionScores(articleId, scores) {
  try {
    const sheets = await getSheetsClient();
    const rowNumber = articleId + 2;

    const updates = [];

    // Score columns: M, N, O (GPTZero, Originality, CopyLeaks)
    if (scores.gptzero !== undefined) {
      updates.push({
        range: `Generated_Articles!M${rowNumber}`,
        values: [[scores.gptzero]]
      });
    }
    if (scores.originality !== undefined) {
      updates.push({
        range: `Generated_Articles!N${rowNumber}`,
        values: [[scores.originality]]
      });
    }
    if (scores.copyleaks !== undefined) {
      updates.push({
        range: `Generated_Articles!O${rowNumber}`,
        values: [[scores.copyleaks]]
      });
    }

    if (updates.length > 0) {
      await sheets.spreadsheets.values.batchUpdate({
        spreadsheetId: SHEET_CONFIG.sheetId,
        resource: {
          valueInputOption: 'USER_ENTERED',
          data: updates
        }
      });
    }

    logSh(`✅ Detection scores updated for article ${articleId}`, 'INFO');

  } catch (error) {
    logSh(`❌ Error updating scores for article ${articleId}: ${error.toString()}`, 'ERROR');
    throw error;
  }
}

// ============= GOOGLE SHEETS HELPERS =============

/**
 * Get an authenticated Google Sheets client
 */
async function getSheetsClient() {
  const auth = new google.auth.GoogleAuth({
    credentials: {
      client_email: process.env.GOOGLE_SERVICE_ACCOUNT_EMAIL,
      private_key: process.env.GOOGLE_PRIVATE_KEY?.replace(/\\n/g, '\n')
    },
    scopes: ['https://www.googleapis.com/auth/spreadsheets']
  });

  const authClient = await auth.getClient();
  const sheets = google.sheets({ version: 'v4', auth: authClient });

  return sheets;
}

/**
 * Get a sheet, creating it if necessary
 */
async function getOrCreateSheet(sheets, sheetName) {
  try {
    // Check whether the sheet exists
    const response = await sheets.spreadsheets.get({
      spreadsheetId: SHEET_CONFIG.sheetId
    });

    const existingSheet = response.data.sheets.find(
      sheet => sheet.properties.title === sheetName
    );

    if (existingSheet) {
      return existingSheet;
    } else {
      // Create the sheet if it does not exist
      if (sheetName === 'Generated_Articles') {
        await createArticlesStorageSheet(sheets);
        return await getOrCreateSheet(sheets, sheetName); // Recurse to fetch the newly created sheet
      }
      throw new Error(`Sheet ${sheetName} is not supported for automatic creation`);
    }

  } catch (error) {
    logSh(`❌ Error accessing/creating sheet ${sheetName}: ${error.toString()}`, 'ERROR');
    throw error;
  }
}

/**
 * Get a sheet's ID from its name
 */
async function getSheetIdByName(sheets, sheetName) {
  const response = await sheets.spreadsheets.get({
    spreadsheetId: SHEET_CONFIG.sheetId
  });

  const sheet = response.data.sheets.find(
    s => s.properties.title === sheetName
  );

  return sheet ? sheet.properties.sheetId : null;
}

// ============= EXPORTS =============

module.exports = {
  compileGeneratedTextsOrganic,
  buildOrganicSections,
  findAssociatedContent,
  extractFAQNumber,
  findMatchingFAQAnswer,
  saveGeneratedArticleOrganic,
  identifyElementType,
  cleanIndividualContent,
  createArticlesStorageSheet,
  formatDateToFrench,
  countWords,
  getStoredArticle,
  getRecentArticles,
  updateDetectionScores,
  getSheetsClient,
  getOrCreateSheet,
  getSheetIdByName,
  generateSlugFromContent
};

543  lib/BrainConfig.js  Normal file
@@ -0,0 +1,543 @@
// ========================================
// FILE: BrainConfig.js - Node.js version
// Description: Brain configuration + AI personality selection
// ========================================

require('dotenv').config();
const axios = require('axios');
const fs = require('fs').promises;
const path = require('path');

// Import logSh (assumed to exist in your Node.js project)
const { logSh } = require('./ErrorReporting');

// Configuration
const CONFIG = {
  openai: {
    // SECURITY: the original commit hard-coded a live API key here as a
    // fallback; it has been redacted. The key must come from the environment.
    apiKey: process.env.OPENAI_API_KEY,
    endpoint: 'https://api.openai.com/v1/chat/completions'
  },
  dataSource: {
    type: process.env.DATA_SOURCE_TYPE || 'json', // 'json', 'csv', 'database'
    instructionsPath: './data/instructions.json',
    personalitiesPath: './data/personalities.json'
  }
};

/**
 * MAIN FUNCTION - equivalent of getBrainConfig()
 * @param {number|object} data - Row number or direct data
 * @returns {object} Configuration with CSV data + personality
 */
async function getBrainConfig(data) {
  try {
    logSh("🧠 Starting getBrainConfig (Node.js)", "INFO");

    // 1. FETCH THE CSV DATA
    let csvData;
    if (typeof data === 'number') {
      // Row number supplied - read from the data source
      csvData = await readInstructionsData(data);
    } else if (typeof data === 'object' && data.rowNumber) {
      csvData = await readInstructionsData(data.rowNumber);
    } else {
      // Data supplied directly
      csvData = data;
    }

    logSh(`✅ CSV fetched: ${csvData.mc0}`, "INFO");

    // 2. FETCH THE PERSONALITIES
    const personalities = await getPersonalities();
    logSh(`✅ ${personalities.length} personalities loaded`, "INFO");

    // 3. SELECT THE BEST PERSONALITY VIA AI
    const selectedPersonality = await selectPersonalityWithAI(
      csvData.mc0,
      csvData.t0,
      personalities
    );

    logSh(`✅ Personality selected: ${selectedPersonality.nom}`, "INFO");

    return {
      success: true,
      data: {
        ...csvData,
        personality: selectedPersonality,
        timestamp: new Date().toISOString()
      }
    };

  } catch (error) {
    logSh(`❌ getBrainConfig error: ${error.message}`, "ERROR");
    return {
      success: false,
      error: error.message
    };
  }
}
|
||||
/**
|
||||
* LIRE DONNÉES INSTRUCTIONS depuis Google Sheets DIRECTEMENT
|
||||
* @param {number} rowNumber - Numéro de ligne (2 = première ligne de données)
|
||||
* @returns {object} Données CSV parsées
|
||||
*/
|
||||
async function readInstructionsData(rowNumber = 2) {
|
||||
try {
|
||||
logSh(`📊 Lecture Google Sheet ligne ${rowNumber}...`, 'INFO');
|
||||
|
||||
// NOUVEAU : Lecture directe depuis Google Sheets
|
||||
const { google } = require('googleapis');
|
||||
|
||||
// Configuration auth Google Sheets - FORCE utilisation fichier JSON pour éviter problème TLS
|
||||
const keyFilePath = path.join(__dirname, '..', 'seo-generator-470715-85d4a971c1af.json');
|
||||
const auth = new google.auth.GoogleAuth({
|
||||
keyFile: keyFilePath,
|
||||
scopes: ['https://www.googleapis.com/auth/spreadsheets.readonly']
|
||||
});
|
||||
logSh('🔑 Utilisation fichier JSON pour contourner problème TLS OAuth', 'INFO');
|
||||
|
||||
const sheets = google.sheets({ version: 'v4', auth });
|
||||
const SHEET_ID = process.env.GOOGLE_SHEETS_ID || '1iA2GvWeUxX-vpnAMfVm3ZMG9LhaC070SdGssEcXAh2c';
|
||||
|
||||
// Récupérer la ligne spécifique (A à I au minimum)
|
||||
const response = await sheets.spreadsheets.values.get({
|
||||
spreadsheetId: SHEET_ID,
|
||||
range: `Instructions!A${rowNumber}:I${rowNumber}` // Ligne spécifique A-I
|
||||
});
|
||||
|
||||
if (!response.data.values || response.data.values.length === 0) {
|
||||
throw new Error(`Ligne ${rowNumber} non trouvée dans Google Sheet`);
|
||||
}
|
||||
|
||||
const row = response.data.values[0];
|
||||
logSh(`✅ Ligne ${rowNumber} récupérée: ${row.length} colonnes`, 'INFO');
|
||||
|
||||
const xmlTemplateValue = row[8] || '';
|
||||
let xmlTemplate = xmlTemplateValue;
|
||||
let xmlFileName = null;
|
||||
|
||||
// Si c'est un nom de fichier, garder le nom ET utiliser un template par défaut
|
||||
if (xmlTemplateValue && xmlTemplateValue.endsWith('.xml') && xmlTemplateValue.length < 100) {
|
||||
logSh(`🔧 XML filename detected (${xmlTemplateValue}), keeping filename for Digital Ocean`, 'INFO');
|
||||
xmlFileName = xmlTemplateValue; // Garder le nom du fichier pour Digital Ocean
|
||||
xmlTemplate = createDefaultXMLTemplate(); // Template par défaut pour le processing
|
||||
}
|
||||
|
||||
return {
|
||||
rowNumber: rowNumber,
|
||||
slug: row[0] || '', // Colonne A
|
||||
t0: row[1] || '', // Colonne B
|
||||
mc0: row[2] || '', // Colonne C
|
||||
tMinus1: row[3] || '', // Colonne D
|
||||
lMinus1: row[4] || '', // Colonne E
|
||||
mcPlus1: row[5] || '', // Colonne F
|
||||
tPlus1: row[6] || '', // Colonne G
|
||||
lPlus1: row[7] || '', // Colonne H
|
||||
xmlTemplate: xmlTemplate, // XML template pour processing
|
||||
xmlFileName: xmlFileName // Nom fichier pour Digital Ocean (si applicable)
|
||||
};
|
||||
|
||||
} catch (error) {
|
||||
logSh(`❌ Erreur lecture Google Sheet: ${error.message}`, "ERROR");
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * FETCH PERSONALITIES from the "Personnalites" tab of the Google Sheet
 * @returns {Array} List of available personalities
 */
async function getPersonalities() {
  try {
    logSh('📊 Lecture personnalités depuis Google Sheet (onglet Personnalites)...', 'INFO');

    // Google Sheets auth - forced to the JSON key file to work around a TLS issue with OAuth
    const { google } = require('googleapis');
    const keyFilePath = path.join(__dirname, '..', 'seo-generator-470715-85d4a971c1af.json');
    const auth = new google.auth.GoogleAuth({
      keyFile: keyFilePath,
      scopes: ['https://www.googleapis.com/auth/spreadsheets.readonly']
    });
    logSh('🔑 Utilisation fichier JSON pour contourner problème TLS OAuth (personnalités)', 'INFO');

    const sheets = google.sheets({ version: 'v4', auth });
    const SHEET_ID = process.env.GOOGLE_SHEETS_ID || '1iA2GvWeUxX-vpnAMfVm3ZMG9LhaC070SdGssEcXAh2c';

    // Fetch all personalities (everything below the header row)
    const response = await sheets.spreadsheets.values.get({
      spreadsheetId: SHEET_ID,
      range: 'Personnalites!A2:O' // Columns A to O, to include the new AI columns
    });

    if (!response.data.values || response.data.values.length === 0) {
      throw new Error('Aucune personnalité trouvée dans l\'onglet Personnalites');
    }

    const personalities = [];

    // Process each personality row
    response.data.values.forEach((row, index) => {
      if (row[0] && row[0].toString().trim() !== '') { // Only if a name exists (column A)
        const personality = {
          nom: row[0]?.toString().trim() || '',
          description: row[1]?.toString().trim() || 'Expert généraliste',
          style: row[2]?.toString().trim() || 'professionnel',

          // Advanced configuration from the Google Sheet columns
          motsClesSecteurs: parseCSVField(row[3]),
          vocabulairePref: parseCSVField(row[4]),
          connecteursPref: parseCSVField(row[5]),
          erreursTypiques: parseCSVField(row[6]),
          longueurPhrases: row[7]?.toString().trim() || 'moyennes',
          niveauTechnique: row[8]?.toString().trim() || 'moyen',
          ctaStyle: parseCSVField(row[9]),
          defautsSimules: parseCSVField(row[10]),

          // NEW: per-step AI configuration from Google Sheets (columns L-O)
          aiEtape1Base: row[11]?.toString().trim().toLowerCase() || '',
          aiEtape2Technique: row[12]?.toString().trim().toLowerCase() || '',
          aiEtape3Transitions: row[13]?.toString().trim().toLowerCase() || '',
          aiEtape4Style: row[14]?.toString().trim().toLowerCase() || '',

          // Backward compatibility
          motsCles: parseCSVField(row[3] || '') // Mirrors motsClesSecteurs
        };

        personalities.push(personality);
        logSh(`✓ Personnalité chargée: ${personality.nom} (${personality.style})`, 'DEBUG');
      }
    });

    logSh(`📊 ${personalities.length} personnalités chargées depuis Google Sheet`, "INFO");

    return personalities;

  } catch (error) {
    logSh(`❌ ÉCHEC: Impossible de récupérer les personnalités Google Sheets - ${error.message}`, "ERROR");
    throw new Error(`FATAL: Personnalités Google Sheets inaccessibles - arrêt du workflow: ${error.message}`);
  }
}

/**
 * PARSE CSV FIELD - helper
 * @param {string} field - Field to parse
 * @returns {Array} List of parsed items
 */
function parseCSVField(field) {
  if (!field || field.toString().trim() === '') return [];

  return field.toString()
    .split(',')
    .map(item => item.trim())
    .filter(item => item.length > 0);
}

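// Illustrative check of parseCSVField (a minimal sketch, not wired into the
// workflow): empty entries and surrounding whitespace are dropped.
function demoParseCSVField() {
  console.log(parseCSVField('technique, dibond, , aluminium')); // → ['technique', 'dibond', 'aluminium']
  console.log(parseCSVField(null));                             // → []
}
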
/**
 * Select a random subset of personalities
 * @param {Array} allPersonalities - Full list of personalities
 * @param {number} percentage - Fraction to keep (0.6 = 60%)
 * @returns {Array} Random subset
 */
function selectRandomPersonalities(allPersonalities, percentage = 0.6) {
  const count = Math.ceil(allPersonalities.length * percentage);

  // Shuffle with Fisher-Yates (unbiased, unlike the sort() trick)
  const shuffled = [...allPersonalities];
  for (let i = shuffled.length - 1; i > 0; i--) {
    const j = Math.floor(Math.random() * (i + 1));
    [shuffled[i], shuffled[j]] = [shuffled[j], shuffled[i]];
  }

  return shuffled.slice(0, count);
}

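// Fisher-Yates gives every permutation equal probability, whereas the common
// `arr.sort(() => Math.random() - 0.5)` trick biases the result because sort
// assumes a consistent comparator. Illustrative usage (hypothetical data):
function demoSelectRandomPersonalities() {
  const pool = [{ nom: 'Marc' }, { nom: 'Sophie' }, { nom: 'Léa' }, { nom: 'Hugo' }, { nom: 'Nina' }];
  const subset = selectRandomPersonalities(pool, 0.6); // keeps ceil(5 * 0.6) = 3
  console.log(subset.map(p => p.nom));
}
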
/**
 * NEW FUNCTION: select 4 complementary personalities for the multi-AI pipeline
 * @param {string} mc0 - Main keyword
 * @param {string} t0 - Main title
 * @param {Array} personalities - List of personalities
 * @returns {Array} The 4 personalities selected, one per step
 */
async function selectMultiplePersonalitiesWithAI(mc0, t0, personalities) {
  try {
    logSh(`🎭 Sélection MULTI-personnalités IA pour: ${mc0}`, "INFO");

    // Random selection of 80% of the personalities (wider pool for 4 picks)
    const randomPersonalities = selectRandomPersonalities(personalities, 0.8);
    const totalCount = personalities.length;
    const selectedCount = randomPersonalities.length;

    logSh(`🎲 Pool aléatoire: ${selectedCount}/${totalCount} personnalités disponibles`, "DEBUG");
    logSh(`📋 Personnalités dans le pool: ${randomPersonalities.map(p => p.nom).join(', ')}`, "DEBUG");

    const prompt = `Choisis 4 personnalités COMPLÉMENTAIRES pour générer du contenu sur "${mc0}":

OBJECTIF: Créer une équipe de 4 rédacteurs avec styles différents mais cohérents

PERSONNALITÉS DISPONIBLES:
${randomPersonalities.map(p => `- ${p.nom}: ${p.description} (Style: ${p.style})`).join('\n')}

RÔLES À ATTRIBUER:
1. GÉNÉRATEUR BASE: Personnalité technique/experte pour la génération initiale
2. ENHANCER TECHNIQUE: Personnalité commerciale/précise pour améliorer les termes techniques
3. FLUIDITÉ: Personnalité créative/littéraire pour améliorer les transitions
4. STYLE FINAL: Personnalité terrain/accessible pour le style final

CRITÈRES:
- 4 personnalités aux styles DIFFÉRENTS mais complémentaires
- Adapté au secteur: ${mc0}
- Variabilité maximale pour anti-détection
- Éviter les doublons de style

FORMAT DE RÉPONSE (EXACTEMENT 4 noms séparés par des virgules):
Nom1, Nom2, Nom3, Nom4`;

    const requestData = {
      model: "gpt-4o-mini",
      messages: [{ "role": "user", "content": prompt }],
      max_tokens: 100,
      temperature: 1.0
    };

    const response = await axios.post(CONFIG.openai.endpoint, requestData, {
      headers: {
        'Authorization': `Bearer ${CONFIG.openai.apiKey}`,
        'Content-Type': 'application/json'
      },
      timeout: 300000
    });

    const selectedNames = response.data.choices[0].message.content.trim()
      .split(',')
      .map(name => name.trim());

    logSh(`🔍 Noms retournés par IA: ${selectedNames.join(', ')}`, "DEBUG");

    // Map the names back to the real personality objects
    const selectedPersonalities = [];
    selectedNames.forEach(name => {
      const personality = randomPersonalities.find(p => p.nom === name);
      if (personality) {
        selectedPersonalities.push(personality);
      }
    });

    // Top up if not enough personalities were matched (safety net)
    while (selectedPersonalities.length < 4 && randomPersonalities.length > selectedPersonalities.length) {
      const remaining = randomPersonalities.filter(p =>
        !selectedPersonalities.some(selected => selected.nom === p.nom)
      );
      if (remaining.length > 0) {
        const randomIndex = Math.floor(Math.random() * remaining.length);
        selectedPersonalities.push(remaining[randomIndex]);
      } else {
        break;
      }
    }

    // Guarantee exactly 4 personalities
    const final4Personalities = selectedPersonalities.slice(0, 4);

    logSh(`✅ Équipe de 4 personnalités sélectionnée:`, "INFO");
    final4Personalities.forEach((p, index) => {
      const roles = ['BASE', 'TECHNIQUE', 'FLUIDITÉ', 'STYLE'];
      logSh(`  ${index + 1}. ${roles[index]}: ${p.nom} (${p.style})`, "INFO");
    });

    return final4Personalities;

  } catch (error) {
    logSh(`❌ FATAL: Sélection multi-personnalités échouée: ${error.message}`, "ERROR");
    throw new Error(`FATAL: Sélection multi-personnalités IA impossible - arrêt du workflow: ${error.message}`);
  }
}

/**
 * LEGACY FUNCTION: single-personality selection (kept for compatibility)
 * @param {string} mc0 - Main keyword
 * @param {string} t0 - Main title
 * @param {Array} personalities - List of personalities
 * @returns {object} Selected personality
 */
async function selectPersonalityWithAI(mc0, t0, personalities) {
  try {
    logSh(`🤖 Sélection personnalité IA UNIQUE pour: ${mc0}`, "DEBUG");

    // Call the multi-personality selector and keep only the first pick
    const multiPersonalities = await selectMultiplePersonalitiesWithAI(mc0, t0, personalities);
    const selectedPersonality = multiPersonalities[0];

    logSh(`✅ Personnalité IA sélectionnée (mode legacy): ${selectedPersonality.nom}`, "INFO");

    return selectedPersonality;

  } catch (error) {
    logSh(`❌ FATAL: Sélection personnalité par IA échouée: ${error.message}`, "ERROR");
    throw new Error(`FATAL: Sélection personnalité IA inaccessible - arrêt du workflow: ${error.message}`);
  }
}

/**
 * CREATE DEFAULT XML TEMPLATE for when column I holds a file name
 * Uses the available CSV data to build a robust template
 */
function createDefaultXMLTemplate() {
  return `<?xml version="1.0" encoding="UTF-8"?>
<article>
  <header>
    <h1>|Titre_Principal{{T0}}{Rédige un titre H1 accrocheur de maximum 10 mots pour {{MC0}}. Style {{personality.style}}}|</h1>
    <intro>|Introduction{{MC0}}{Rédige une introduction engageante de 2-3 phrases sur {{MC0}}. Ton {{personality.style}}, utilise {{personality.vocabulairePref}}}|</intro>
  </header>

  <main>
    <section class="primary">
      <h2>|Titre_H2_1{{MC+1_1}}{Crée un titre H2 informatif sur {{MC+1_1}}. Style {{personality.style}}}|</h2>
      <p>|Paragraphe_1{{MC+1_1}}{Rédige un paragraphe détaillé de 4-5 phrases sur {{MC+1_1}}. Explique les avantages et caractéristiques. Ton {{personality.style}}}|</p>
    </section>

    <section class="secondary">
      <h2>|Titre_H2_2{{MC+1_2}}{Titre H2 pour {{MC+1_2}}. Mets en valeur les points forts. Ton {{personality.style}}}|</h2>
      <p>|Paragraphe_2{{MC+1_2}}{Paragraphe de 4-5 phrases sur {{MC+1_2}}. Détaille pourquoi c'est important pour {{MC0}}. Ton {{personality.style}}}|</p>
    </section>

    <section class="benefits">
      <h2>|Titre_H2_3{{MC+1_3}}{Titre H2 sur les bénéfices de {{MC+1_3}}. Accrocheur et informatif}|</h2>
      <p>|Paragraphe_3{{MC+1_3}}{Explique en 4-5 phrases les avantages de {{MC+1_3}} pour {{MC0}}. Ton {{personality.style}}}|</p>
    </section>
  </main>

  <aside class="faq">
    <h2>|FAQ_Titre{Titre de section FAQ accrocheur sur {{MC0}}}|</h2>

    <div class="faq-item">
      <h3>|Faq_q_1{{MC+1_1}}{Question fréquente sur {{MC+1_1}} et {{MC0}}}|</h3>
      <p>|Faq_a_1{{MC+1_1}}{Réponse claire et précise. 2-3 phrases. Ton {{personality.style}}}|</p>
    </div>

    <div class="faq-item">
      <h3>|Faq_q_2{{MC+1_2}}{Question pratique sur {{MC+1_2}} en lien avec {{MC0}}}|</h3>
      <p>|Faq_a_2{{MC+1_2}}{Réponse détaillée et utile. 2-3 phrases explicatives. Ton {{personality.style}}}|</p>
    </div>

    <div class="faq-item">
      <h3>|Faq_q_3{{MC+1_3}}{Question sur {{MC+1_3}} que se posent les clients}|</h3>
      <p>|Faq_a_3{{MC+1_3}}{Réponse complète qui rassure et informe. 2-3 phrases. Ton {{personality.style}}}|</p>
    </div>
  </aside>

  <footer>
    <p>|Conclusion{{MC0}}{Conclusion engageante de 2 phrases sur {{MC0}}. Appel à l'action subtil. Ton {{personality.style}}}|</p>
  </footer>
</article>`;
}

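// The template above uses the project's placeholder grammar:
// |TagName{{VARIABLE}}{generation instructions}| — pipe-delimited tags that
// lib/ElementExtraction.js later picks up with /\|([^|]+)\|/g. A minimal sketch:
function demoPlaceholderGrammar() {
  const sample = '<h1>|Titre_Principal{{T0}}{Rédige un titre H1}|</h1>';
  const match = /\|([^|]+)\|/.exec(sample);
  console.log(match[1]); // → 'Titre_Principal{{T0}}{Rédige un titre H1}'
}
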
/**
 * CREATE SAMPLE DATA FILES
 * Utility to initialize the JSON files
 */
async function createSampleDataFiles() {
  try {
    // Create the data directory if it does not exist
    await fs.mkdir('./data', { recursive: true });

    // Sample instructions.json
    const sampleInstructions = [
      {
        slug: "plaque-test",
        t0: "Plaque test signalétique",
        mc0: "plaque signalétique",
        "t-1": "Signalétique",
        "l-1": "/signaletique/",
        "mc+1": "plaque dibond, plaque aluminium, plaque PVC",
        "t+1": "Plaque dibond, Plaque alu, Plaque PVC",
        "l+1": "/plaque-dibond/, /plaque-aluminium/, /plaque-pvc/",
        xmlFileName: "template-plaque.xml"
      }
    ];

    // Sample personalities.json
    const samplePersonalities = [
      {
        nom: "Marc",
        description: "Expert technique en signalétique",
        style: "professionnel et précis",
        motsClesSecteurs: "technique,dibond,aluminium,impression",
        vocabulairePref: "précision,qualité,expertise,performance",
        connecteursPref: "par ailleurs,en effet,notamment,cependant",
        erreursTypiques: "accord_proximite,repetition_legere",
        longueurPhrases: "moyennes",
        niveauTechnique: "élevé",
        ctaStyle: "découvrir,choisir,commander",
        defautsSimules: "fatigue_cognitive,hesitation_technique"
      },
      {
        nom: "Sophie",
        description: "Passionnée de décoration et design",
        style: "familier et chaleureux",
        motsClesSecteurs: "décoration,design,esthétique,tendances",
        vocabulairePref: "joli,magnifique,tendance,style",
        connecteursPref: "du coup,en fait,sinon,au fait",
        erreursTypiques: "familiarite_excessive,expression_populaire",
        longueurPhrases: "courtes",
        niveauTechnique: "moyen",
        ctaStyle: "craquer,adopter,foncer",
        defautsSimules: "enthousiasme_variable,anecdote_personnelle"
      }
    ];

    // Write the files
    await fs.writeFile('./data/instructions.json', JSON.stringify(sampleInstructions, null, 2));
    await fs.writeFile('./data/personalities.json', JSON.stringify(samplePersonalities, null, 2));

    logSh('✅ Fichiers de données d\'exemple créés dans ./data/', "INFO");

  } catch (error) {
    logSh(`❌ Erreur création fichiers exemple: ${error.message}`, "ERROR");
  }
}

// ============= NODE.JS EXPORTS =============

module.exports = {
  getBrainConfig,
  getPersonalities,
  selectPersonalityWithAI,
  selectMultiplePersonalitiesWithAI, // NEW: multi-personality selector
  selectRandomPersonalities,
  parseCSVField,
  readInstructionsData,
  createSampleDataFiles,
  createDefaultXMLTemplate,
  CONFIG
};

// ============= QUICK TEST WHEN RUN DIRECTLY =============

if (require.main === module) {
  (async () => {
    try {
      logSh('🧪 Test BrainConfig Node.js...', "INFO");

      // Create the sample files if needed
      try {
        await fs.access('./data/instructions.json');
      } catch {
        await createSampleDataFiles();
      }

      // Exercise the main entry point
      const result = await getBrainConfig(2);

      if (result.success) {
        logSh(`✅ Test réussi: ${result.data.personality.nom} pour ${result.data.mc0}`, "INFO");
      } else {
        logSh(`❌ Test échoué: ${result.error}`, "ERROR");
      }

    } catch (error) {
      logSh(`❌ Erreur test: ${error.message}`, "ERROR");
    }
  })();
}
202
lib/ContentAssembly.js
Normal file
@ -0,0 +1,202 @@
// ========================================
// FILE: ContentAssembly.js
// Description: XML content assembly and cleanup
// ========================================

const { logSh } = require('./ErrorReporting'); // Using unified logSh from ErrorReporting

/**
 * Strips <strong> tags from the XML template
 * @param {string} xmlString - XML content to clean
 * @returns {string} - Cleaned XML
 */
function cleanStrongTags(xmlString) {
  logSh('Nettoyage balises <strong> du template...', 'DEBUG');

  // Remove every <strong> and </strong> tag
  let cleaned = xmlString.replace(/<\/?strong>/g, '');

  // Report how many were removed
  const strongCount = (xmlString.match(/<\/?strong>/g) || []).length;
  if (strongCount > 0) {
    logSh(`${strongCount} balises <strong> supprimées`, 'INFO');
  }

  return cleaned;
}

/**
 * Replaces every CSV variable in the XML
 * @param {string} xmlString - XML content
 * @param {object} csvData - CSV data
 * @returns {string} - XML with variables substituted
 */
function replaceAllCSVVariables(xmlString, csvData) {
  logSh('Remplacement variables CSV...', 'DEBUG');

  let result = xmlString;

  // Simple variables
  result = result.replace(/\{\{T0\}\}/g, csvData.t0 || '');
  result = result.replace(/\{\{MC0\}\}/g, csvData.mc0 || '');
  result = result.replace(/\{\{T-1\}\}/g, csvData.tMinus1 || '');
  result = result.replace(/\{\{L-1\}\}/g, csvData.lMinus1 || '');

  logSh(`Variables simples remplacées: T0="${csvData.t0}", MC0="${csvData.mc0}"`, 'DEBUG');

  // List variables
  const mcPlus1 = (csvData.mcPlus1 || '').split(',').map(s => s.trim());
  const tPlus1 = (csvData.tPlus1 || '').split(',').map(s => s.trim());
  const lPlus1 = (csvData.lPlus1 || '').split(',').map(s => s.trim());

  logSh(`Variables multiples: MC+1[${mcPlus1.length}], T+1[${tPlus1.length}], L+1[${lPlus1.length}]`, 'DEBUG');

  // Replace MC+1_1, MC+1_2, etc.
  for (let i = 1; i <= 6; i++) {
    const mcValue = mcPlus1[i - 1] || `[MC+1_${i} non défini]`;
    const tValue = tPlus1[i - 1] || `[T+1_${i} non défini]`;
    const lValue = lPlus1[i - 1] || `[L+1_${i} non défini]`;

    result = result.replace(new RegExp(`\\{\\{MC\\+1_${i}\\}\\}`, 'g'), mcValue);
    result = result.replace(new RegExp(`\\{\\{T\\+1_${i}\\}\\}`, 'g'), tValue);
    result = result.replace(new RegExp(`\\{\\{L\\+1_${i}\\}\\}`, 'g'), lValue);

    if (mcPlus1[i - 1]) {
      logSh(`MC+1_${i} = "${mcValue}"`, 'DEBUG');
    }
  }

  // Make sure no variable was left behind
  const remainingVars = (result.match(/\{\{[^}]+\}\}/g) || []);
  if (remainingVars.length > 0) {
    logSh(`ATTENTION: Variables non remplacées: ${remainingVars.join(', ')}`, 'WARNING');
  }

  logSh('Toutes les variables CSV remplacées', 'INFO');
  return result;
}

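// Illustrative substitution pass (a minimal sketch; the csvData values are made up):
function demoReplaceAllCSVVariables() {
  const csvData = {
    t0: 'Plaque test signalétique',
    mc0: 'plaque signalétique',
    tMinus1: 'Signalétique',
    lMinus1: '/signaletique/',
    mcPlus1: 'plaque dibond, plaque aluminium',
    tPlus1: 'Plaque dibond, Plaque alu',
    lPlus1: '/plaque-dibond/, /plaque-aluminium/'
  };
  const xml = '<h2>{{MC+1_2}}</h2><a href="{{L-1}}">{{T-1}}</a>';
  console.log(replaceAllCSVVariables(xml, csvData));
  // → '<h2>plaque aluminium</h2><a href="/signaletique/">Signalétique</a>'
}
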
/**
 * Injects the generated content into the final XML
 * @param {string} cleanXML - Cleaned XML
 * @param {object} generatedContent - Generated content keyed by tag
 * @param {array} elements - Extracted elements
 * @returns {string} - Final XML with content injected
 */
function injectGeneratedContent(cleanXML, generatedContent, elements) {
  logSh('🔍 === DEBUG INJECTION MAPPING ===', 'DEBUG');
  logSh(`XML reçu: ${cleanXML.length} caractères`, 'DEBUG');
  logSh(`Contenu généré: ${Object.keys(generatedContent).length} éléments`, 'DEBUG');
  logSh(`Éléments fournis: ${elements.length} éléments`, 'DEBUG');

  // Debug: dump the XML
  logSh(`🔍 XML début: ${cleanXML}`, 'DEBUG');

  // Debug: dump the generated content
  Object.keys(generatedContent).forEach(key => {
    logSh(`🔍 Généré [${key}]: "${generatedContent[key]}"`, 'DEBUG');
  });

  // Debug: dump the elements
  elements.forEach((element, i) => {
    logSh(`🔍 Element ${i + 1}: originalTag="${element.originalTag}", originalFullMatch="${element.originalFullMatch}"`, 'DEBUG');
  });

  let finalXML = cleanXML;

  // Build a mapping: pure tag → full original tag
  const tagMapping = {};
  elements.forEach(element => {
    tagMapping[element.originalTag] = element.originalFullMatch || element.originalTag;
  });

  logSh(`🔍 TagMapping créé: ${JSON.stringify(tagMapping, null, 2)}`, 'DEBUG');

  // Replace using the full original tags
  Object.keys(generatedContent).forEach(pureTag => {
    const content = generatedContent[pureTag];

    logSh(`🔍 === TRAITEMENT TAG: ${pureTag} ===`, 'DEBUG');
    logSh(`🔍 Contenu à injecter: "${content}"`, 'DEBUG');

    // Locate the full original tag in the XML
    const originalTag = findOriginalTagInXML(finalXML, pureTag);

    logSh(`🔍 Tag original trouvé: ${originalTag ? originalTag : 'AUCUN'}`, 'DEBUG');

    if (originalTag) {
      const beforeLength = finalXML.length;
      finalXML = finalXML.replace(originalTag, content);
      const afterLength = finalXML.length;

      if (beforeLength !== afterLength) {
        logSh(`✅ SUCCÈS: Remplacé ${originalTag} par contenu (${afterLength - beforeLength + originalTag.length} chars)`, 'DEBUG');
      } else {
        logSh(`❌ ÉCHEC: Replace n'a pas fonctionné pour ${originalTag}`, 'DEBUG');
      }
    } else {
      // Fallback: try the pure tag
      const beforeLength = finalXML.length;
      finalXML = finalXML.replace(pureTag, content);
      const afterLength = finalXML.length;

      logSh(`⚠ FALLBACK ${pureTag}: remplacement ${beforeLength !== afterLength ? 'RÉUSSI' : 'ÉCHOUÉ'}`, 'DEBUG');
      logSh(`⚠ Contenu fallback: "${content}"`, 'DEBUG');
    }
  });

  // Check for leftover tags
  const remainingTags = (finalXML.match(/\|[^|]*\|/g) || []);
  if (remainingTags.length > 0) {
    logSh(`ATTENTION: ${remainingTags.length} tags non remplacés: ${remainingTags.slice(0, 3).join(', ')}...`, 'WARNING');
  }

  logSh('Injection terminée', 'INFO');
  return finalXML;
}

/**
 * Helper to locate the full original tag in the XML
 * @param {string} xmlString - XML content
 * @param {string} pureTag - Pure tag to look up
 * @returns {string|null} - Original tag found, or null
 */
function findOriginalTagInXML(xmlString, pureTag) {
  logSh(`🔍 === RECHERCHE TAG DANS XML ===`, 'DEBUG');
  logSh(`🔍 Tag pur recherché: "${pureTag}"`, 'DEBUG');

  // Extract the tag name: |Titre_H1_1| → Titre_H1_1
  const tagName = pureTag.replace(/\|/g, '');
  logSh(`🔍 Nom tag extrait: "${tagName}"`, 'DEBUG');

  // Look for every tag that starts with this name (optional whitespace)
  const regex = new RegExp(`\\|\\s*${tagName}[^|]*\\|`, 'g');
  logSh(`🔍 Regex utilisée: ${regex}`, 'DEBUG');

  // Debug: list every tag present in the XML
  const allTags = xmlString.match(/\|[^|]*\|/g) || [];
  logSh(`🔍 Tags présents dans XML: ${allTags.length}`, 'DEBUG');
  allTags.forEach((tag, i) => {
    logSh(`🔍 ${i + 1}. "${tag}"`, 'DEBUG');
  });

  const matches = xmlString.match(regex);
  logSh(`🔍 Matches trouvés: ${matches ? matches.length : 0}`, 'DEBUG');

  if (matches && matches.length > 0) {
    logSh(`🔍 Premier match: "${matches[0]}"`, 'DEBUG');
    logSh(`✅ Tag original trouvé pour ${pureTag}: ${matches[0]}`, 'DEBUG');
    return matches[0];
  }

  logSh(`❌ Aucun tag original trouvé pour ${pureTag}`, 'DEBUG');
  return null;
}

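// Illustrative lookup (a minimal sketch): given a pure tag, recover the full
// decorated tag still present in the template.
function demoFindOriginalTagInXML() {
  const xml = '<h1>|Titre_H1_1{{T0}}{Rédige un titre}|</h1>';
  console.log(findOriginalTagInXML(xml, '|Titre_H1_1|'));
  // → '|Titre_H1_1{{T0}}{Rédige un titre}|'
}
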
// ============= EXPORTS =============
module.exports = {
  cleanStrongTags,
  replaceAllCSVVariables,
  injectGeneratedContent,
  findOriginalTagInXML
};
23
lib/ContentGeneration.js
Normal file
@ -0,0 +1,23 @@
// ========================================
// FILE: lib/content-generation.js - CONVERTED FOR NODE.JS
// Description: Content generation with batch enhancement
// ========================================

// 🔄 NODE.JS IMPORTS
const { logSh } = require('./ErrorReporting');
const { generateWithBatchEnhancement } = require('./SelectiveEnhancement');

// ============= MAIN GENERATION - ADAPTED =============

async function generateWithContext(hierarchy, csvData) {
  logSh('=== GÉNÉRATION AVEC BATCH ENHANCEMENT ===', 'INFO');

  // *** DELEGATES TO SELECTIVE ENHANCEMENT ***
  return await generateWithBatchEnhancement(hierarchy, csvData);
}

// ============= EXPORTS =============

module.exports = {
  generateWithContext
};
464
lib/DigitalOceanWorkflow.js
Normal file
@ -0,0 +1,464 @@
// ========================================
// FILE: DigitalOceanWorkflow.js - REFACTORED FOR NODE.JS
// RESPONSIBILITY: Orchestration + Digital Ocean interface ONLY
// ========================================

const crypto = require('crypto');
const axios = require('axios');
const { GoogleSpreadsheet } = require('google-spreadsheet');
const { JWT } = require('google-auth-library');

// Imports from the project's other modules (adapt to your structure)
const { logSh } = require('./ErrorReporting');
const { handleFullWorkflow } = require('./Main');
const { getPersonalities, selectPersonalityWithAI } = require('./BrainConfig');

// ============= DIGITAL OCEAN CONFIGURATION =============
// Spaces credentials are read from the environment; never hard-code keys here.
const DO_CONFIG = {
  endpoint: 'https://autocollant.fra1.digitaloceanspaces.com',
  bucketName: process.env.DO_SPACES_BUCKET || 'autocollant',
  accessKeyId: process.env.DO_SPACES_KEY,
  secretAccessKey: process.env.DO_SPACES_SECRET,
  region: process.env.DO_SPACES_REGION || 'fra1'
};

// Google Sheets configuration
const SHEET_CONFIG = {
  sheetId: '1iA2GvWeUxX-vpnAMfVm3ZMG9LhaC070SdGssEcXAh2c',
  serviceAccountEmail: process.env.GOOGLE_SERVICE_ACCOUNT_EMAIL,
  privateKey: process.env.GOOGLE_PRIVATE_KEY?.replace(/\\n/g, '\n'),
  // Alternative: use the JSON key file directly
  keyFile: './seo-generator-470715-85d4a971c1af.json'
};

async function deployArticle({ path, html, dryRun = false, ...rest }) {
  if (!path || typeof html !== 'string') {
    const err = new Error('deployArticle: invalid payload (requires {path, html})');
    err.code = 'E_PAYLOAD';
    throw err;
  }
  if (dryRun) {
    return {
      ok: true,
      dryRun: true,
      length: html.length,
      path,
      meta: rest || {}
    };
  }
  // --- Real implementation goes here (DO Spaces upload / API / SSH, etc.) ---
  // return await realDeploy({ path, html, ...rest });

  // Placeholder so callers do not break before the real deploy exists
  return { ok: true, dryRun: false, path, length: html.length };
}

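// Illustrative dry-run call (a minimal sketch; the path and HTML are made up):
async function demoDeployArticleDryRun() {
  const result = await deployArticle({
    path: 'wp-content/articles/plaque-test.html', // hypothetical target path
    html: '<article>…</article>',
    dryRun: true
  });
  console.log(result); // → { ok: true, dryRun: true, length: …, path: '…', meta: {} }
}
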
// ============= MAIN TRIGGER, REPLACED BY WEBHOOK/API =============

/**
 * Entry point for triggering the workflow
 * Replaces the Apps Script onEdit trigger
 * @param {number} rowNumber - Row number to process
 * @returns {Promise<object>} - Workflow result
 */
async function triggerAutonomousWorkflow(rowNumber) {
  try {
    logSh('🚀 TRIGGER AUTONOME DÉCLENCHÉ (Digital Ocean)', 'INFO');

    // Simulated debounce
    await new Promise(resolve => setTimeout(resolve, 2000));

    return await runAutonomousWorkflowFromTrigger(rowNumber);

  } catch (error) {
    logSh(`❌ Erreur trigger autonome DO: ${error.toString()}`, 'ERROR');
    throw error;
  }
}

/**
 * ORCHESTRATOR: prepares the data and delegates to Main.js
 */
async function runAutonomousWorkflowFromTrigger(rowNumber) {
  const startTime = Date.now();

  try {
    logSh(`🎬 ORCHESTRATION AUTONOME - LIGNE ${rowNumber}`, 'INFO');

    // 1. READ CSV DATA + XML FILE NAME
    const csvData = await readCSVDataWithXMLFileName(rowNumber);
    logSh(`✅ CSV: ${csvData.mc0}, XML: ${csvData.xmlFileName}`, 'INFO');

    // 2. FETCH XML FROM DIGITAL OCEAN
    const xmlTemplate = await fetchXMLFromDigitalOceanSimple(csvData.xmlFileName);
    logSh(`✅ XML récupéré: ${xmlTemplate.length} caractères`, 'INFO');

    // 3. 🎯 DELEGATE THE WORKFLOW TO MAIN.JS
    const workflowData = {
      rowNumber: rowNumber,
      xmlTemplate: Buffer.from(xmlTemplate).toString('base64'), // Encoded like Make.com
      csvData: csvData,
      source: 'digital_ocean_autonomous'
    };

    const result = await handleFullWorkflow(workflowData);

    const duration = Date.now() - startTime;
    logSh(`🏆 ORCHESTRATION TERMINÉE en ${Math.round(duration / 1000)}s`, 'INFO');

    // 4. MARK THE ROW AS PROCESSED
    await markRowAsProcessed(rowNumber, result);

    return result;

  } catch (error) {
    const duration = Date.now() - startTime;
    logSh(`❌ ERREUR ORCHESTRATION après ${Math.round(duration / 1000)}s: ${error.toString()}`, 'ERROR');
    await markRowAsError(rowNumber, error.toString());
    throw error;
  }
}

// ============= DIGITAL OCEAN INTERFACE =============

async function fetchXMLFromDigitalOceanSimple(fileName) {
  const filePath = `wp-content/XML/${fileName}`;
  const fileUrl = `${DO_CONFIG.endpoint}/${filePath}`;

  try {
    const response = await axios.get(fileUrl); // No auth (public object)
    return response.data;
  } catch (error) {
    throw new Error(`Fichier non accessible: ${error.message}`);
  }
}

/**
 * Fetch XML from Digital Ocean Spaces with authentication
 */
async function fetchXMLFromDigitalOcean(fileName) {
  if (!fileName) {
    throw new Error('Nom de fichier XML requis');
  }

  const filePath = `wp-content/XML/${fileName}`;
  logSh(`🌊 Récupération XML: ${fileName} , ${filePath}`, 'DEBUG');

  const fileUrl = `${DO_CONFIG.endpoint}/${filePath}`;
  logSh(`🔗 URL complète: ${fileUrl}`, 'DEBUG');

  const signature = generateAWSSignature(filePath);

  try {
    const response = await axios.get(fileUrl, {
      headers: signature.headers
    });

    logSh(`📡 Response code: ${response.status}`, 'DEBUG');
    logSh(`📄 Response: ${response.data.toString()}`, 'DEBUG');

    if (response.status === 200) {
      return response.data;
    } else {
      throw new Error(`HTTP ${response.status}: ${response.data}`);
    }

  } catch (error) {
    logSh(`❌ Erreur DO complète: ${error.toString()}`, 'ERROR');
    throw error;
  }
}

/**
 * Read CSV data together with the XML file name (column I)
 */
async function readCSVDataWithXMLFileName(rowNumber) {
  try {
    // Google Sheets configuration - with JSON key file fallback
    let serviceAccountAuth;

    if (SHEET_CONFIG.serviceAccountEmail && SHEET_CONFIG.privateKey) {
      // Use environment variables
      serviceAccountAuth = new JWT({
        email: SHEET_CONFIG.serviceAccountEmail,
        key: SHEET_CONFIG.privateKey,
        scopes: ['https://www.googleapis.com/auth/spreadsheets']
      });
    } else {
      // Use the JSON key file
      serviceAccountAuth = new JWT({
        keyFile: SHEET_CONFIG.keyFile,
        scopes: ['https://www.googleapis.com/auth/spreadsheets']
      });
    }

    const doc = new GoogleSpreadsheet(SHEET_CONFIG.sheetId, serviceAccountAuth);
    await doc.loadInfo();

    const sheet = doc.sheetsByTitle['instructions'];
    if (!sheet) {
      throw new Error('Sheet "instructions" non trouvée');
    }

    await sheet.loadCells(`A${rowNumber}:I${rowNumber}`);

    const slug = sheet.getCell(rowNumber - 1, 0).value;
    const t0 = sheet.getCell(rowNumber - 1, 1).value;
    const mc0 = sheet.getCell(rowNumber - 1, 2).value;
    const tMinus1 = sheet.getCell(rowNumber - 1, 3).value;
    const lMinus1 = sheet.getCell(rowNumber - 1, 4).value;
    const mcPlus1 = sheet.getCell(rowNumber - 1, 5).value;
    const tPlus1 = sheet.getCell(rowNumber - 1, 6).value;
    const lPlus1 = sheet.getCell(rowNumber - 1, 7).value;
    const xmlFileName = sheet.getCell(rowNumber - 1, 8).value;

    if (!xmlFileName || xmlFileName.toString().trim() === '') {
      throw new Error(`Nom fichier XML manquant colonne I, ligne ${rowNumber}`);
    }

    let cleanFileName = xmlFileName.toString().trim();
    if (!cleanFileName.endsWith('.xml')) {
      cleanFileName += '.xml';
    }

    // Fetch a personality (delegates to the existing BrainConfig.js system)
    const personalities = await getPersonalities();
    const selectedPersonality = await selectPersonalityWithAI(mc0, t0, personalities);

    return {
      rowNumber: rowNumber,
      slug: slug,
      t0: t0,
      mc0: mc0,
      tMinus1: tMinus1,
      lMinus1: lMinus1,
      mcPlus1: mcPlus1,
      tPlus1: tPlus1,
      lPlus1: lPlus1,
      xmlFileName: cleanFileName,
      personality: selectedPersonality
    };

  } catch (error) {
    logSh(`❌ Erreur lecture CSV: ${error.toString()}`, 'ERROR');
    throw error;
  }
}

// ============= STATUS AND VALIDATION =============

/**
 * Decide whether the workflow should be triggered
 * In Node.js this logic is adapted to your strategy (webhook, polling, etc.)
 */
function shouldTriggerWorkflow(rowNumber, xmlFileName) {
  if (!rowNumber || rowNumber <= 1) {
    return false;
  }

  if (!xmlFileName || xmlFileName.toString().trim() === '') {
    logSh('⚠️ Pas de fichier XML (colonne I), workflow ignoré', 'WARNING');
    return false;
  }

  return true;
}

async function markRowAsProcessed(rowNumber, result) {
  try {
    // Google Sheets configuration - with JSON key file fallback
    let serviceAccountAuth;

    if (SHEET_CONFIG.serviceAccountEmail && SHEET_CONFIG.privateKey) {
      serviceAccountAuth = new JWT({
        email: SHEET_CONFIG.serviceAccountEmail,
        key: SHEET_CONFIG.privateKey,
        scopes: ['https://www.googleapis.com/auth/spreadsheets']
      });
    } else {
      serviceAccountAuth = new JWT({
        keyFile: SHEET_CONFIG.keyFile,
        scopes: ['https://www.googleapis.com/auth/spreadsheets']
      });
    }

    const doc = new GoogleSpreadsheet(SHEET_CONFIG.sheetId, serviceAccountAuth);
    await doc.loadInfo();

    const sheet = doc.sheetsByTitle['instructions'];

    // Check for the status headers and add them if needed
    await sheet.loadCells('K1:N1');
    if (!sheet.getCell(0, 10).value) {
      sheet.getCell(0, 10).value = 'Status';
      sheet.getCell(0, 11).value = 'Processed_At';
      sheet.getCell(0, 12).value = 'Article_ID';
      sheet.getCell(0, 13).value = 'Source';
      await sheet.saveUpdatedCells();
    }

    // Mark the row
    await sheet.loadCells(`K${rowNumber}:N${rowNumber}`);
    sheet.getCell(rowNumber - 1, 10).value = '✅ DO_SUCCESS';
    sheet.getCell(rowNumber - 1, 11).value = new Date().toISOString();
    sheet.getCell(rowNumber - 1, 12).value = result.articleStorage?.articleId || '';
    sheet.getCell(rowNumber - 1, 13).value = 'Digital Ocean';

    await sheet.saveUpdatedCells();

    logSh(`✅ Ligne ${rowNumber} marquée comme traitée`, 'INFO');

  } catch (error) {
    logSh(`⚠️ Erreur marquage statut: ${error.toString()}`, 'WARNING');
  }
}

async function markRowAsError(rowNumber, errorMessage) {
  try {
    // Google Sheets configuration - with JSON key file fallback
    let serviceAccountAuth;

    if (SHEET_CONFIG.serviceAccountEmail && SHEET_CONFIG.privateKey) {
      serviceAccountAuth = new JWT({
        email: SHEET_CONFIG.serviceAccountEmail,
        key: SHEET_CONFIG.privateKey,
        scopes: ['https://www.googleapis.com/auth/spreadsheets']
      });
    } else {
      serviceAccountAuth = new JWT({
        keyFile: SHEET_CONFIG.keyFile,
        scopes: ['https://www.googleapis.com/auth/spreadsheets']
      });
    }

    const doc = new GoogleSpreadsheet(SHEET_CONFIG.sheetId, serviceAccountAuth);
    await doc.loadInfo();

    const sheet = doc.sheetsByTitle['instructions'];

    await sheet.loadCells(`K${rowNumber}:N${rowNumber}`);
    sheet.getCell(rowNumber - 1, 10).value = '❌ DO_ERROR';
    sheet.getCell(rowNumber - 1, 11).value = new Date().toISOString();
    sheet.getCell(rowNumber - 1, 12).value = errorMessage.substring(0, 100);
    sheet.getCell(rowNumber - 1, 13).value = 'DO Error';

    await sheet.saveUpdatedCells();

  } catch (error) {
    logSh(`⚠️ Erreur marquage erreur: ${error.toString()}`, 'WARNING');
  }
}

// ============= AWS SIGNATURE V4 =============

function generateAWSSignature(filePath) {
  const now = new Date();
  const dateStamp = now.toISOString().slice(0, 10).replace(/-/g, '');
  const timeStamp = now.toISOString().replace(/[-:]/g, '').slice(0, -5) + 'Z';

  const headers = {
    'Host': DO_CONFIG.endpoint.replace('https://', ''),
    'X-Amz-Date': timeStamp,
    'X-Amz-Content-Sha256': 'UNSIGNED-PAYLOAD'
  };

  const credentialScope = `${dateStamp}/${DO_CONFIG.region}/s3/aws4_request`;

  const canonicalHeaders = Object.keys(headers)
    .sort()
    .map(key => `${key.toLowerCase()}:${headers[key]}`)
    .join('\n');

  const signedHeaders = Object.keys(headers)
    .map(key => key.toLowerCase())
    .sort()
    .join(';');

  const canonicalRequest = [
    'GET',
    `/${filePath}`,
    '',
    canonicalHeaders + '\n',
    signedHeaders,
    'UNSIGNED-PAYLOAD'
  ].join('\n');

  const stringToSign = [
    'AWS4-HMAC-SHA256',
    timeStamp,
    credentialScope,
    crypto.createHash('sha256').update(canonicalRequest).digest('hex')
  ].join('\n');

  // SigV4 key derivation chain: date → region → service → "aws4_request"
  const kDate = crypto.createHmac('sha256', 'AWS4' + DO_CONFIG.secretAccessKey).update(dateStamp).digest();
  const kRegion = crypto.createHmac('sha256', kDate).update(DO_CONFIG.region).digest();
  const kService = crypto.createHmac('sha256', kRegion).update('s3').digest();
  const kSigning = crypto.createHmac('sha256', kService).update('aws4_request').digest();
  const signature = crypto.createHmac('sha256', kSigning).update(stringToSign).digest('hex');

  headers['Authorization'] = `AWS4-HMAC-SHA256 Credential=${DO_CONFIG.accessKeyId}/${credentialScope}, SignedHeaders=${signedHeaders}, Signature=${signature}`;

  return { headers: headers };
}

// ============= SETUP AND TEST =============

/**
 * Autonomous trigger configuration - replaced by a webhook or polling in Node.js
 */
function setupAutonomousTrigger() {
  logSh('⚙️ Configuration trigger autonome Digital Ocean...', 'INFO');

  // In Node.js you could use, as sketched below:
  // - Express.js with webhooks
  // - Cron jobs with node-cron
  // - Polling the Google Sheet
  // - WebSocket connections

  logSh('✅ Configuration prête pour webhooks/polling Node.js', 'INFO');
  logSh('🎯 Mode: Webhook/API → Digital Ocean → Main.js', 'INFO');
}

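// One possible webhook wiring (a minimal sketch, assuming Express is installed;
// the route and port are illustrative, not part of this module's exports):
function startWebhookServer() {
  const express = require('express');
  const app = express();
  app.use(express.json());
  app.post('/webhook/row', async (req, res) => {
    try {
      const result = await triggerAutonomousWorkflow(Number(req.body.rowNumber));
      res.json({ ok: true, result });
    } catch (error) {
      res.status(500).json({ ok: false, error: error.toString() });
    }
  });
  app.listen(process.env.PORT || 3000);
}
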
async function testDigitalOceanConnection() {
  logSh('🧪 Test connexion Digital Ocean...', 'INFO');

  try {
    const testFiles = ['template1.xml', 'plaque-rue.xml', 'test.xml'];

    for (const fileName of testFiles) {
      try {
        const content = await fetchXMLFromDigitalOceanSimple(fileName);
        logSh(`✅ Fichier '${fileName}' accessible (${content.length} chars)`, 'INFO');
        return true;
      } catch (error) {
        logSh(`⚠️ '${fileName}' non accessible: ${error.toString()}`, 'DEBUG');
      }
    }

    logSh('❌ Aucun fichier test accessible dans DO', 'ERROR');
    return false;

  } catch (error) {
    logSh(`❌ Test DO échoué: ${error.toString()}`, 'ERROR');
    return false;
  }
}

// ============= EXPORTS =============

module.exports = {
  triggerAutonomousWorkflow,
  runAutonomousWorkflowFromTrigger,
  deployArticle,
  fetchXMLFromDigitalOcean,
  fetchXMLFromDigitalOceanSimple,
  readCSVDataWithXMLFileName,
  markRowAsProcessed,
  markRowAsError,
  testDigitalOceanConnection,
  setupAutonomousTrigger,
  DO_CONFIG
};
425
lib/ElementExtraction.js
Normal file
@ -0,0 +1,425 @@
// ========================================
// FILE: lib/element-extraction.js - CONVERTED FOR NODE.JS
// Description: Extraction and parsing of the XML elements
// ========================================

// 🔄 NODE.JS IMPORTS
const { logSh } = require('./ErrorReporting');

// ============= MAIN EXTRACTION =============

async function extractElements(xmlTemplate, csvData) {
  try {
    await logSh('Extraction éléments avec séparation tag/contenu...', 'DEBUG');

    const regex = /\|([^|]+)\|/g;
    const elements = [];
    let match;

    while ((match = regex.exec(xmlTemplate)) !== null) {
      const fullMatch = match[1]; // e.g. "Titre_H1_1{{T0}}" or "Titre_H3_3{{MC+1_3}}"

      // Split tag name and variables
      const nameMatch = fullMatch.match(/^([^{]+)/);
      const variablesMatch = fullMatch.match(/\{\{([^}]+)\}\}/g);

      // INSTRUCTIONS REGEX FIX - strip the {{variables}} first, then look for {instructions}
      const withoutVariables = fullMatch.replace(/\{\{[^}]+\}\}/g, '');
      const instructionsMatch = withoutVariables.match(/\{([^}]+)\}/);

      const tagName = nameMatch ? nameMatch[1].trim() : fullMatch.split('{')[0];

      // PURE TAG (without variables)
      const pureTag = `|${tagName}|`;

      // RESOLVE the variables' content
      const resolvedContent = resolveVariablesContent(variablesMatch, csvData);

      elements.push({
        originalTag: pureTag,             // ← PURE TAG: |Titre_H3_3|
        name: tagName,                    // ← Titre_H3_3
        variables: variablesMatch || [],  // ← [{{MC+1_3}}]
        resolvedContent: resolvedContent, // ← "Plaque de rue en aluminium"
        instructions: instructionsMatch ? instructionsMatch[1] : null,
        type: getElementType(tagName),
        originalFullMatch: fullMatch      // ← Backup, just in case
      });

      await logSh(`Tag séparé: ${pureTag} → "${resolvedContent}"`, 'DEBUG');
    }

    await logSh(`${elements.length} éléments extraits avec séparation`, 'INFO');
    return elements;

  } catch (error) {
    await logSh(`Erreur extractElements: ${error}`, 'ERROR');
    return [];
  }
}

// ============= VARIABLE RESOLUTION - UNCHANGED =============

function resolveVariablesContent(variablesMatch, csvData) {
  if (!variablesMatch || variablesMatch.length === 0) {
    return ""; // No variables to resolve
  }

  let resolvedContent = "";

  variablesMatch.forEach(variable => {
    const cleanVar = variable.replace(/[{}]/g, ''); // Strip {{ }}

    switch (cleanVar) {
      case 'T0':
        resolvedContent += csvData.t0;
        break;
      case 'MC0':
        resolvedContent += csvData.mc0;
        break;
      case 'T-1':
        resolvedContent += csvData.tMinus1;
        break;
      case 'L-1':
        resolvedContent += csvData.lMinus1;
        break;
      default:
        // Handle MC+1_1, MC+1_2, etc.
        if (cleanVar.startsWith('MC+1_')) {
          const index = parseInt(cleanVar.split('_')[1]) - 1;
          const mcPlus1 = csvData.mcPlus1.split(',').map(s => s.trim());
          resolvedContent += mcPlus1[index] || `[${cleanVar} non défini]`;
        }
        else if (cleanVar.startsWith('T+1_')) {
          const index = parseInt(cleanVar.split('_')[1]) - 1;
          const tPlus1 = csvData.tPlus1.split(',').map(s => s.trim());
          resolvedContent += tPlus1[index] || `[${cleanVar} non défini]`;
        }
        else if (cleanVar.startsWith('L+1_')) {
          const index = parseInt(cleanVar.split('_')[1]) - 1;
          const lPlus1 = csvData.lPlus1.split(',').map(s => s.trim());
          resolvedContent += lPlus1[index] || `[${cleanVar} non défini]`;
        }
        else {
          resolvedContent += `[${cleanVar} non résolu]`;
        }
        break;
    }
  });

  return resolvedContent;
}

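// Illustrative resolution (a minimal sketch; the csvData value is made up):
function demoResolveVariablesContent() {
  const csvData = { mcPlus1: 'plaque dibond, plaque aluminium, plaque PVC' };
  console.log(resolveVariablesContent(['{{MC+1_2}}'], csvData)); // → 'plaque aluminium'
}
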
// ============= ELEMENT CLASSIFICATION - UNCHANGED =============

function getElementType(name) {
  if (name.includes('Titre_H1')) return 'titre_h1';
  if (name.includes('Titre_H2')) return 'titre_h2';
  if (name.includes('Titre_H3')) return 'titre_h3';
  if (name.includes('Intro_')) return 'intro';
  if (name.includes('Txt_')) return 'texte';
  if (name.includes('Faq_q')) return 'faq_question';
  if (name.includes('Faq_a')) return 'faq_reponse';
  if (name.includes('Faq_H3')) return 'faq_titre';
  return 'autre';
}

// ============= SEQUENTIAL GENERATION - ADAPTED =============

async function generateAllContent(elements, csvData, xmlTemplate) {
  await logSh(`Début génération pour ${elements.length} éléments`, 'INFO');

  const generatedContent = {};

  for (let index = 0; index < elements.length; index++) {
    const element = elements[index];

    try {
      await logSh(`Élément ${index + 1}/${elements.length}: ${element.name}`, 'DEBUG');

      const prompt = createPromptForElement(element, csvData);
      await logSh(`Prompt créé: ${prompt}`, 'DEBUG');

      // 🔄 NODE.JS: import callLLM from the LLM manager
      const { callLLM } = require('./LLMManager');
      const content = await callLLM('openai', prompt, {}, csvData.personality);

      await logSh(`Contenu reçu: ${content}`, 'DEBUG');

      generatedContent[element.originalTag] = content;

      // 🔄 NODE.JS: no Utilities.sleep(); the API calls handle their own rate limiting

    } catch (error) {
      await logSh(`ERREUR élément ${element.name}: ${error.toString()}`, 'ERROR');
      generatedContent[element.originalTag] = `[Erreur génération: ${element.name}]`;
    }
  }

  await logSh(`Génération terminée. ${Object.keys(generatedContent).length} éléments`, 'INFO');
  return generatedContent;
}

// ============= STRUCTURE PARSING - UNCHANGED =============

function parseElementStructure(element) {
  // CLEAN the name: strip <strong>, </strong>, {{...}}, {...}
  let cleanName = element.name
    .replace(/<\/?strong>/g, '')   // ← strip <strong>
    .replace(/\{\{[^}]*\}\}/g, '') // strip {{MC0}}
    .replace(/\{[^}]*\}/g, '');    // strip {instructions}

  const parts = cleanName.split('_');

  return {
    type: parts[0],
    level: parts[1],
    indices: parts.slice(2).map(Number),
    hierarchyPath: parts.slice(1).join('_'),
    originalElement: element,
    variables: element.variables || [],
    instructions: element.instructions
  };
}

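// Illustrative parse (a minimal sketch): 'Titre_H2_1' → type 'Titre',
// level 'H2', indices [1], hierarchyPath 'H2_1'.
function demoParseElementStructure() {
  const structure = parseElementStructure({ name: 'Titre_H2_1', variables: [], instructions: null });
  console.log(structure.type, structure.level, structure.indices, structure.hierarchyPath);
}
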
// ============= SMART HIERARCHY - ADAPTED =============

async function buildSmartHierarchy(elements) {
  const hierarchy = {};

  elements.forEach(element => {
    const structure = parseElementStructure(element);
    const path = structure.hierarchyPath;

    if (!hierarchy[path]) {
      hierarchy[path] = {
        title: null,
        text: null,
        questions: [],
        children: {}
      };
    }

    // Associate by element type
    if (structure.type === 'Titre') {
      hierarchy[path].title = structure; // Whole object, with variables + instructions
    } else if (structure.type === 'Txt') {
      hierarchy[path].text = structure;
    } else if (structure.type === 'Intro') {
      hierarchy[path].text = structure;
    } else if (structure.type === 'Faq') {
      hierarchy[path].questions.push(structure);
    }
  });

  // ← Compiled summary line
  const mappingSummary = Object.keys(hierarchy).map(path => {
    const section = hierarchy[path];
    return `${path}:[T:${section.title ? '✓' : '✗'} Txt:${section.text ? '✓' : '✗'} FAQ:${section.questions.length}]`;
  }).join(' | ');

  await logSh('Correspondances: ' + mappingSummary, 'DEBUG');

  return hierarchy;
}

// ============= RESPONSE PARSERS - ADAPTED =============

async function parseTitlesResponse(response, allTitles) {
  const results = {};

  // Use a regex to extract "[TAG] content" blocks
  const regex = /\[([^\]]+)\]\s*\n([^[]*?)(?=\n\[|$)/gs;
  let match;

  while ((match = regex.exec(response)) !== null) {
    const tag = match[1].trim();
    const content = match[2].trim();

    // Clean the content (strip leading # and any HTML tags)
    const cleanContent = content
      .replace(/^#+\s*/, '')           // Strip leading #
      .replace(/<\/?[^>]+(>|$)/g, ""); // Strip HTML tags

    results[`|${tag}|`] = cleanContent;

    await logSh(`✓ Titre parsé [${tag}]: "${cleanContent}"`, 'DEBUG');
  }

  // Fallback if parsing failed
  if (Object.keys(results).length === 0) {
    await logSh('Parsing titres échoué, fallback ligne par ligne', 'WARNING');
    const lines = response.split('\n').filter(line => line.trim());

    allTitles.forEach((titleInfo, index) => {
      if (lines[index]) {
        results[titleInfo.tag] = lines[index].trim();
      }
    });
  }

  return results;
}

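// The parsers in this file all expect the LLM to answer in the bracketed
// "[TAG]\ncontent" format. Illustrative round-trip (a minimal sketch):
async function demoParseTitlesResponse() {
  const response = '[Titre_H1_1]\nPlaque signalétique sur mesure\n[Titre_H2_1]\nPourquoi choisir le dibond';
  const results = await parseTitlesResponse(response, []);
  console.log(results['|Titre_H1_1|']); // → 'Plaque signalétique sur mesure'
}
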
async function parseTextsResponse(response, allTexts) {
  const results = {};

  await logSh('Parsing réponse textes avec vrais tags...', 'DEBUG');

  // Use a regex to extract "[TAG] content" blocks with the real names
  const regex = /\[([^\]]+)\]\s*\n([^[]*?)(?=\n\[|$)/gs;
  let match;

  while ((match = regex.exec(response)) !== null) {
    const tag = match[1].trim();
    const content = match[2].trim();

    // Clean the content
    const cleanContent = content.replace(/^#+\s*/, '').replace(/<\/?[^>]+(>|$)/g, "");

    results[`|${tag}|`] = cleanContent;

    await logSh(`✓ Texte parsé [${tag}]: "${cleanContent}"`, 'DEBUG');
  }

  // Fallback if parsing failed - map by position
  if (Object.keys(results).length === 0) {
    await logSh('Parsing textes échoué, fallback ligne par ligne', 'WARNING');

    const lines = response.split('\n')
      .map(line => line.trim())
      .filter(line => line.length > 0 && !line.startsWith('['));

    for (let index = 0; index < allTexts.length; index++) {
      const textInfo = allTexts[index];
      if (index < lines.length) {
        let content = lines[index];
        content = content.replace(/^\d+\.\s*/, ''); // Strip a leading "1. " if present
        results[textInfo.tag] = content;

        await logSh(`✓ Texte fallback ${index + 1} → ${textInfo.tag}: "${content}"`, 'DEBUG');
      } else {
        await logSh(`✗ Pas assez de lignes pour ${textInfo.tag}`, 'WARNING');
        results[textInfo.tag] = `[Texte manquant ${index + 1}]`;
      }
    }
  }

  return results;
}

// ============= SPECIALIZED FAQ PARSER - ADAPTED =============

async function parseFAQPairsResponse(response, faqPairs) {
  const results = {};

  await logSh('Parsing réponse paires FAQ...', 'DEBUG');

  // Parse with a regex capturing question + answer
  const regex = /\[([^\]]+)\]\s*\n([^[]*?)(?=\n\[|$)/gs;
  let match;

  const parsedItems = {};

  while ((match = regex.exec(response)) !== null) {
    const tag = match[1].trim();
    const content = match[2].trim();

    const cleanContent = content.replace(/^#+\s*/, '').replace(/<\/?[^>]+(>|$)/g, "");

    parsedItems[tag] = cleanContent;

    await logSh(`✓ Item FAQ parsé [${tag}]: "${cleanContent}"`, 'DEBUG');
  }

  // Map back to the original pipe-delimited tags
  Object.keys(parsedItems).forEach(cleanTag => {
    const content = parsedItems[cleanTag];
    results[`|${cleanTag}|`] = content;
  });

  // Pair-consistency check
  let pairsCompletes = 0;
  for (const pair of faqPairs) {
    const hasQuestion = results[pair.question.tag];
    const hasAnswer = results[pair.answer.tag];

    if (hasQuestion && hasAnswer) {
      pairsCompletes++;
      await logSh(`✓ Paire FAQ ${pair.number} complète: Q+R`, 'DEBUG');
    } else {
      await logSh(`⚠ Paire FAQ ${pair.number} incomplète: Q=${!!hasQuestion} R=${!!hasAnswer}`, 'WARNING');
    }
  }

  await logSh(`${pairsCompletes}/${faqPairs.length} paires FAQ complètes`, 'INFO');

  // FATAL if FAQ pairs are missing
  if (pairsCompletes < faqPairs.length) {
    const manquantes = faqPairs.length - pairsCompletes;
    await logSh(`❌ FATAL: ${manquantes} paires FAQ manquantes sur ${faqPairs.length}`, 'ERROR');
    throw new Error(`FATAL: Génération FAQ incomplète (${manquantes}/${faqPairs.length} manquantes) - arrêt du workflow`);
  }

  return results;
}

async function parseOtherElementsResponse(response, allOtherElements) {
|
||||
const results = {};
|
||||
|
||||
await logSh('Parsing réponse autres éléments...', 'DEBUG');
|
||||
|
||||
const regex = /\[([^\]]+)\]\s*\n([^[]*?)(?=\n\[|$)/gs;
|
||||
let match;
|
||||
|
||||
while ((match = regex.exec(response)) !== null) {
|
||||
const tag = match[1].trim();
|
||||
const content = match[2].trim();
|
||||
|
||||
const cleanContent = content.replace(/^#+\s*/, '').replace(/<\/?[^>]+(>|$)/g, "");
|
||||
|
||||
results[`|${tag}|`] = cleanContent;
|
||||
|
||||
await logSh(`✓ Autre élément parsé [${tag}]: "${cleanContent}"`, 'DEBUG');
|
||||
}
|
||||
|
||||
// Fallback si parsing partiel
|
||||
if (Object.keys(results).length < allOtherElements.length) {
|
||||
await logSh('Parsing autres éléments partiel, complétion fallback', 'WARNING');
|
||||
|
||||
const lines = response.split('\n')
|
||||
.map(line => line.trim())
|
||||
.filter(line => line.length > 0 && !line.startsWith('['));
|
||||
|
||||
allOtherElements.forEach((element, index) => {
|
||||
if (!results[element.tag] && lines[index]) {
|
||||
results[element.tag] = lines[index];
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||

// ============= HELPER FUNCTIONS - ADAPTED =============

function createPromptForElement(element, csvData) {
  // This function will probably be defined in content-generation.js;
  // for now, return a basic prompt
  return `Génère du contenu pour ${element.type}: ${element.resolvedContent}`;
}

// 🔄 NODE.JS EXPORTS
module.exports = {
  extractElements,
  resolveVariablesContent,
  getElementType,
  generateAllContent,
  parseElementStructure,
  buildSmartHierarchy,
  parseTitlesResponse,
  parseTextsResponse,
  parseFAQPairsResponse,
  parseOtherElementsResponse,
  createPromptForElement
};
586  lib/ErrorReporting.js  Normal file
@@ -0,0 +1,586 @@
// ========================================
// FILE: lib/ErrorReporting.js - CONVERTED FOR NODE.JS
// Description: Validation and error-reporting system
// ========================================

const { google } = require('googleapis');
const nodemailer = require('nodemailer');
const fs = require('fs').promises;
const path = require('path');
const pino = require('pino');
const pretty = require('pino-pretty');
const { PassThrough } = require('stream');
const WebSocket = require('ws');

// Configuration
const SHEET_ID = process.env.GOOGLE_SHEETS_ID || '1iA2GvWeUxX-vpnAMfVm3ZMG9LhaC070SdGssEcXAh2c';

// WebSocket server for real-time logs
let wsServer;
const wsClients = new Set();

// Enhanced Pino logger configuration with real-time streaming and dated files
const now = new Date();
const timestamp = now.toISOString().slice(0, 10) + '_' +
  now.toLocaleTimeString('fr-FR').replace(/:/g, '-');
const logFile = path.join(__dirname, '..', 'logs', `seo-generator-${timestamp}.log`);

const prettyStream = pretty({
  colorize: true,
  translateTime: 'HH:MM:ss.l',
  ignore: 'pid,hostname',
});

const tee = new PassThrough();
tee.pipe(prettyStream).pipe(process.stdout);

// File destination with dated filename - FORCE DEBUG LEVEL
const fileDest = pino.destination({
  dest: logFile,
  mkdir: true,
  sync: false,
  minLength: 0 // Force immediate write even for small logs
});
tee.pipe(fileDest);

// Custom levels for Pino to include TRACE and PROMPT
const customLevels = {
  trace: 5,   // Below debug (10)
  debug: 10,
  info: 20,
  prompt: 25, // New level for prompts (between info and warn)
  warn: 30,
  error: 40,
  fatal: 50
};

// Pino logger instance with enhanced configuration and custom levels
const logger = pino(
  {
    level: 'debug', // FORCE DEBUG LEVEL for file logging
    base: undefined,
    timestamp: pino.stdTimeFunctions.isoTime,
    customLevels: customLevels,
    useOnlyCustomLevels: true
  },
  tee
);

// Initialize WebSocket server
function initWebSocketServer() {
  if (!wsServer) {
    wsServer = new WebSocket.Server({ port: process.env.LOG_WS_PORT || 8081 });

    wsServer.on('connection', (ws) => {
      wsClients.add(ws);
      logger.info('Client connected to log WebSocket');

      ws.on('close', () => {
        wsClients.delete(ws);
        logger.info('Client disconnected from log WebSocket');
      });

      ws.on('error', (error) => {
        logger.error('WebSocket error:', error.message);
        wsClients.delete(ws);
      });
    });

    logger.info(`Log WebSocket server started on port ${process.env.LOG_WS_PORT || 8081}`);
  }
}
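
// Minimal client sketch (illustrative; the port and the message shape mirror
// broadcastLog() below — adjust LOG_WS_PORT if you changed it):
//   const WebSocket = require('ws');
//   const ws = new WebSocket('ws://localhost:8081');
//   ws.on('message', raw => {
//     const { timestamp, level, message } = JSON.parse(raw);
//     console.log(`${timestamp} [${level}] ${message}`);
//   });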

// Broadcast log to WebSocket clients
function broadcastLog(message, level) {
  const logData = {
    timestamp: new Date().toISOString(),
    level: level.toUpperCase(),
    message: message
  };

  wsClients.forEach(ws => {
    if (ws.readyState === WebSocket.OPEN) {
      try {
        ws.send(JSON.stringify(logData));
      } catch (error) {
        logger.error('Failed to send log to WebSocket client:', error.message);
        wsClients.delete(ws);
      }
    }
  });
}

// 🔄 NODE.JS: Google Sheets API setup (replaces SpreadsheetApp)
let sheets;
let auth;

async function initGoogleSheets() {
  if (!sheets) {
    // Google Sheets API auth configuration
    // For the demo this uses a service-account key file (to be configured)
    auth = new google.auth.GoogleAuth({
      keyFile: process.env.GOOGLE_CREDENTIALS_PATH, // path to the JSON credentials file
      scopes: ['https://www.googleapis.com/auth/spreadsheets']
    });

    sheets = google.sheets({ version: 'v4', auth });
  }
  return sheets;
}

async function logSh(message, level = 'INFO') {
  // Initialize WebSocket server if not already done
  if (!wsServer) {
    initWebSocketServer();
  }

  // Convert level to lowercase for Pino
  const pinoLevel = level.toLowerCase();

  // Enhanced trace metadata for hierarchical logging
  const traceData = {};
  if (message.includes('▶') || message.includes('✔') || message.includes('✖') || message.includes('•')) {
    traceData.trace = true;
    traceData.evt = message.includes('▶') ? 'span.start' :
                    message.includes('✔') ? 'span.end' :
                    message.includes('✖') ? 'span.error' : 'span.event';
  }

  // Log with Pino (handles console output with pretty formatting and file logging)
  switch (pinoLevel) {
    case 'error':
      logger.error(traceData, message);
      break;
    case 'warning':
    case 'warn':
      logger.warn(traceData, message);
      break;
    case 'debug':
      logger.debug(traceData, message);
      break;
    case 'trace':
      logger.trace(traceData, message);
      break;
    case 'prompt':
      logger.prompt(traceData, message);
      break;
    default:
      logger.info(traceData, message);
  }

  // Broadcast to WebSocket clients for real-time viewing
  broadcastLog(message, level);

  // Force immediate flush to ensure real-time display and prevent log loss
  logger.flush();

  // Log to Google Sheets if enabled (async, non-blocking)
  if (process.env.ENABLE_SHEETS_LOGGING === 'true') {
    setImmediate(() => {
      logToGoogleSheets(message, level).catch(err => {
        // Silent fail for Google Sheets logging to avoid recursion
      });
    });
  }
}
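
// Usage sketch (hedged — logSh is also safe to call fire-and-forget, but
// awaiting it keeps log ordering deterministic inside async workflows):
//   await logSh('Démarrage du workflow', 'INFO');
//   await logSh(fullPrompt, 'PROMPT'); // custom Pino level declared above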

// Decide whether a message should be logged to the console
function shouldLogToConsole(messageLevel, configLevel) {
  const levels = { DEBUG: 0, INFO: 1, WARNING: 2, ERROR: 3 };
  return levels[messageLevel] >= levels[configLevel];
}

// Log to file is now handled by Pino transport
// This function is kept for compatibility but does nothing
async function logToFile(message, level) {
  // Pino handles file logging via transport configuration
  // This function is deprecated and kept for compatibility only
}

// 🔄 NODE.JS: log to Google Sheets (async version)
async function logToGoogleSheets(message, level) {
  try {
    const sheetsApi = await initGoogleSheets();

    const values = [[
      new Date().toISOString(),
      level,
      message,
      'Node.js workflow'
    ]];

    await sheetsApi.spreadsheets.values.append({
      spreadsheetId: SHEET_ID,
      range: 'Logs!A:D',
      valueInputOption: 'RAW',
      insertDataOption: 'INSERT_ROWS',
      resource: { values }
    });

  } catch (error) {
    // Log via Pino directly: calling logSh() here would re-enter Google Sheets
    // logging and could loop forever on a persistent failure
    logger.warn('Échec log Google Sheets: ' + error.message);
  }
}

// 🔄 NODE.JS: simplified cleanLogSheet
async function cleanLogSheet() {
  try {
    logSh('🧹 Nettoyage logs...', 'INFO'); // Using logSh instead of console.log

    // 1. Clean local log files (keep the last 7 days)
    await cleanLocalLogs();

    // 2. Clean Google Sheets if enabled
    if (process.env.ENABLE_SHEETS_LOGGING === 'true') {
      await cleanGoogleSheetsLogs();
    }

    logSh('✅ Logs nettoyés', 'INFO'); // Using logSh instead of console.log

  } catch (error) {
    logSh('Erreur nettoyage logs: ' + error.message, 'ERROR'); // Using logSh instead of console.error
  }
}

async function cleanLocalLogs() {
  try {
    // Note: With Pino, log files are managed differently
    // This function is kept for compatibility with Google Sheets logs cleanup
    // Pino log rotation should be handled by external tools like logrotate

    // For now, we keep the basic cleanup for any remaining old log files
    const logsDir = path.join(__dirname, '../logs');

    try {
      const files = await fs.readdir(logsDir);
      const cutoffDate = new Date();
      cutoffDate.setDate(cutoffDate.getDate() - 7); // keep 7 days

      for (const file of files) {
        if (file.endsWith('.log')) {
          const filePath = path.join(logsDir, file);
          const stats = await fs.stat(filePath);

          if (stats.mtime < cutoffDate) {
            await fs.unlink(filePath);
            logSh(`🗑️ Supprimé log ancien: ${file}`, 'INFO');
          }
        }
      }
    } catch (error) {
      // Directory might not exist, that's fine
    }
  } catch (error) {
    // Silent fail
  }
}

async function cleanGoogleSheetsLogs() {
  try {
    const sheetsApi = await initGoogleSheets();

    // Clear the sheet, then restore the headers
    await sheetsApi.spreadsheets.values.clear({
      spreadsheetId: SHEET_ID,
      range: 'Logs!A:D'
    });

    await sheetsApi.spreadsheets.values.update({
      spreadsheetId: SHEET_ID,
      range: 'Logs!A1:D1',
      valueInputOption: 'RAW',
      resource: {
        values: [['Timestamp', 'Level', 'Message', 'Source']]
      }
    });

  } catch (error) {
    logSh('Échec nettoyage Google Sheets: ' + error.message, 'WARNING'); // Using logSh instead of console.warn
  }
}

// ============= MAIN VALIDATION - UNCHANGED =============

function validateWorkflowIntegrity(elements, generatedContent, finalXML, csvData) {
  logSh('🔍 >>> VALIDATION INTÉGRITÉ WORKFLOW <<<', 'INFO'); // Using logSh instead of console.log

  const errors = [];
  const warnings = [];
  const stats = {
    elementsExtracted: elements.length,
    contentGenerated: Object.keys(generatedContent).length,
    tagsReplaced: 0,
    tagsRemaining: 0
  };

  // TEST 1: duplicate tag detection
  const duplicateCheck = detectDuplicateTags(elements);
  if (duplicateCheck.hasDuplicates) {
    errors.push({
      type: 'DUPLICATE_TAGS',
      severity: 'HIGH',
      message: `Tags dupliqués détectés: ${duplicateCheck.duplicates.join(', ')}`,
      impact: 'Certains contenus ne seront pas remplacés dans le XML final',
      suggestion: 'Vérifier le template XML pour corriger la structure'
    });
  }

  // TEST 2: extracted vs. generated element consistency
  const missingGeneration = elements.filter(el => !generatedContent[el.originalTag]);
  if (missingGeneration.length > 0) {
    errors.push({
      type: 'MISSING_GENERATION',
      severity: 'HIGH',
      message: `${missingGeneration.length} éléments extraits mais non générés`,
      details: missingGeneration.map(el => el.originalTag),
      impact: 'Contenu incomplet dans le XML final'
    });
  }

  // TEST 3: unreplaced tags in the final XML
  const remainingTags = (finalXML.match(/\|[^|]*\|/g) || []);
  stats.tagsRemaining = remainingTags.length;

  if (remainingTags.length > 0) {
    errors.push({
      type: 'UNREPLACED_TAGS',
      severity: 'HIGH',
      message: `${remainingTags.length} tags non remplacés dans le XML final`,
      details: remainingTags.slice(0, 5),
      impact: 'XML final contient des placeholders non remplacés'
    });
  }

  // TEST 4: missing CSV variables
  const missingVars = detectMissingCSVVariables(csvData);
  if (missingVars.length > 0) {
    warnings.push({
      type: 'MISSING_CSV_VARIABLES',
      severity: 'MEDIUM',
      message: `Variables CSV manquantes: ${missingVars.join(', ')}`,
      impact: 'Système de génération de mots-clés automatique activé'
    });
  }

  // TEST 5: AI generation quality
  const generationQuality = assessGenerationQuality(generatedContent);
  if (generationQuality.errorRate > 0.1) {
    warnings.push({
      type: 'GENERATION_QUALITY',
      severity: 'MEDIUM',
      message: `${(generationQuality.errorRate * 100).toFixed(1)}% d'erreurs de génération IA`,
      impact: 'Qualité du contenu potentiellement dégradée'
    });
  }

  // FINAL STATS
  stats.tagsReplaced = elements.length - remainingTags.length;
  stats.successRate = stats.elementsExtracted > 0 ?
    ((stats.tagsReplaced / elements.length) * 100).toFixed(1) : '100';

  const report = {
    timestamp: new Date().toISOString(),
    csvData: { mc0: csvData.mc0, t0: csvData.t0 },
    stats: stats,
    errors: errors,
    warnings: warnings,
    status: errors.length === 0 ? 'SUCCESS' : 'ERROR'
  };

  const logLevel = report.status === 'SUCCESS' ? 'INFO' : 'ERROR';
  logSh(`✅ Validation terminée: ${report.status} (${errors.length} erreurs, ${warnings.length} warnings)`, logLevel); // was hardcoded to 'INFO', which ignored logLevel

  // SEND A REPORT ON ERRORS (async, in the background)
  if (errors.length > 0 || warnings.length > 2) {
    sendErrorReport(report).catch(err => {
      logSh('Erreur envoi rapport: ' + err.message, 'ERROR'); // Using logSh instead of console.error
    });
  }

  return report;
}
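
// Illustrative shape of the returned report (all values invented):
//   {
//     timestamp: '2025-01-01T12:00:00.000Z',
//     csvData: { mc0: 'plaque funéraire', t0: 'Plaque funéraire personnalisée' },
//     stats: { elementsExtracted: 24, contentGenerated: 24,
//              tagsReplaced: 24, tagsRemaining: 0, successRate: '100.0' },
//     errors: [],
//     warnings: [],
//     status: 'SUCCESS'
//   }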

// ============= HELPERS - UNCHANGED =============

function detectDuplicateTags(elements) {
  const tagCounts = {};
  const duplicates = [];

  elements.forEach(element => {
    const tag = element.originalTag;
    tagCounts[tag] = (tagCounts[tag] || 0) + 1;

    if (tagCounts[tag] === 2) {
      duplicates.push(tag);
      logSh(`❌ DUPLICATE détecté: ${tag}`, 'ERROR'); // Using logSh instead of console.error
    }
  });

  return {
    hasDuplicates: duplicates.length > 0,
    duplicates: duplicates,
    counts: tagCounts
  };
}

function detectMissingCSVVariables(csvData) {
  const missing = [];

  if (!csvData.mcPlus1 || csvData.mcPlus1.split(',').length < 4) {
    missing.push('MC+1 (insuffisant)');
  }
  if (!csvData.tPlus1 || csvData.tPlus1.split(',').length < 4) {
    missing.push('T+1 (insuffisant)');
  }
  if (!csvData.lPlus1 || csvData.lPlus1.split(',').length < 4) {
    missing.push('L+1 (insuffisant)');
  }

  return missing;
}

function assessGenerationQuality(generatedContent) {
  let errorCount = 0;
  let totalCount = Object.keys(generatedContent).length;

  Object.values(generatedContent).forEach(content => {
    if (content && (
      content.includes('[ERREUR') ||
      content.includes('ERROR') ||
      content.length < 10
    )) {
      errorCount++;
    }
  });

  return {
    errorRate: totalCount > 0 ? errorCount / totalCount : 0,
    totalGenerated: totalCount,
    errorsFound: errorCount
  };
}

// 🔄 NODE.JS: email via nodemailer (replaces MailApp)
async function sendErrorReport(report) {
  try {
    logSh('📧 Envoi rapport d\'erreur par email...', 'INFO'); // Using logSh instead of console.log

    // Nodemailer configuration (Gmail, for example)
    const transporter = nodemailer.createTransport({
      service: 'gmail',
      auth: {
        user: process.env.EMAIL_USER,         // 'your-email@gmail.com'
        pass: process.env.EMAIL_APP_PASSWORD  // Google app password
      }
    });

    const subject = `Erreur Workflow SEO Node.js - ${report.status} - ${report.csvData.mc0}`;
    const htmlBody = createHTMLReport(report);

    const mailOptions = {
      from: process.env.EMAIL_USER,
      to: 'alexistrouve.pro@gmail.com',
      subject: subject,
      html: htmlBody,
      attachments: [{
        filename: `error-report-${Date.now()}.json`,
        content: JSON.stringify(report, null, 2),
        contentType: 'application/json'
      }]
    };

    await transporter.sendMail(mailOptions);
    logSh('✅ Rapport d\'erreur envoyé par email', 'INFO'); // Using logSh instead of console.log

  } catch (error) {
    logSh(`❌ Échec envoi email: ${error.message}`, 'ERROR'); // Using logSh instead of console.error
  }
}

// ============= HTML REPORT - UNCHANGED =============

function createHTMLReport(report) {
  const statusColor = report.status === 'SUCCESS' ? '#28a745' : '#dc3545';

  let html = `
  <div style="font-family: Arial, sans-serif; max-width: 800px; margin: 0 auto;">
    <h1 style="color: ${statusColor};">Rapport Workflow SEO Automatisé (Node.js)</h1>

    <div style="background: #f8f9fa; padding: 15px; border-radius: 5px; margin: 20px 0;">
      <h2>Résumé Exécutif</h2>
      <p><strong>Statut:</strong> <span style="color: ${statusColor};">${report.status}</span></p>
      <p><strong>Article:</strong> ${report.csvData.t0}</p>
      <p><strong>Mot-clé:</strong> ${report.csvData.mc0}</p>
      <p><strong>Taux de réussite:</strong> ${report.stats.successRate}%</p>
      <p><strong>Timestamp:</strong> ${report.timestamp}</p>
      <p><strong>Plateforme:</strong> Node.js Server</p>
    </div>`;

  if (report.errors.length > 0) {
    html += `<div style="background: #f8d7da; padding: 15px; border-radius: 5px; margin: 20px 0;">
      <h2>Erreurs Critiques (${report.errors.length})</h2>`;

    report.errors.forEach((error, i) => {
      html += `
      <div style="margin: 10px 0; padding: 10px; border-left: 3px solid #dc3545;">
        <h4>${i + 1}. ${error.type}</h4>
        <p><strong>Message:</strong> ${error.message}</p>
        <p><strong>Impact:</strong> ${error.impact}</p>
        ${error.suggestion ? `<p><strong>Solution:</strong> ${error.suggestion}</p>` : ''}
      </div>`;
    });

    html += `</div>`;
  }

  if (report.warnings.length > 0) {
    html += `<div style="background: #fff3cd; padding: 15px; border-radius: 5px; margin: 20px 0;">
      <h2>Avertissements (${report.warnings.length})</h2>`;

    report.warnings.forEach((warning, i) => {
      html += `
      <div style="margin: 10px 0; padding: 10px; border-left: 3px solid #ffc107;">
        <h4>${i + 1}. ${warning.type}</h4>
        <p>${warning.message}</p>
      </div>`;
    });

    html += `</div>`;
  }

  html += `
  <div style="background: #e9ecef; padding: 15px; border-radius: 5px; margin: 20px 0;">
    <h2>Statistiques Détaillées</h2>
    <ul>
      <li>Éléments extraits: ${report.stats.elementsExtracted}</li>
      <li>Contenus générés: ${report.stats.contentGenerated}</li>
      <li>Tags remplacés: ${report.stats.tagsReplaced}</li>
      <li>Tags restants: ${report.stats.tagsRemaining}</li>
    </ul>
  </div>

  <div style="background: #d1ecf1; padding: 15px; border-radius: 5px; margin: 20px 0;">
    <h2>Informations Système</h2>
    <ul>
      <li>Plateforme: Node.js</li>
      <li>Version: ${process.version}</li>
      <li>Mémoire: ${Math.round(process.memoryUsage().heapUsed / 1024 / 1024)}MB</li>
      <li>Uptime: ${Math.round(process.uptime())}s</li>
    </ul>
  </div>
  </div>`;

  return html;
}

// 🔄 NODE.JS EXPORTS
module.exports = {
  logSh,
  cleanLogSheet,
  validateWorkflowIntegrity,
  detectDuplicateTags,
  detectMissingCSVVariables,
  assessGenerationQuality,
  sendErrorReport,
  createHTMLReport
};
572  lib/LLMManager.js  Normal file
@@ -0,0 +1,572 @@
// ========================================
// FILE: LLMManager.js
// Description: Central hub for all LLM calls (Node.js version)
// Supports: Claude, OpenAI, Gemini, Deepseek, Moonshot, Mistral
// ========================================

const fetch = globalThis.fetch.bind(globalThis);
const { logSh } = require('./ErrorReporting');

// ============= CENTRALIZED CONFIGURATION =============

// API keys come from the environment. The hardcoded fallback secrets that were
// committed here have been replaced with 'VOTRE_CLE_*' placeholders, which
// getAvailableProviders() below treats as "not configured".
const LLM_CONFIG = {
  openai: {
    apiKey: process.env.OPENAI_API_KEY || 'VOTRE_CLE_OPENAI',
    endpoint: 'https://api.openai.com/v1/chat/completions',
    model: 'gpt-4o-mini',
    headers: {
      'Authorization': 'Bearer {API_KEY}',
      'Content-Type': 'application/json'
    },
    temperature: 0.7,
    timeout: 300000, // 5 minutes
    retries: 3
  },

  claude: {
    apiKey: process.env.CLAUDE_API_KEY || 'VOTRE_CLE_CLAUDE',
    endpoint: 'https://api.anthropic.com/v1/messages',
    model: 'claude-sonnet-4-20250514',
    headers: {
      'x-api-key': '{API_KEY}',
      'Content-Type': 'application/json',
      'anthropic-version': '2023-06-01'
    },
    temperature: 0.7,
    maxTokens: 4096, // the Anthropic Messages API requires max_tokens; 4096 is an assumed default
    timeout: 300000, // 5 minutes
    retries: 6
  },

  gemini: {
    apiKey: process.env.GEMINI_API_KEY || 'VOTRE_CLE_GEMINI',
    endpoint: 'https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent',
    model: 'gemini-2.5-flash',
    headers: {
      'Content-Type': 'application/json'
    },
    temperature: 0.7,
    maxTokens: 6000,
    timeout: 300000, // 5 minutes
    retries: 3
  },

  deepseek: {
    apiKey: process.env.DEEPSEEK_API_KEY || 'VOTRE_CLE_DEEPSEEK',
    endpoint: 'https://api.deepseek.com/v1/chat/completions',
    model: 'deepseek-chat',
    headers: {
      'Authorization': 'Bearer {API_KEY}',
      'Content-Type': 'application/json'
    },
    temperature: 0.7,
    timeout: 300000, // 5 minutes
    retries: 3
  },

  moonshot: {
    apiKey: process.env.MOONSHOT_API_KEY || 'VOTRE_CLE_MOONSHOT',
    endpoint: 'https://api.moonshot.ai/v1/chat/completions',
    model: 'moonshot-v1-32k',
    headers: {
      'Authorization': 'Bearer {API_KEY}',
      'Content-Type': 'application/json'
    },
    temperature: 0.7,
    timeout: 300000, // 5 minutes
    retries: 3
  },

  mistral: {
    apiKey: process.env.MISTRAL_API_KEY || 'VOTRE_CLE_MISTRAL',
    endpoint: 'https://api.mistral.ai/v1/chat/completions',
    model: 'mistral-small-latest',
    headers: {
      'Authorization': 'Bearer {API_KEY}',
      'Content-Type': 'application/json'
    },
    maxTokens: 5000, // renamed from max_tokens so buildRequestData() actually reads it
    temperature: 0.7,
    timeout: 300000, // 5 minutes
    retries: 3
  }
};

// ============= HELPER FUNCTIONS =============

const sleep = (ms) => new Promise(resolve => setTimeout(resolve, ms));

// ============= UNIVERSAL INTERFACE =============

/**
 * Main entry point for calling any LLM
 * @param {string} llmProvider - claude|openai|gemini|deepseek|moonshot|mistral
 * @param {string} prompt - The prompt to send
 * @param {object} options - Custom options (temperature, tokens, etc.)
 * @param {object} personality - Personality for the system context
 * @returns {Promise<string>} - The generated response
 */
async function callLLM(llmProvider, prompt, options = {}, personality = null) {
  const startTime = Date.now();

  try {
    // Check that the provider exists
    if (!LLM_CONFIG[llmProvider]) {
      throw new Error(`Provider LLM inconnu: ${llmProvider}`);
    }

    // Check that the API key is configured
    const config = LLM_CONFIG[llmProvider];
    if (!config.apiKey || config.apiKey.startsWith('VOTRE_CLE_')) {
      throw new Error(`Clé API manquante pour ${llmProvider}`);
    }

    logSh(`🤖 Appel LLM: ${llmProvider.toUpperCase()} (${config.model}) | Personnalité: ${personality?.nom || 'aucune'}`, 'DEBUG');

    // 📢 FULL PROMPT DUMP FOR DEBUGGING, WITH MODEL INFO
    logSh(`\n🔍 ===== PROMPT ENVOYÉ À ${llmProvider.toUpperCase()} (${config.model}) | PERSONNALITÉ: ${personality?.nom || 'AUCUNE'} =====`, 'PROMPT');
    logSh(prompt, 'PROMPT');
    logSh(`===== FIN PROMPT ${llmProvider.toUpperCase()} (${personality?.nom || 'AUCUNE'}) =====\n`, 'PROMPT');

    // Build the request payload for this provider
    const requestData = buildRequestData(llmProvider, prompt, options, personality);

    // Perform the call with retry logic
    const response = await callWithRetry(llmProvider, requestData, config);

    // Parse the response according to the provider's format
    const content = parseResponse(llmProvider, response);

    const duration = Date.now() - startTime;
    logSh(`✅ ${llmProvider.toUpperCase()} (${personality?.nom || 'sans personnalité'}) réponse en ${duration}ms: "${content.substring(0, 150)}${content.length > 150 ? '...' : ''}"`, 'INFO');

    // Record usage stats
    await recordUsageStats(llmProvider, prompt.length, content.length, duration);

    return content;

  } catch (error) {
    const duration = Date.now() - startTime;
    logSh(`❌ Erreur ${llmProvider.toUpperCase()} (${personality?.nom || 'sans personnalité'}): ${error.toString()}`, 'ERROR');

    // Record the failure
    await recordUsageStats(llmProvider, prompt.length, 0, duration, error.toString());

    throw error;
  }
}
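
// Usage sketch (hedged; the provider, prompt, and personality values are
// invented for the example):
//   const { callLLM } = require('./LLMManager');
//   const text = await callLLM(
//     'claude',
//     'Rédige un H1 pour "plaque funéraire personnalisée"',
//     { temperature: 0.5, maxTokens: 200 },
//     { nom: 'Marc', description: 'rédacteur SEO', style: 'professionnel' }
//   );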

// ============= REQUEST BUILDING =============

function buildRequestData(provider, prompt, options, personality) {
  const config = LLM_CONFIG[provider];
  const temperature = options.temperature || config.temperature;
  const maxTokens = options.maxTokens || config.maxTokens;

  // Build the system prompt when a personality is provided
  const systemPrompt = personality ?
    `Tu es ${personality.nom}. ${personality.description}. Style: ${personality.style}` :
    'Tu es un assistant expert.';

  switch (provider) {
    case 'openai':
    case 'deepseek':
    case 'moonshot':
    case 'mistral':
      return {
        model: config.model,
        messages: [
          { role: 'system', content: systemPrompt },
          { role: 'user', content: prompt }
        ],
        max_tokens: maxTokens,
        temperature: temperature,
        stream: false
      };

    case 'claude':
      return {
        model: config.model,
        max_tokens: maxTokens,
        temperature: temperature,
        system: systemPrompt,
        messages: [
          { role: 'user', content: prompt }
        ]
      };

    case 'gemini':
      return {
        contents: [{
          parts: [{
            text: `${systemPrompt}\n\n${prompt}`
          }]
        }],
        generationConfig: {
          temperature: temperature,
          maxOutputTokens: maxTokens
        }
      };

    default:
      throw new Error(`Format de requête non supporté pour ${provider}`);
  }
}

// ============= CALLS WITH RETRY =============

async function callWithRetry(provider, requestData, config) {
  let lastError;

  for (let attempt = 1; attempt <= config.retries; attempt++) {
    try {
      logSh(`🔄 Tentative ${attempt}/${config.retries} pour ${provider.toUpperCase()}`, 'DEBUG');

      // Build the headers with the API key substituted in
      const headers = {};
      Object.keys(config.headers).forEach(key => {
        headers[key] = config.headers[key].replace('{API_KEY}', config.apiKey);
      });

      // Gemini passes the API key in the URL (special case)
      let url = config.endpoint;
      if (provider === 'gemini') {
        url += `?key=${config.apiKey}`;
      }

      const options = {
        method: 'POST',
        headers: headers,
        body: JSON.stringify(requestData),
        // Native fetch ignores a plain `timeout` property; AbortSignal.timeout()
        // (Node 18+) enforces the configured timeout instead
        signal: AbortSignal.timeout(config.timeout)
      };

      const response = await fetch(url, options);
      const responseText = await response.text();

      if (response.ok) {
        return JSON.parse(responseText);
      } else if (response.status === 429) {
        // Rate limited - back off before retrying; keep the error so the final
        // throw has something to report if every attempt is rate limited
        lastError = new Error(`HTTP 429: rate limit ${provider}`);
        const waitTime = Math.pow(2, attempt) * 1000; // exponential backoff
        logSh(`⏳ Rate limit ${provider.toUpperCase()}, attente ${waitTime}ms`, 'WARNING');
        await sleep(waitTime);
        continue;
      } else {
        throw new Error(`HTTP ${response.status}: ${responseText}`);
      }

    } catch (error) {
      lastError = error;

      if (attempt < config.retries) {
        const waitTime = 1000 * attempt;
        logSh(`⚠ Erreur tentative ${attempt}: ${error.toString()}, retry dans ${waitTime}ms`, 'WARNING');
        await sleep(waitTime);
      }
    }
  }

  throw new Error(`Échec après ${config.retries} tentatives: ${lastError.toString()}`);
}
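
// Backoff behaviour, derived from the code above: HTTP 429 waits grow as
// 2^attempt seconds (2s, 4s, 8s, ...), while other errors retry after a
// linear 1000 * attempt ms (1s, 2s, ...). Only the 429 path keeps retrying
// with exponential backoff; all other HTTP errors are thrown and retried
// through the catch block until config.retries is exhausted.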

// ============= RESPONSE PARSING =============

function parseResponse(provider, responseData) {
  try {
    switch (provider) {
      case 'openai':
      case 'deepseek':
      case 'moonshot':
      case 'mistral':
        return responseData.choices[0].message.content.trim();

      case 'claude':
        return responseData.content[0].text.trim();

      case 'gemini': {
        const candidate = responseData.candidates[0];

        // Multiple fallbacks for the Gemini 2.5 response shapes
        if (candidate && candidate.content && candidate.content.parts && candidate.content.parts[0] && candidate.content.parts[0].text) {
          return candidate.content.parts[0].text.trim();
        } else if (candidate && candidate.text) {
          return candidate.text.trim();
        } else if (candidate && candidate.content && candidate.content.text) {
          return candidate.content.text.trim();
        } else {
          // Debug: log the full structure
          logSh('Gemini structure complète: ' + JSON.stringify(responseData), 'DEBUG');
          return '[Gemini: pas de texte généré - problème modèle]';
        }
      }

      default:
        throw new Error(`Parser non supporté pour ${provider}`);
    }
  } catch (error) {
    logSh(`❌ Erreur parsing ${provider}: ${error.toString()}`, 'ERROR');
    logSh(`Response brute: ${JSON.stringify(responseData)}`, 'DEBUG');
    throw new Error(`Impossible de parser la réponse ${provider}: ${error.toString()}`);
  }
}

// ============= USAGE STATISTICS =============

async function recordUsageStats(provider, promptTokens, responseTokens, duration, error = null) {
  try {
    // TODO: adapt to your Node.js storage layer
    // (could be a database, a file, MongoDB, etc.)
    const statsData = {
      timestamp: new Date(),
      provider: provider,
      model: LLM_CONFIG[provider].model,
      promptTokens: promptTokens,   // NB: callLLM() passes character counts, not real token counts
      responseTokens: responseTokens,
      duration: duration,
      error: error || ''
    };

    // Example: log to the console or a file
    logSh(`📊 Stats: ${JSON.stringify(statsData)}`, 'DEBUG');

    // TODO: implement real persistence (DB, file, etc.)

  } catch (statsError) {
    // Never crash the workflow because stats failed
    logSh(`⚠ Erreur enregistrement stats: ${statsError.toString()}`, 'WARNING');
  }
}

// ============= UTILITY FUNCTIONS =============

/**
 * Test connectivity of every LLM
 */
async function testAllLLMs() {
  const testPrompt = "Dis bonjour en 5 mots maximum.";
  const results = {};

  const allProviders = Object.keys(LLM_CONFIG);

  for (const provider of allProviders) {
    try {
      logSh(`🧪 Test ${provider}...`, 'INFO');

      const response = await callLLM(provider, testPrompt);
      results[provider] = {
        status: 'SUCCESS',
        response: response,
        model: LLM_CONFIG[provider].model
      };

    } catch (error) {
      results[provider] = {
        status: 'ERROR',
        error: error.toString(),
        model: LLM_CONFIG[provider].model
      };
    }

    // Short delay between tests
    await sleep(500);
  }

  logSh(`📊 Tests terminés: ${JSON.stringify(results, null, 2)}`, 'INFO');
  return results;
}

/**
 * Get the available providers (those with valid API keys)
 */
function getAvailableProviders() {
  const available = [];

  Object.keys(LLM_CONFIG).forEach(provider => {
    const config = LLM_CONFIG[provider];
    if (config.apiKey && !config.apiKey.startsWith('VOTRE_CLE_')) {
      available.push(provider);
    }
  });

  return available;
}

/**
 * Get per-provider usage statistics
 */
async function getUsageStats() {
  try {
    // TODO: adapt to your storage layer;
    // returns a default message for now
    return { message: 'Statistiques non implémentées en Node.js' };

  } catch (error) {
    return { error: error.toString() };
  }
}

// ============= LEGACY CODE MIGRATION =============

/**
 * Compatibility wrapper replacing the old callOpenAI().
 * Keeps the same signature so existing code does not break.
 */
async function callOpenAI(prompt, personality) {
  return await callLLM('openai', prompt, {}, personality);
}

// ============= TEST EXPORTS =============

/**
 * Quick test function
 */
async function testLLMManager() {
  logSh('🚀 Test du LLM Manager Node.js...', 'INFO');

  // Check which providers are available
  const available = getAvailableProviders();
  logSh('Providers disponibles: ' + available.join(', ') + ' (' + available.length + '/6)', 'INFO');

  // Simple call test on each available provider
  for (const provider of available) {
    try {
      logSh(`🧪 Test ${provider}...`, 'DEBUG');
      const startTime = Date.now();

      const response = await callLLM(provider, 'Dis juste "Test OK"');
      const duration = Date.now() - startTime;

      logSh(`✅ Test ${provider} réussi: "${response}" (${duration}ms)`, 'INFO');

    } catch (error) {
      logSh(`❌ Test ${provider} échoué: ${error.toString()}`, 'ERROR');
    }

    // Short delay to avoid rate limits
    await sleep(500);
  }

  // OpenAI-specific test (compatibility with the old code)
  try {
    logSh('🎯 Test spécifique OpenAI (compatibilité)...', 'DEBUG');
    const response = await callLLM('openai', 'Dis juste "Test OK"');
    logSh('✅ Test OpenAI compatibilité: ' + response, 'INFO');
  } catch (error) {
    logSh('❌ Test OpenAI compatibilité échoué: ' + error.toString(), 'ERROR');
  }

  // Show usage stats
  try {
    logSh('📊 Récupération statistiques d\'usage...', 'DEBUG');
    const stats = await getUsageStats();

    if (stats.error) {
      logSh('⚠ Erreur récupération stats: ' + stats.error, 'WARNING');
    } else if (stats.message) {
      logSh('📊 Stats: ' + stats.message, 'INFO');
    } else {
      // Format the stats for the logs
      Object.keys(stats).forEach(provider => {
        const s = stats[provider];
        logSh(`📈 ${provider}: ${s.calls} appels, ${s.successRate}% succès, ${s.avgDuration}ms moyen`, 'INFO');
      });
    }
  } catch (error) {
    logSh('❌ Erreur lors de la récupération des stats: ' + error.toString(), 'ERROR');
  }

  // Final summary
  const workingCount = available.length;
  const totalProviders = Object.keys(LLM_CONFIG).length;

  if (workingCount === totalProviders) {
    logSh(`✅ Test LLM Manager COMPLET: ${workingCount}/${totalProviders} providers opérationnels`, 'INFO');
  } else if (workingCount >= 2) {
    logSh(`✅ Test LLM Manager PARTIEL: ${workingCount}/${totalProviders} providers opérationnels (suffisant pour DNA Mixing)`, 'INFO');
  } else {
    logSh(`❌ Test LLM Manager INSUFFISANT: ${workingCount}/${totalProviders} providers opérationnels (minimum 2 requis)`, 'ERROR');
  }

  logSh('🏁 Test LLM Manager terminé', 'INFO');
}

/**
 * Full version testing every provider (even unconfigured ones)
 */
async function testLLMManagerComplete() {
  logSh('🚀 Test COMPLET du LLM Manager (tous providers)...', 'INFO');

  const allProviders = Object.keys(LLM_CONFIG);
  logSh(`Providers configurés: ${allProviders.join(', ')}`, 'INFO');

  const results = {
    configured: 0,
    working: 0,
    failed: 0
  };

  for (const provider of allProviders) {
    const config = LLM_CONFIG[provider];

    // Skip unconfigured providers
    if (!config.apiKey || config.apiKey.startsWith('VOTRE_CLE_')) {
      logSh(`⚙️ ${provider}: NON CONFIGURÉ (clé API manquante)`, 'WARNING');
      continue;
    }

    results.configured++;

    try {
      logSh(`🧪 Test ${provider} (${config.model})...`, 'DEBUG');
      const startTime = Date.now();

      const response = await callLLM(provider, 'Réponds "OK" seulement.', { maxTokens: 100 });
      const duration = Date.now() - startTime;

      results.working++;
      logSh(`✅ ${provider}: "${response.trim()}" (${duration}ms)`, 'INFO');

    } catch (error) {
      results.failed++;
      logSh(`❌ ${provider}: ${error.toString()}`, 'ERROR');
    }

    // Delay between tests
    await sleep(700);
  }

  // Full final summary
  logSh(`📊 RÉSUMÉ FINAL:`, 'INFO');
  logSh(`   • Providers total: ${allProviders.length}`, 'INFO');
  logSh(`   • Configurés: ${results.configured}`, 'INFO');
  logSh(`   • Fonctionnels: ${results.working}`, 'INFO');
  logSh(`   • En échec: ${results.failed}`, 'INFO');

  const status = results.working >= 4 ? 'EXCELLENT' :
                 results.working >= 2 ? 'BON' : 'INSUFFISANT';

  logSh(`🏆 STATUS: ${status} (${results.working} LLMs opérationnels)`,
        status === 'INSUFFISANT' ? 'ERROR' : 'INFO');

  logSh('🏁 Test LLM Manager COMPLET terminé', 'INFO');

  return {
    total: allProviders.length,
    configured: results.configured,
    working: results.working,
    failed: results.failed,
    status: status
  };
}

// ============= MODULE EXPORTS =============

module.exports = {
  callLLM,
  callOpenAI,
  testAllLLMs,
  getAvailableProviders,
  getUsageStats,
  testLLMManager,
  testLLMManagerComplete,
  LLM_CONFIG
};
379  lib/Main.js  Normal file
@@ -0,0 +1,379 @@
// ========================================
// FILE: lib/Main.js - CONVERTED FOR NODE.JS
// RESPONSIBILITY: CORE OF THE GENERATION WORKFLOW
// ========================================

// 🔧 ENVIRONMENT CONFIGURATION
require('dotenv').config({ path: require('path').join(__dirname, '..', '.env') });

// 🔄 NODE.JS IMPORTS (replace the Apps Script dependencies)
const { getBrainConfig } = require('./BrainConfig');
const { extractElements, buildSmartHierarchy } = require('./ElementExtraction');
const { generateMissingKeywords } = require('./MissingKeywords');
const { generateWithContext } = require('./ContentGeneration');
const { injectGeneratedContent, cleanStrongTags } = require('./ContentAssembly');
const { validateWorkflowIntegrity, logSh } = require('./ErrorReporting');
const { saveGeneratedArticleOrganic } = require('./ArticleStorage');
const { tracer } = require('./trace.js');
const { fetchXMLFromDigitalOcean } = require('./DigitalOceanWorkflow');
const { spawn } = require('child_process');
const path = require('path');

// Guard so Edge is only launched once
let logViewerLaunched = false;

/**
 * Launch the log viewer in Edge (Windows-specific: uses cmd /c start)
 */
function launchLogViewer() {
  if (logViewerLaunched) return;

  try {
    const logViewerPath = path.join(__dirname, '..', 'logs-viewer.html');
    const fileUrl = `file:///${logViewerPath.replace(/\\/g, '/')}`;

    // Launch Edge with the file URL
    const edgeProcess = spawn('cmd', ['/c', 'start', 'msedge', fileUrl], {
      detached: true,
      stdio: 'ignore'
    });

    edgeProcess.unref();
    logViewerLaunched = true;

    logSh('🌐 Log viewer ouvert dans Edge', 'INFO');
  } catch (error) {
    logSh(`⚠️ Impossible d'ouvrir le log viewer: ${error.message}`, 'WARNING');
  }
}

/**
 * CORE OF THE WORKFLOW - Compatible with Make.com AND Digital Ocean AND Node.js
 * @param {object} data - Workflow data
 * @param {string} data.xmlTemplate - XML template (base64-encoded)
 * @param {object} data.csvData - CSV data, or a rowNumber
 * @param {string} data.source - 'make_com' | 'digital_ocean_autonomous' | 'node_server'
 */
async function handleFullWorkflow(data) {
  // Launch the log viewer at workflow start
  launchLogViewer();

  return await tracer.run('Main.handleFullWorkflow()', async () => {
    await tracer.annotate({ source: data.source || 'node_server', mc0: data.csvData?.mc0 || data.rowNumber });

    // 1. PREPARE THE CSV DATA
    const csvData = await tracer.run('Main.prepareCSVData()', async () => {
      const result = await prepareCSVData(data);
      await tracer.event(`CSV préparé: ${result.mc0}`, { csvKeys: Object.keys(result) });
      return result;
    }, { rowNumber: data.rowNumber, source: data.source });

    // 2. DECODE THE XML TEMPLATE
    const xmlString = await tracer.run('Main.decodeXMLTemplate()', async () => {
      const result = decodeXMLTemplate(data.xmlTemplate);
      await tracer.event(`XML décodé: ${result.length} caractères`);
      return result;
    }, { templateLength: data.xmlTemplate?.length });

    // 3. XML PREPROCESSING
    const processedXML = await tracer.run('Main.preprocessXML()', async () => {
      const result = preprocessXML(xmlString);
      await tracer.event('XML préprocessé');
      global.currentXmlTemplate = result;
      return result;
    }, { originalLength: xmlString?.length });

    // 4. EXTRACT THE ELEMENTS
    const elements = await tracer.run('ElementExtraction.extractElements()', async () => {
      const result = await extractElements(processedXML, csvData);
      await tracer.event(`${result.length} éléments extraits`);
      return result;
    }, { xmlLength: processedXML?.length, mc0: csvData.mc0 });

    // 5. GENERATE MISSING KEYWORDS
    const finalElements = await tracer.run('MissingKeywords.generateMissingKeywords()', async () => {
      const updatedElements = await generateMissingKeywords(elements, csvData);
      const result = Object.keys(updatedElements).length > 0 ? updatedElements : elements;
      await tracer.event('Mots-clés manquants traités');
      return result;
    }, { elementsCount: elements.length, mc0: csvData.mc0 });

    // 6. BUILD THE SMART HIERARCHY
    const hierarchy = await tracer.run('ElementExtraction.buildSmartHierarchy()', async () => {
      const result = await buildSmartHierarchy(finalElements);
      await tracer.event(`Hiérarchie construite: ${Object.keys(result).length} sections`);
      return result;
    }, { finalElementsCount: finalElements.length });

    // 7. 🎯 GENERATION WITH SELECTIVE ENHANCEMENT (Phase 2)
    const generatedContent = await tracer.run('ContentGeneration.generateWithContext()', async () => {
      const result = await generateWithContext(hierarchy, csvData);
      await tracer.event(`Contenu généré: ${Object.keys(result).length} éléments`);
      return result;
    }, { elementsCount: Object.keys(hierarchy).length, personality: csvData.personality?.nom });

    // 8. ASSEMBLE THE FINAL XML
    const finalXML = await tracer.run('ContentAssembly.injectGeneratedContent()', async () => {
      const result = injectGeneratedContent(processedXML, generatedContent, finalElements);
      await tracer.event('XML final assemblé');
      return result;
    }, { contentPieces: Object.keys(generatedContent).length, elementsCount: finalElements.length });

    // 9. INTEGRITY VALIDATION
    const validationReport = await tracer.run('ErrorReporting.validateWorkflowIntegrity()', async () => {
      const result = validateWorkflowIntegrity(finalElements, generatedContent, finalXML, csvData);
      await tracer.event(`Validation: ${result.status}`);
      return result;
    }, { finalXMLLength: finalXML?.length, contentKeys: Object.keys(generatedContent).length });

    // 10. ARTICLE STORAGE
    const articleStorage = await tracer.run('Main.saveArticle()', async () => {
      const result = await saveArticle(finalXML, generatedContent, finalElements, csvData, data.source);
      if (result) {
        await tracer.event(`Article sauvé: ID ${result.articleId}`);
      }
      return result;
    }, { source: data.source, mc0: csvData.mc0, elementsCount: finalElements.length });

    // 11. FINAL RESPONSE
    const response = await tracer.run('Main.buildWorkflowResponse()', async () => {
      const result = await buildWorkflowResponse(finalXML, generatedContent, finalElements, csvData, validationReport, articleStorage, data.source);
      await tracer.event(`Response keys: ${Object.keys(result).join(', ')}`);
      return result;
    }, { validationStatus: validationReport?.status, articleId: articleStorage?.articleId });

    return response;
  }, { source: data.source || 'node_server', rowNumber: data.rowNumber, hasXMLTemplate: !!data.xmlTemplate });
}
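
// Illustrative invocation (field values invented; with only rowNumber set,
// prepareCSVData() below fetches the CSV row from Google Sheets and the XML
// template from Digital Ocean):
//   const result = await handleFullWorkflow({
//     rowNumber: 2,
//     source: 'node_server'
//   });
//   console.log(result.stats.wordCount, result.validationReport.status);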

// ============= DATA PREPARATION =============

/**
 * Prepare the CSV data according to the source - ASYNC for Node.js
 * FETCHES: Google Sheets (CSV data) + Digital Ocean (XML template)
 */
async function prepareCSVData(data) {
  if (data.csvData && data.csvData.mc0) {
    // Data already prepared (Digital Ocean or direct)
    return data.csvData;
  } else if (data.rowNumber) {
    // 1. FETCH THE CSV DATA from the Google Sheet (REQUIRED)
    await logSh(`🧠 Récupération données CSV ligne ${data.rowNumber}...`, 'INFO');
    const config = await getBrainConfig(data.rowNumber);
    if (!config.success) {
      await logSh('❌ ÉCHEC: Impossible de récupérer les données Google Sheets', 'ERROR');
      throw new Error('FATAL: Google Sheets inaccessible - arrêt du workflow');
    }

    // 2. CHECK THE XML FILENAME from the Google Sheet (column I)
    const xmlFileName = config.data.xmlFileName;
    if (!xmlFileName || xmlFileName.trim() === '') {
      await logSh('❌ ÉCHEC: Nom fichier XML manquant (colonne I Google Sheets)', 'ERROR');
      throw new Error('FATAL: XML filename manquant - arrêt du workflow');
    }

    await logSh(`📋 CSV récupéré: ${config.data.mc0}`, 'INFO');
    await logSh(`📄 XML filename: ${xmlFileName}`, 'INFO');

    // 3. FETCH THE XML CONTENT from Digital Ocean with AUTH (REQUIRED)
    await logSh(`🌊 Récupération XML template depuis Digital Ocean (avec signature AWS)...`, 'INFO');
    let xmlContent;
    try {
      xmlContent = await fetchXMLFromDigitalOcean(xmlFileName);
      await logSh(`✅ XML récupéré: ${xmlContent.length} caractères`, 'INFO');
    } catch (digitalOceanError) {
      await logSh(`❌ ÉCHEC: Digital Ocean inaccessible - ${digitalOceanError.message}`, 'ERROR');
      throw new Error(`FATAL: Digital Ocean échec - arrêt du workflow: ${digitalOceanError.message}`);
    }

    // 4. ENCODE THE XML for the workflow (as Make.com does)
    // If an XML file was fetched, use it; otherwise fall back to the default
    // template already present in config.data.xmlTemplate
    if (xmlContent) {
      data.xmlTemplate = Buffer.from(xmlContent).toString('base64');
      await logSh('🔄 XML depuis Digital Ocean encodé base64 pour le workflow', 'DEBUG');
    } else if (config.data.xmlTemplate) {
      data.xmlTemplate = Buffer.from(config.data.xmlTemplate).toString('base64');
      await logSh('🔄 XML template par défaut encodé base64 pour le workflow', 'DEBUG');
    }

    return config.data;
  } else {
    throw new Error('FATAL: Données CSV invalides - rowNumber requis');
  }
}

/**
 * Decode the XML template - NODE.JS VERSION
 */
function decodeXMLTemplate(xmlTemplate) {
  if (!xmlTemplate) {
    throw new Error('Template XML manquant');
  }

  // If the template already starts with <?xml, it is plain text
  if (xmlTemplate.startsWith('<?xml') || xmlTemplate.startsWith('<')) {
    return xmlTemplate;
  }

  try {
    // 🔄 NODE.JS: attempt base64 only when it is not already XML
    const decoded = Buffer.from(xmlTemplate, 'base64').toString('utf8');
    return decoded;
  } catch (error) {
    // On failure, treat it as plain text
    logSh('🔍 XML pas encodé base64, utilisation directe', 'DEBUG'); // Using logSh instead of console.log
    return xmlTemplate;
  }
}
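
// Example (hedged sketch — both calls return the same XML string):
//   decodeXMLTemplate('<?xml version="1.0"?><root/>');
//   decodeXMLTemplate(Buffer.from('<?xml version="1.0"?><root/>').toString('base64'));
// Note that Buffer.from(str, 'base64') is lenient and rarely throws, so the
// startsWith('<') guard above does most of the work.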

/**
 * XML preprocessing (cleanup) - UNCHANGED
 */
function preprocessXML(xmlString) {
  let processed = xmlString;

  // Clean <strong> tags
  processed = cleanStrongTags(processed);

  // Future cleanup passes go here...

  return processed;
}

// ============= STORAGE =============

/**
 * Save the article with source metadata - ASYNC for Node.js
 */
async function saveArticle(finalXML, generatedContent, finalElements, csvData, source) {
  await logSh('💾 Sauvegarde article...', 'INFO');

  const articleData = {
    xmlContent: finalXML,
    generatedTexts: generatedContent,
    elementsGenerated: finalElements.length,
    originalElements: finalElements
  };

  const storageConfig = {
    antiDetectionLevel: 'Selective_Enhancement',
    llmUsed: 'claude+openai+gemini+mistral',
    workflowVersion: '2.0-NodeJS', // 🔄 version bump
    source: source || 'node_server', // 🔄 default source
    enhancementTechniques: [
      'technical_terms_gpt4',
      'transitions_gemini',
      'personality_style_mistral'
    ]
  };

  try {
    const articleStorage = await saveGeneratedArticleOrganic(articleData, csvData, storageConfig);
    await logSh(`✅ Article sauvé: ID ${articleStorage.articleId}`, 'INFO');
    return articleStorage;
  } catch (storageError) {
    await logSh(`⚠️ Erreur sauvegarde: ${storageError.toString()}`, 'WARNING');
    return null; // non-blocking
  }
}

// ============= RESPONSE =============

/**
 * Build the final workflow response - ASYNC for logSh
 */
async function buildWorkflowResponse(finalXML, generatedContent, finalElements, csvData, validationReport, articleStorage, source) {
  const response = {
    success: true,
    source: source,
    xmlContent: finalXML,
    generatedTexts: generatedContent,
    elementsGenerated: finalElements.length,
    personality: csvData.personality?.nom || 'Unknown',
    csvData: {
      mc0: csvData.mc0,
      t0: csvData.t0,
      personality: csvData.personality?.nom
    },
    timestamp: new Date().toISOString(),
    validationReport: validationReport,
    articleStorage: articleStorage,

    // NEW PHASE 2 METADATA
    antiDetectionLevel: 'Selective_Enhancement',
    llmsUsed: ['claude', 'openai', 'gemini', 'mistral'],
    enhancementApplied: true,
    workflowVersion: '2.0-NodeJS', // 🔄 version bump

    // PERFORMANCE STATS
    stats: {
      xmlLength: finalXML.length,
      contentPieces: Object.keys(generatedContent).length,
      wordCount: calculateTotalWordCount(generatedContent),
      validationStatus: validationReport.status
    }
  };

  await logSh(`🔍 Response.stats: ${JSON.stringify(response.stats)}`, 'DEBUG');

  return response;
}

// ============= HELPERS =============

/**
 * Compute the total word count - UNCHANGED
 */
function calculateTotalWordCount(generatedContent) {
  let totalWords = 0;
  Object.values(generatedContent).forEach(content => {
    if (content && typeof content === 'string') {
      totalWords += content.trim().split(/\s+/).length;
    }
  });
  return totalWords;
}

// ============= ADDITIONAL ENTRY POINTS =============

/**
 * Test of the main workflow - ASYNC for Node.js
 */
async function testMainWorkflow() {
  try {
    const testData = {
      csvData: {
        mc0: 'plaque test nodejs',
        t0: 'Test workflow principal Node.js',
        personality: { nom: 'Marc', style: 'professionnel' },
        tMinus1: 'parent test',
        mcPlus1: 'mot1,mot2,mot3,mot4',
        tPlus1: 'Titre1,Titre2,Titre3,Titre4'
      },
      xmlTemplate: Buffer.from('<?xml version="1.0"?><test>|Test_Element{{T0}}|</test>').toString('base64'),
      source: 'test_main_nodejs'
    };

    const result = await handleFullWorkflow(testData);
    return result;

  } catch (error) {
    throw error;
  } finally {
    tracer.printSummary();
  }
}
||||
|
||||
// 🔄 NODE.JS EXPORTS
|
||||
module.exports = {
|
||||
handleFullWorkflow,
|
||||
testMainWorkflow,
|
||||
prepareCSVData,
|
||||
decodeXMLTemplate,
|
||||
preprocessXML,
|
||||
saveArticle,
|
||||
buildWorkflowResponse,
|
||||
calculateTotalWordCount,
|
||||
launchLogViewer
|
||||
};
|
||||
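// Usage sketch (illustrative, not part of the original file): the exported
// entry point takes CSV row data plus a base64-encoded XML template, as in
// testMainWorkflow() above and in process_real.js:
//
//   const { handleFullWorkflow } = require('./lib/Main');
//   const result = await handleFullWorkflow({
//     csvData,                                           // row read from Google Sheets
//     xmlTemplate: Buffer.from(xml).toString('base64'),  // template as base64
//     source: 'my_caller'                                // free-form source tag
//   });
//   console.log(result.stats.wordCount);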
120
lib/ManualTrigger.js
Normal file
@@ -0,0 +1,120 @@
const { logSh } = require('./ErrorReporting'); // Unified logSh from ErrorReporting
const { handleFullWorkflow } = require('./Main');
const { fetchXMLFromDigitalOceanSimple } = require('./DigitalOceanWorkflow');

/**
 * 🚀 MANUAL TRIGGER - Reads the given sheet row and launches the workflow
 * Originally run from the Apps Script editor; readCSVDataFromRow below still
 * uses the Apps Script SpreadsheetApp API and is pending a full Node.js port.
 */
async function runWorkflowLigne(numeroLigne = 2) {
  cleanLogSheet(); // Clear the logs for this test (helper assumed available globally)

  try {
    logSh('🎬 >>> MANUAL WORKFLOW START <<<', 'INFO');

    // 1. Automatically read the requested row
    const csvData = readCSVDataFromRow(numeroLigne);
    logSh(`✅ Data read - MC0: ${csvData.mc0}`, 'INFO');
    logSh(`✅ Title: ${csvData.t0}`, 'INFO');
    logSh(`✅ Personality: ${csvData.personality.nom}`, 'INFO');

    // 2. Simple XML template for testing (or fetched from DigitalOcean if configured)
    const xmlTemplate = await getXMLTemplateForTest(csvData);
    logSh(`✅ XML template: ${xmlTemplate.length} characters`, 'INFO');

    // 3. 🎯 Launch the main workflow
    const workflowData = {
      csvData: csvData,
      xmlTemplate: Buffer.from(xmlTemplate).toString('base64'),
      source: 'manuel_ligne2'
    };

    const result = await handleFullWorkflow(workflowData);

    logSh('🏆 === MANUAL WORKFLOW FINISHED ===', 'INFO');

    // ← Extract the actual payload
    let actualData;
    if (result && result.getContentText) {
      // Apps Script ContentService object: extract the JSON
      actualData = JSON.parse(result.getContentText());
    } else {
      actualData = result;
    }

    logSh(`Result type: ${typeof result}`, 'DEBUG');
    logSh(`Result keys: ${Object.keys(result || {})}`, 'DEBUG');
    logSh(`ActualData keys: ${Object.keys(actualData || {})}`, 'DEBUG');
    logSh(`ActualData: ${JSON.stringify(actualData)}`, 'DEBUG');

    if (actualData && actualData.stats) {
      logSh(`📊 Elements generated: ${actualData.stats.contentPieces}`, 'INFO');
      logSh(`📝 Word count: ${actualData.stats.wordCount}`, 'INFO');
    } else {
      logSh('⚠️ Unexpected result format', 'WARNING');
      logSh('ActualData: ' + JSON.stringify(actualData, null, 2), 'DEBUG');
    }

    return actualData;

  } catch (error) {
    logSh(`❌ MANUAL WORKFLOW ERROR: ${error.toString()}`, 'ERROR');
    logSh(`Stack: ${error.stack}`, 'ERROR');
    throw error;
  }
}

/**
 * HELPER - Read CSV data from a specific row
 * NOTE: still relies on the Apps Script SpreadsheetApp global plus the
 * getPersonalities / selectPersonalityWithAI helpers; not yet ported to Node.js.
 */
function readCSVDataFromRow(rowNumber) {
  const sheetId = '1iA2GvWeUxX-vpnAMfVm3ZMG9LhaC070SdGssEcXAh2c';
  const spreadsheet = SpreadsheetApp.openById(sheetId);
  const articlesSheet = spreadsheet.getSheetByName('instructions');

  // Read the full row (columns A to I)
  const range = articlesSheet.getRange(rowNumber, 1, 1, 9);
  const [slug, t0, mc0, tMinus1, lMinus1, mcPlus1, tPlus1, lPlus1, xmlFileName] = range.getValues()[0];

  logSh(`📖 Reading row ${rowNumber}: ${slug}`, 'DEBUG');

  // Fetch the personalities and select one automatically
  const personalitiesSheet = spreadsheet.getSheetByName('Personnalites');
  const personalities = getPersonalities(personalitiesSheet);
  const selectedPersonality = selectPersonalityWithAI(mc0, t0, personalities);

  return {
    rowNumber: rowNumber,
    slug: slug || 'test-slug',
    t0: t0 || 'Titre par défaut',
    mc0: mc0 || 'mot-clé test',
    tMinus1: tMinus1 || 'parent',
    lMinus1: lMinus1 || '/parent',
    mcPlus1: mcPlus1 || 'mot1,mot2,mot3,mot4',
    tPlus1: tPlus1 || 'Titre1,Titre2,Titre3,Titre4',
    lPlus1: lPlus1 || '/lien1,/lien2,/lien3,/lien4',
    personality: selectedPersonality,
    xmlFileName: xmlFileName ? xmlFileName.toString().trim() : null
  };
}

/**
 * HELPER - Simple XML template for testing (or from DigitalOcean)
 */
async function getXMLTemplateForTest(csvData) {
  logSh("csvData.xmlFileName: " + csvData.xmlFileName, 'DEBUG');

  if (csvData.xmlFileName) {
    logSh("Trying DigitalOcean...", 'INFO');
    try {
      return await fetchXMLFromDigitalOceanSimple(csvData.xmlFileName);
    } catch (error) {
      // ← No more silent catch
      logSh("DO error: " + error.toString(), 'WARNING');
      logSh(`❌ DETAILED DO ERROR: ${error.toString()}`, 'ERROR');

      // Continue without DigitalOcean
    }
  }

  logSh("❌ FATAL: No XML template available", 'ERROR');
  throw new Error("FATAL: XML template unavailable (DigitalOcean unreachable + no fallback) - stopping workflow");
}
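// Usage sketch (illustrative; this file has no module.exports yet, so the
// require below is an assumption about how it would be exposed):
//
//   const { runWorkflowLigne } = require('./lib/ManualTrigger');
//   await runWorkflowLigne(2);   // reads sheet row 2 and runs the workflow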
241
lib/MissingKeywords.js
Normal file
@@ -0,0 +1,241 @@
// ========================================
// FILE: MissingKeywords.js - Node.js version
// Description: Automatic generation of missing keywords
// ========================================

const { logSh } = require('./ErrorReporting');
const { callLLM } = require('./LLMManager');

/**
 * Automatically generates the missing keywords for undefined elements
 * @param {Array} elements - List of extracted elements
 * @param {Object} csvData - CSV data with personality
 * @returns {Array|Object} Updated elements (empty object when nothing is missing)
 */
async function generateMissingKeywords(elements, csvData) {
  logSh('>>> GENERATING MISSING KEYWORDS <<<', 'INFO');

  // 1. IDENTIFY every missing element
  // The 'non défini' / 'non résolu' markers stay in French: they match the
  // fallback placeholders produced elsewhere in the repo (see Utils.js).
  const missingElements = [];
  elements.forEach(element => {
    if (element.resolvedContent.includes('non défini') ||
        element.resolvedContent.includes('non résolu') ||
        element.resolvedContent.trim() === '') {

      missingElements.push({
        tag: element.originalTag,
        name: element.name,
        type: element.type,
        currentContent: element.resolvedContent,
        context: getElementContext(element, elements, csvData)
      });
    }
  });

  if (missingElements.length === 0) {
    logSh('No missing keyword detected', 'INFO');
    return {};
  }

  logSh(`${missingElements.length} missing keywords detected`, 'INFO');

  // 2. ANALYZE the globally available context
  const contextAnalysis = analyzeAvailableContext(elements, csvData);

  // 3. GENERATE all missing keywords in a SINGLE AI call
  const generatedKeywords = await callOpenAIForMissingKeywords(missingElements, contextAnalysis, csvData);

  // 4. UPDATE the elements with the new keywords
  const updatedElements = updateElementsWithKeywords(elements, generatedKeywords);

  logSh(`Missing keywords generated: ${Object.keys(generatedKeywords).length}`, 'INFO');
  return updatedElements;
}

/**
 * Analyze the available context to guide generation
 * @param {Array} elements - All elements
 * @param {Object} csvData - CSV data
 * @returns {Object} Context analysis
 */
function analyzeAvailableContext(elements, csvData) {
  const availableKeywords = [];
  const availableContent = [];

  // Collect every keyword / piece of content already available
  elements.forEach(element => {
    if (element.resolvedContent &&
        !element.resolvedContent.includes('non défini') &&
        !element.resolvedContent.includes('non résolu') &&
        element.resolvedContent.trim() !== '') {

      if (element.type.includes('titre')) {
        availableKeywords.push(element.resolvedContent);
      } else {
        availableContent.push(element.resolvedContent.substring(0, 100));
      }
    }
  });

  return {
    mainKeyword: csvData.mc0,
    mainTitle: csvData.t0,
    availableKeywords: availableKeywords,
    availableContent: availableContent,
    theme: csvData.mc0, // Main theme
    businessContext: "Autocollant.fr - signalétique personnalisée, plaques"
  };
}

/**
 * Get the specific context of one element
 * @param {Object} element - Element to analyze
 * @param {Array} allElements - All elements
 * @param {Object} csvData - CSV data
 * @returns {Object} Element context
 */
function getElementContext(element, allElements, csvData) {
  const context = {
    elementType: element.type,
    hierarchyLevel: element.name,
    nearbyElements: []
  };

  // Find the nearby elements in the hierarchy
  const elementParts = element.name.split('_');
  if (elementParts.length >= 2) {
    const baseLevel = elementParts.slice(0, 2).join('_'); // e.g. "Titre_H3"

    allElements.forEach(otherElement => {
      if (otherElement.name.startsWith(baseLevel) &&
          otherElement.resolvedContent &&
          !otherElement.resolvedContent.includes('non défini')) {

        context.nearbyElements.push(otherElement.resolvedContent);
      }
    });
  }

  return context;
}

/**
 * Single batched AI call that generates all the missing keywords
 * @param {Array} missingElements - Missing elements
 * @param {Object} contextAnalysis - Context analysis
 * @param {Object} csvData - CSV data with personality
 * @returns {Object} Generated keywords
 */
async function callOpenAIForMissingKeywords(missingElements, contextAnalysis, csvData) {
  const personality = csvData.personality;

  // The prompt is intentionally French: the generated keywords target the
  // French-language Autocollant.fr site.
  let prompt = `Tu es ${personality.nom} (${personality.description}). Style: ${personality.style}

MISSION: GÉNÈRE ${missingElements.length} MOTS-CLÉS/EXPRESSIONS MANQUANTS pour ${contextAnalysis.mainKeyword}

CONTEXTE:
- Sujet: ${contextAnalysis.mainKeyword}
- Entreprise: Autocollant.fr (signalétique)
- Mots-clés existants: ${contextAnalysis.availableKeywords.slice(0, 3).join(', ')}

ÉLÉMENTS MANQUANTS:
`;

  missingElements.forEach((missing, index) => {
    prompt += `${index + 1}. [${missing.name}] → Mot-clé SEO\n`;
  });

  prompt += `\nCONSIGNES:
- Thème: ${contextAnalysis.mainKeyword}
- Mots-clés SEO naturels
- Varie les termes
- Évite répétitions

FORMAT:
[${missingElements[0].name}]
mot-clé

[${missingElements[1] ? missingElements[1].name : 'exemple'}]
mot-clé

etc...`;

  try {
    logSh('Generating missing keywords...', 'DEBUG');

    // Use the LLM Manager, which handles fallback
    const response = await callLLM('openai', prompt, {
      temperature: 0.7,
      maxTokens: 2000
    }, personality);

    // Parse the response
    const generatedKeywords = parseMissingKeywordsResponse(response, missingElements);

    return generatedKeywords;

  } catch (error) {
    logSh(`❌ FATAL: Missing-keyword generation failed: ${error}`, 'ERROR');
    throw new Error(`FATAL: LLM keyword generation impossible - stopping workflow: ${error}`);
  }
}

/**
 * Parse the AI response to extract the generated keywords
 * @param {string} response - AI response
 * @param {Array} missingElements - Missing elements
 * @returns {Object} Parsed keywords
 */
function parseMissingKeywordsResponse(response, missingElements) {
  const results = {};

  const regex = /\[([^\]]+)\]\s*\n([^[]*?)(?=\n\[|$)/gs;
  let match;

  while ((match = regex.exec(response)) !== null) {
    const elementName = match[1].trim();
    const generatedKeyword = match[2].trim();

    results[elementName] = generatedKeyword;

    logSh(`✓ Keyword generated [${elementName}]: "${generatedKeyword}"`, 'DEBUG');
  }

  // FATAL on partial parsing
  if (Object.keys(results).length < missingElements.length) {
    const manquants = missingElements.length - Object.keys(results).length;
    logSh(`❌ FATAL: Partial keyword parsing - ${manquants}/${missingElements.length} missing`, 'ERROR');
    throw new Error(`FATAL: Incomplete keyword parsing (${manquants}/${missingElements.length} missing) - stopping workflow`);
  }

  return results;
}
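// Example (illustrative values): for an LLM response such as
//
//   [Titre_H3_1]
//   plaque professionnelle gravée
//
//   [Titre_H3_2]
//   signalétique extérieure durable
//
// parseMissingKeywordsResponse returns
//   { 'Titre_H3_1': 'plaque professionnelle gravée',
//     'Titre_H3_2': 'signalétique extérieure durable' }
// and throws if fewer entries were parsed than were requested.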
/**
 * Update the elements with the newly generated keywords
 * @param {Array} elements - Original elements
 * @param {Object} generatedKeywords - New keywords
 * @returns {Array} Updated elements
 */
function updateElementsWithKeywords(elements, generatedKeywords) {
  const updatedElements = elements.map(element => {
    const newKeyword = generatedKeywords[element.name];

    if (newKeyword) {
      return {
        ...element,
        resolvedContent: newKeyword
      };
    }

    return element;
  });

  logSh('Elements updated with the new keywords', 'INFO');
  return updatedElements;
}

// CommonJS exports
module.exports = {
  generateMissingKeywords
};
1545
lib/SelectiveEnhancement.js
Normal file
File diff suppressed because it is too large
273
lib/Utils.js
Normal file
@@ -0,0 +1,273 @@
// ========================================
// FILE: utils.js - Node.js conversion
// Description: Generic utilities for the workflow
// ========================================

// Logging import (assumes logSh is available globally)
// const { logSh } = require('./logging'); // Uncomment if logSh lives in a separate module

/**
 * Create a standardized success response
 * @param {Object} data - Data to return
 * @returns {Object} Response formatted for Express/HTTP
 */
function createSuccessResponse(data) {
  return {
    success: true,
    data: data,
    timestamp: new Date().toISOString()
  };
}

/**
 * Create a standardized error response
 * @param {string|Error} error - Error message or Error object
 * @returns {Object} Formatted error response
 */
function createErrorResponse(error) {
  const errorMessage = error instanceof Error ? error.message : error.toString();

  return {
    success: false,
    error: errorMessage,
    timestamp: new Date().toISOString(),
    stack: process.env.NODE_ENV === 'development' && error instanceof Error ? error.stack : undefined
  };
}

/**
 * Express middleware that adds standardized response helpers
 * Usage: res.success(data) or res.error(error)
 */
function responseMiddleware(req, res, next) {
  // Success response helper
  res.success = (data, statusCode = 200) => {
    res.status(statusCode).json(createSuccessResponse(data));
  };

  // Error response helper
  res.error = (error, statusCode = 500) => {
    res.status(statusCode).json(createErrorResponse(error));
  };

  next();
}
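// Usage sketch (illustrative; the route path is an assumption): wire the
// middleware into the Express app, then use the helpers inside any route:
//
//   const express = require('express');
//   const { responseMiddleware } = require('./lib/Utils');
//
//   const app = express();
//   app.use(responseMiddleware);
//
//   app.get('/api/example', (req, res) => {
//     res.success({ hello: 'world' });        // 200 + { success: true, data: ... }
//     // res.error(new Error('boom'), 400);   // 400 + { success: false, error: ... }
//   });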
/**
 * HELPER: Clean FAQ instructions
 * Substitutes the template variables and strips HTML
 * @param {string} instructions - Instructions to clean
 * @param {Object} csvData - CSV data used for variable substitution
 * @returns {string} Cleaned instructions
 */
function cleanFAQInstructions(instructions, csvData) {
  if (!instructions || !csvData) {
    return instructions || '';
  }

  let clean = instructions.toString();

  try {
    // Replace the simple variables
    clean = clean.replace(/\{\{MC0\}\}/g, csvData.mc0 || '');
    clean = clean.replace(/\{\{T0\}\}/g, csvData.t0 || '');

    // Multi-value variables when needed (French fallback placeholders are
    // kept as-is: they are the markers MissingKeywords.js looks for)
    if (csvData.mcPlus1) {
      const mcPlus1 = csvData.mcPlus1.split(',').map(s => s.trim());

      for (let i = 1; i <= 6; i++) {
        const mcValue = mcPlus1[i-1] || `[MC+1_${i} non défini]`;
        clean = clean.replace(new RegExp(`\\{\\{MC\\+1_${i}\\}\\}`, 'g'), mcValue);
      }
    }

    // T+1 and L+1 variables when available
    if (csvData.tPlus1) {
      const tPlus1 = csvData.tPlus1.split(',').map(s => s.trim());
      for (let i = 1; i <= 6; i++) {
        const tValue = tPlus1[i-1] || `[T+1_${i} non défini]`;
        clean = clean.replace(new RegExp(`\\{\\{T\\+1_${i}\\}\\}`, 'g'), tValue);
      }
    }

    if (csvData.lPlus1) {
      const lPlus1 = csvData.lPlus1.split(',').map(s => s.trim());
      for (let i = 1; i <= 6; i++) {
        const lValue = lPlus1[i-1] || `[L+1_${i} non défini]`;
        clean = clean.replace(new RegExp(`\\{\\{L\\+1_${i}\\}\\}`, 'g'), lValue);
      }
    }

    // Strip HTML tags
    clean = clean.replace(/<\/?[^>]+>/g, '');

    // Collapse extra whitespace
    clean = clean.replace(/\s+/g, ' ').trim();

  } catch (error) {
    if (typeof logSh === 'function') {
      logSh(`⚠️ FAQ instruction cleaning error: ${error.toString()}`, 'WARNING');
    }
    // Return at least the partially cleaned version
  }

  return clean;
}
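// Example (illustrative values): with csvData = { mc0: 'plaque gravée',
// t0: 'Plaques gravées sur mesure' },
//
//   cleanFAQInstructions('<p>Parlez de {{MC0}} dans {{T0}}.</p>', csvData)
//
// returns 'Parlez de plaque gravée dans Plaques gravées sur mesure.'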
/**
 * Wait for a delay (replacement for Google Apps Script's Utilities.sleep)
 * @param {number} ms - Milliseconds to wait
 * @returns {Promise} Promise that resolves after the delay
 */
function sleep(ms) {
  return new Promise(resolve => setTimeout(resolve, ms));
}

/**
 * Encode a string as base64
 * @param {string} text - Text to encode
 * @returns {string} Base64-encoded text
 */
function base64Encode(text) {
  return Buffer.from(text, 'utf8').toString('base64');
}

/**
 * Decode a base64 string
 * @param {string} base64Text - Base64 text to decode
 * @returns {string} Decoded text
 */
function base64Decode(base64Text) {
  return Buffer.from(base64Text, 'base64').toString('utf8');
}

/**
 * Validate and clean a slug/filename
 * @param {string} slug - Slug to clean
 * @returns {string} Cleaned slug
 */
function cleanSlug(slug) {
  if (!slug) return '';

  return slug
    .toString()
    .toLowerCase()
    .replace(/[^a-z0-9\-_]/g, '-') // Replace special characters with -
    .replace(/-+/g, '-')           // Avoid double hyphens
    .replace(/^-+|-+$/g, '');      // Trim leading/trailing hyphens
}

/**
 * Count the words in a text
 * @param {string} text - Text to analyze
 * @returns {number} Word count
 */
function countWords(text) {
  if (!text || typeof text !== 'string') return 0;

  return text
    .trim()
    .replace(/\s+/g, ' ') // Normalize whitespace
    .split(' ')
    .filter(word => word.length > 0)
    .length;
}

/**
 * Format a duration in milliseconds as a readable string
 * @param {number} ms - Duration in milliseconds
 * @returns {string} Formatted duration (e.g. "2.3s" or "450ms")
 */
function formatDuration(ms) {
  if (ms < 1000) {
    return `${ms}ms`;
  } else if (ms < 60000) {
    return `${(ms / 1000).toFixed(1)}s`;
  } else {
    const minutes = Math.floor(ms / 60000);
    const seconds = ((ms % 60000) / 1000).toFixed(1);
    return `${minutes}m ${seconds}s`;
  }
}

/**
 * Automatically retry a function
 * @param {Function} fn - Function to run with retries
 * @param {number} maxRetries - Maximum number of attempts
 * @param {number} delay - Base delay between attempts (ms)
 * @returns {Promise} Result of the function, or the final error
 */
async function withRetry(fn, maxRetries = 3, delay = 1000) {
  let lastError;

  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      return await fn();
    } catch (error) {
      lastError = error;

      if (typeof logSh === 'function') {
        logSh(`⚠️ Attempt ${attempt}/${maxRetries} failed: ${error.toString()}`, 'WARNING');
      }

      if (attempt < maxRetries) {
        await sleep(delay * attempt); // Linearly increasing backoff (delay, 2*delay, ...)
      }
    }
  }

  throw lastError;
}
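// Usage sketch (illustrative; the wrapped call and filename are assumptions):
// retry a flaky network fetch up to 3 times, waiting 500ms then 1000ms
// between attempts:
//
//   const xml = await withRetry(
//     () => fetchXMLFromDigitalOceanSimple('template.xml'),
//     3,
//     500
//   );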
/**
 * Basic email validation
 * @param {string} email - Email to validate
 * @returns {boolean} True if the email is valid
 */
function isValidEmail(email) {
  const emailRegex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/;
  return emailRegex.test(email);
}

/**
 * Generate a simple unique ID
 * @returns {string} Unique ID based on timestamp + random suffix
 */
function generateId() {
  return `${Date.now()}-${Math.random().toString(36).slice(2, 11)}`;
}

/**
 * Truncate a text to a given length
 * @param {string} text - Text to truncate
 * @param {number} maxLength - Maximum length
 * @param {string} suffix - Suffix appended when truncated (default: '...')
 * @returns {string} Truncated text
 */
function truncate(text, maxLength, suffix = '...') {
  if (!text || text.length <= maxLength) {
    return text;
  }

  return text.substring(0, maxLength - suffix.length) + suffix;
}

// ============= EXPORTS =============

module.exports = {
  createSuccessResponse,
  createErrorResponse,
  responseMiddleware,
  cleanFAQInstructions,
  sleep,
  base64Encode,
  base64Decode,
  cleanSlug,
  countWords,
  formatDuration,
  withRetry,
  isValidEmail,
  generateId,
  truncate
};
3
lib/package.json
Normal file
@@ -0,0 +1,3 @@
{
  "type": "commonjs"
}
10
lib/polyfills/fetch.cjs
Normal file
@@ -0,0 +1,10 @@
// lib/polyfills/fetch.cjs
const { fetch, Headers, Request, Response, FormData, File } = require('undici');

// Expose these globals BEFORE any of the app's modules import/require anything
globalThis.fetch = fetch;
globalThis.Headers = Headers;
globalThis.Request = Request;
globalThis.Response = Response;
globalThis.FormData = FormData;
globalThis.File = File;
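// Usage sketch (assumed invocation): preload the polyfill so the globals are
// set before any other module is evaluated:
//
//   node --require ./lib/polyfills/fetch.cjs server.js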
9
lib/trace-wrap.js
Normal file
@@ -0,0 +1,9 @@
// lib/trace-wrap.js
const { tracer } = require('./trace.js');

// Wrap a function so every call runs inside a named trace span
const traced = (name, fn, attrs) => (...args) =>
  tracer.run(name, () => fn(...args), attrs);

module.exports = {
  traced
};
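// Usage sketch (illustrative; generateArticle is an assumed function name):
//
//   const { traced } = require('./trace-wrap');
//   const generateArticle = traced('generateArticle',
//     async (row) => { /* ... real work ... */ },
//     { source: 'manual' });
//   await generateArticle(2);   // logs "▶ generateArticle(source=manual)", then ✔/✖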
149
lib/trace.js
Normal file
@@ -0,0 +1,149 @@
// lib/trace.js
const { AsyncLocalStorage } = require('node:async_hooks');
const { randomUUID } = require('node:crypto');
const { logSh } = require('./ErrorReporting');

const als = new AsyncLocalStorage();

function now() { return performance.now(); }
function dur(ms) {
  if (ms < 1e3) return `${ms.toFixed(1)}ms`;
  const s = ms / 1e3;
  return s < 60 ? `${s.toFixed(2)}s` : `${(s/60).toFixed(2)}m`;
}

class Span {
  constructor({ name, parent = null, attrs = {} }) {
    this.id = randomUUID();
    this.name = name;
    this.parent = parent;
    this.children = [];
    this.attrs = attrs;
    this.start = now();
    this.end = null;
    this.status = 'ok';
    this.error = null;
  }
  pathNames() {
    const names = [];
    let cur = this;
    while (cur) { names.unshift(cur.name); cur = cur.parent; }
    return names.join(' > ');
  }
  finish() { this.end = now(); }
  duration() { return (this.end ?? now()) - this.start; }
}

class Tracer {
  constructor() {
    this.rootSpans = [];
  }
  current() { return als.getStore(); }

  async startSpan(name, attrs = {}) {
    const parent = this.current();
    const span = new Span({ name, parent, attrs });
    if (parent) parent.children.push(span);
    else this.rootSpans.push(span);

    // Format the parameters for display
    const paramsStr = this.formatParams(attrs);
    await logSh(`▶ ${name}${paramsStr}`, 'TRACE');
    return span;
  }

  async run(name, fn, attrs = {}) {
    const span = await this.startSpan(name, attrs);
    return await als.run(span, async () => {
      try {
        const res = await fn();
        span.finish();
        const paramsStr = this.formatParams(span.attrs);
        await logSh(`✔ ${name}${paramsStr} (${dur(span.duration())})`, 'TRACE');
        return res;
      } catch (err) {
        span.status = 'error';
        span.error = { message: err?.message, stack: err?.stack };
        span.finish();
        const paramsStr = this.formatParams(span.attrs);
        await logSh(`✖ ${name}${paramsStr} FAILED (${dur(span.duration())})`, 'ERROR');
        await logSh(`Stack trace: ${span.error.message}`, 'ERROR');
        if (span.error.stack) {
          const stackLines = span.error.stack.split('\n').slice(1, 6); // First 5 lines of the stack
          for (const line of stackLines) {
            await logSh(`  ${line.trim()}`, 'ERROR');
          }
        }
        throw err;
      }
    });
  }

  async event(msg, extra = {}) {
    const span = this.current();
    const data = { trace: true, evt: 'span.event', ...extra }; // NOTE: built but not yet forwarded to logSh
    if (span) {
      data.span = span.id;
      data.path = span.pathNames();
      data.since_ms = +((now() - span.start).toFixed(1));
    }
    await logSh(`• ${msg}`, 'TRACE');
  }

  async annotate(fields = {}) {
    const span = this.current();
    if (span) Object.assign(span.attrs, fields);
    await logSh('… annotate', 'TRACE');
  }

  formatParams(attrs = {}) {
    const params = Object.entries(attrs)
      .filter(([key, value]) => value !== undefined && value !== null)
      .map(([key, value]) => {
        // Truncate values that are too long
        const strValue = String(value);
        const truncated = strValue.length > 50 ? strValue.substring(0, 47) + '...' : strValue;
        return `${key}=${truncated}`;
      });

    return params.length > 0 ? `(${params.join(', ')})` : '';
  }

  printSummary() {
    const lines = [];
    const draw = (node, depth = 0) => {
      const pad = '  '.repeat(depth);
      const icon = node.status === 'error' ? '✖' : '✔';
      lines.push(`${pad}${icon} ${node.name} (${dur(node.duration())})`);
      if (Object.keys(node.attrs ?? {}).length) {
        lines.push(`${pad}  attrs: ${JSON.stringify(node.attrs)}`);
      }
      for (const ch of node.children) draw(ch, depth + 1);
      if (node.status === 'error' && node.error?.message) {
        lines.push(`${pad}  error: ${node.error.message}`);
        if (node.error.stack) {
          const stackLines = String(node.error.stack || '').split('\n').slice(1, 4).map(s => s.trim());
          if (stackLines.length) {
            lines.push(`${pad}  stack:`);
            stackLines.forEach(line => {
              if (line) lines.push(`${pad}    ${line}`);
            });
          }
        }
      }
    };
    for (const r of this.rootSpans) draw(r, 0);
    const summary = lines.join('\n');
    logSh(`\n—— TRACE SUMMARY ——\n${summary}\n—— END TRACE ——`, 'INFO');
    return summary;
  }
}

const tracer = new Tracer();

module.exports = {
  Span,
  Tracer,
  tracer
};
1971
package-lock.json
generated
Normal file
File diff suppressed because it is too large
32
package.json
Normal file
@@ -0,0 +1,32 @@
{
  "name": "seo-generator-server",
  "version": "1.0.0",
  "description": "Hello World SEO Generator",
  "main": "server.js",
  "type": "module",
  "scripts": {
    "start": "node server.js",
    "dev": "node server.js",
    "test:llm": "cross-env TEST_LLM_HOOKS=1 node --test tests/llm/*.test.js",
    "test:smoke": "node --test tests/smoke/*.test.js"
  },
  "dependencies": {
    "axios": "^1.6.0",
    "cors": "^2.8.5",
    "dotenv": "^16.3.1",
    "express": "^4.18.2",
    "google-auth-library": "^10.3.0",
    "google-spreadsheet": "^5.0.2",
    "googleapis": "^126.0.1",
    "node-fetch": "^3.3.2",
    "nodemailer": "^7.0.6",
    "pino": "^9.9.0",
    "pino-pretty": "^13.1.1",
    "undici": "^7.15.0",
    "ws": "^8.18.3"
  },
  "devDependencies": {
    "cross-env": "^10.0.0"
  }
}
293
plan.md
Normal file
@@ -0,0 +1,293 @@
# 🚀 Incremental Implementation Plan - AI Anti-Detection

## 📋 Overview of the 6 levels

| Level | Technique | Dev time | Expected gain | Test metric |
|-------|-----------|----------|---------------|-------------|
| 1 | Selective Enhancement | 4h | 15-20% ↓ detection | Simple GPTZero check |
| 2 | Pattern Breaking | 3h | 20-25% ↓ detection | 2 detectors |
| 3 | Adversarial Prompting | 2h | 25-30% ↓ detection | 3 detectors |
| 4 | Cross-Linguistic | 6h | 35-40% ↓ detection | In-depth tests |
| 5 | Temporal + Personality | 4h | 40-45% ↓ detection | Human validation |
| 6 | Full Arsenal | 3h | < 30% detection | Industrial-scale tests |

## 🎯 LEVEL 1: Selective Enhancement

Solid foundation - low risk.

**Objective.** Replace the current approach (1 LLM per element) with 4 targeted enhancements.

**Implementation**

```javascript
// ADD to ContentGeneration.gs
function generateWithSelectiveEnhancement(element, csvData) {
  // 1. Base draft with Claude (as before)
  let content = callLLM('claude', createPrompt(element, csvData), {}, csvData.personality);

  // 2. Targeted enhancements
  content = enhanceTechnicalTerms(content, csvData);   // GPT-4
  content = improveTransitions(content, csvData);      // Gemini
  content = applyPersonalityStyle(content, csvData);   // Mistral

  return content;
}

// Simple helper functions
function enhanceTechnicalTerms(content, csvData) {
  const technicalElements = extractTechnicalTerms(content);
  if (technicalElements.length === 0) return content;

  const enhanced = callLLM('gpt4',
    `Améliore SEULEMENT la précision technique de ces éléments: ${technicalElements.join(', ')}. Contexte: ${content}`,
    { temperature: 0.6 }, csvData.personality
  );

  return replaceTargetedElements(content, technicalElements, enhanced);
}
```

**Tests to run**

- [ ] Generate 10 articles with the old method
- [ ] Generate 10 articles with Selective Enhancement
- [ ] Compare them on GPTZero and Originality.ai
- [ ] Check generation time (must stay < 5 min/article)

**Validation criteria**

- ✅ AI detection reduced: -15% minimum
- ✅ Quality preserved: human score ≥ old system
- ✅ Performance: < 20% increase in generation time
- ✅ Stability: 0 errors across 20 tests

**Rollback plan.** On failure, revert to the old method with a single line of code.

## 🔧 LEVEL 2: Simple Pattern Breaking

First adversarial techniques.

**Objective.** Break the syntactic patterns typical of LLMs.

**Implementation**

```javascript
// ADD to ContentAssembly.gs
function applyBasicPatternBreaking(content, personality) {
  let result = content;

  // Technique 1: vary sentence lengths
  result = varyStructures(result, 0.3);

  // Technique 2: replace typical LLM words
  result = replaceLLMFingerprints(result, personality);

  // Technique 3: more human connectors
  result = humanizeTransitions(result);

  return result;
}

// Pattern-breaking functions
function varyStructures(text, intensity) {
  const sentences = text.split('. ');
  return sentences.map(s => {
    if (Math.random() < intensity) {
      if (s.length > 100) return splitSentence(s);
      if (s.length < 30) return mergePrevious(s, sentences);
    }
    return s;
  }).join('. ');
}
```

**Tests to run**

- [ ] A/B test on 20 articles: Level 1 vs Level 2
- [ ] Tests on 3 detectors: GPTZero, Originality.ai, CopyLeaks
- [ ] Readability validation (Flesch-Kincaid score)
- [ ] Semantic coherence test

**Validation criteria**

- ✅ Detection reduced: -20% vs Level 1
- ✅ Readability maintained: Flesch score > 60
- ✅ Coherence: no logical breaks
- ✅ SEO intact: keywords preserved

## ⚔️ LEVEL 3: Adversarial Prompting

Anti-detection prompt optimization.

**Objective.** Adjust the prompts so the LLMs naturally generate less detectable content.

**Implementation** (the injected instructions stay in French because the generated content is French)

```javascript
// MODIFY in ContentGeneration.gs
function createAdversarialPrompt(element, csvData, detectorTarget = 'general') {
  const basePrompt = createPrompt(element, csvData);

  const adversarialInstructions = {
    general: `
CONTRAINTES ANTI-DÉTECTION :
- Évite ces mots typiques IA : "optimal", "comprehensive", "seamless", "robust"
- Varie longueurs phrases : courtes ET longues mélangées
- Utilise expressions françaises familières
- Ajoute imperfections légères (répétitions naturelles)
- Change angle 1-2 fois dans texte`,

    gptZero: `
SPÉCIAL ANTI-GPTZERO :
- Surprends avec tournures inattendues
- Évite phrases trop prévisibles
- Utilise synonymes moins évidents`,

    originality: `
SPÉCIAL ANTI-ORIGINALITY :
- Vocabulaire très varié
- Structures phrases irrégulières
- Changements angles fréquents`
  };

  return `${basePrompt}\n\n${adversarialInstructions[detectorTarget]}`;
}
```

**Tests to run**

- [ ] Normal vs adversarial prompts on 30 elements
- [ ] Detector-specific tests
- [ ] Measure "perplexity" via API
- [ ] Human validation of content quality

**Validation criteria**

- ✅ Detection reduced: -25% vs Level 2
- ✅ Quality maintained: human rating ≥ 8/10
- ✅ Diversity increased: more vocabulary variation
- ✅ Specialization: each prompt effective against its target detector

## 🌍 LEVEL 4: Cross-Linguistic Wash

Advanced multi-language technique.

**Objective.** Use multi-language round-trip translation to break French linguistic patterns.

**Implementation**

```javascript
// NEW FILE: CrossLinguistic.gs
function crossLinguisticWash(content, languages = ['en', 'es'], intensity = 0.3) {
  if (Math.random() > intensity) return content; // Don't apply every time

  let washed = content;
  let fromLang = 'french';

  // Translation chain, tracking the current source language at each hop
  languages.forEach(lang => {
    washed = translateWithLLM(washed, fromLang, lang, 'deepseek');
    fromLang = lang;
  });

  // Back to French with a natural style
  washed = translateWithLLM(washed, fromLang, 'french', 'mistral');

  // Post-processing for fluency
  return harmonizeFrenchStyle(washed);
}

function translateWithLLM(text, fromLang, toLang, llm) {
  const prompt = `Traduis ce texte de ${fromLang} vers ${toLang} en gardant le sens exact: "${text}"`;
  return callLLM(llm, prompt, { temperature: 0.3 });
}
```

**Tests to run**

- [ ] Before/after wash test on 50 elements
- [ ] Verify meaning preservation (human validation)
- [ ] Detection tests on "washed" content
- [ ] Measure performance impact (time)

**Validation criteria**

- ✅ Anti-detection: -35% vs Level 3
- ✅ Meaning preserved: 95% semantic fidelity
- ✅ Natural French: no translation "smell"
- ✅ Performance: < 2x generation time

## 👤 LEVEL 5: Temporal & Personality Injection

Simulating human behavior.

**Objective.** Simulate cognitive fatigue and personality-typical mistakes.

**Implementation**

```javascript
// ADD to ContentGeneration.gs
function applyHumanSimulation(content, csvData, elementIndex, totalElements) {
  let result = content;

  // 1. Cognitive fatigue depending on position
  const fatigueLevel = calculateFatigue(elementIndex, totalElements);
  if (fatigueLevel > 0.6) {
    result = injectFatigueMarkers(result);
  }

  // 2. Consistent mistakes per personality
  result = injectPersonalityErrors(result, csvData.personality);

  // 3. Temporal style (morning/evening)
  const timeStyle = getTemporalStyle(new Date().getHours());
  result = applyTemporalStyle(result, timeStyle);

  return result;
}

function calculateFatigue(index, total) {
  // Realistic curve: energetic start → tired middle → late rebound
  const position = index / total;
  return Math.sin(position * Math.PI) * 0.8; // Peaks at 50%
}
```
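For example, with 10 elements the fatigue level at element 5 is sin(0.5π) × 0.8 = 0.8, above the 0.6 threshold, so fatigue markers are injected; elements 1 and 9 sit near sin(0.1π) × 0.8 ≈ 0.25 and stay untouched.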
**Tests to run**

- [ ] Generate articles at different times of day
- [ ] Validate realistic fatigue patterns
- [ ] Test consistent per-personality mistakes
- [ ] Measure authenticity score

**Validation criteria**

- ✅ Detection reduced: -40% vs Level 4
- ✅ Human patterns: realistic fatigue curve
- ✅ Consistent mistakes: each personality identifiable
- ✅ Overall quality: no notable degradation

## 🏆 LEVEL 6: Full Arsenal Integration

Optimized combination of all the techniques.

**Objective.** Orchestrate all the techniques intelligently and reach < 30% detection.

**Implementation**

```javascript
// MAIN ORCHESTRATOR
function generateWithFullArsenal(element, csvData, globalContext) {
  // 1. Selective Enhancement (Level 1)
  let content = generateWithSelectiveEnhancement(element, csvData);

  // 2. Adversarial base from the start (Level 3)
  content = regenerateWithAdversarialPrompt(content, element, csvData);

  // 3. Adaptive pattern breaking (Level 2)
  content = applyBasicPatternBreaking(content, csvData.personality);

  // 4. Cross-linguistic when eligible (Level 4)
  if (shouldApplyCrossLinguistic(element, globalContext)) {
    content = crossLinguisticWash(content, ['en', 'es'], 0.4);
  }

  // 5. Human simulation (Level 5)
  content = applyHumanSimulation(content, csvData, globalContext.elementIndex, globalContext.totalElements);

  // 6. Final quality assurance
  return finalQualityPass(content, csvData);
}
```

**Tests to run**

- [ ] Industrial-scale tests: 200+ articles
- [ ] 5+ different detectors
- [ ] Quality validation by a human panel
- [ ] Full performance/cost tests
- [ ] A/B test vs competitors

**FINAL validation criteria**

- ✅ < 30% AI detection on ALL detectors
- ✅ Quality ≥ human writing (expert panel)
- ✅ Acceptable performance (< 10 min/article)
- ✅ Positive ROI (cost justified by quality)
219
process_real.js
Normal file
@@ -0,0 +1,219 @@
// ========================================
// SCRIPT: process_real.js
// REAL GOOGLE SHEETS + DIGITALOCEAN PROCESSOR
// ========================================

const { readCSVDataWithXMLFileName, fetchXMLFromDigitalOceanSimple } = require('./lib/DigitalOceanWorkflow');
const { handleFullWorkflow } = require('./lib/Main');

/**
 * Main function that REALLY runs the whole process:
 * 1. Fetches the data from Google Sheets (row rowNumber)
 * 2. Fetches the XML from DigitalOcean (per the sheet's xmlFileName)
 * 3. Runs the full workflow
 */
async function processRealData(rowNumber) {
  console.log(`🚀 === REAL PROCESSING, ROW ${rowNumber} ===\n`);

  // Reduce console verbosity
  process.env.LOG_LEVEL = 'INFO';

  try {
    // 1. FETCH GOOGLE SHEETS DATA
    console.log('1️⃣ Fetching Google Sheets data...');
    const csvData = await readCSVDataWithXMLFileName(rowNumber);

    console.log(`✅ Data fetched:`);
    console.log(`   MC0: ${csvData.mc0}`);
    console.log(`   T0: ${csvData.t0}`);
    console.log(`   XML File: ${csvData.xmlFileName}`);
    console.log(`   Personality: ${csvData.personality?.nom || 'N/A'}`);

    // 2. FETCH XML FROM DIGITALOCEAN
    console.log('\n2️⃣ Fetching XML from DigitalOcean...');

    if (!csvData.xmlFileName) {
      throw new Error('XML file name missing in Google Sheets (column J)');
    }

    const xmlContent = await fetchXMLFromDigitalOceanSimple(csvData.xmlFileName);
    console.log(`✅ XML fetched: ${csvData.xmlFileName} (${xmlContent.length} characters)`);

    // 3. PREPARE THE WORKFLOW DATA
    console.log('\n3️⃣ Preparing workflow...');

    const workflowData = {
      csvData: csvData,
      xmlTemplate: Buffer.from(xmlContent).toString('base64'),
      source: 'real_gsheets_digitalocean',
      rowNumber: rowNumber
    };

    // 4. RUN THE FULL WORKFLOW
    console.log('4️⃣ Launching workflow (6 LLMs)...');
    const startTime = Date.now();

    const result = await handleFullWorkflow(workflowData);

    const duration = Date.now() - startTime;

    // 5. PRINT THE RESULTS
    console.log(`\n🎯 === RESULTS (${Math.round(duration/1000)}s) ===`);
    console.log(`✅ Success: ${result.success}`);
    console.log(`📊 Elements generated: ${result.elementsGenerated}`);
    console.log(`📝 Total words: ${result.stats?.wordCount || 'N/A'}`);
    console.log(`🤖 LLMs used: ${result.llmsUsed?.join(', ') || 'N/A'}`);
    console.log(`📄 Final XML: ${result.xmlContent?.length || 0} characters`);
    console.log(`🔍 Validation: ${result.validationReport?.status || 'N/A'}`);
    console.log(`💾 Article ID: ${result.articleStorage?.articleId || 'N/A'}`);

    if (result.validationReport?.errors?.length > 0) {
      console.log(`\n⚠️ Errors detected:`);
      result.validationReport.errors.forEach(error => {
        console.log(`   - ${error.type}: ${error.message}`);
      });
    }

    return result;

  } catch (error) {
    console.error(`\n❌ Error processing row ${rowNumber}:`, error.message);
    console.log('\n📋 See the detailed logs in:', `logs/seo-generator-${new Date().toISOString().split('T')[0]}.log`);
    throw error;
  }
}

/**
 * Process several rows in sequence
 */
async function processMultipleRealRows(rowNumbers) {
  console.log(`🔄 === MULTI-ROW PROCESSING ===`);
  console.log(`Rows to process: ${rowNumbers.join(', ')}\n`);

  const results = [];

  for (const rowNumber of rowNumbers) {
    try {
      console.log(`\n📍 === ROW ${rowNumber} ===`);
      const result = await processRealData(rowNumber);

      results.push({
        rowNumber,
        success: true,
        result
      });

      console.log(`✅ Row ${rowNumber} done\n`);

    } catch (error) {
      console.error(`❌ Row ${rowNumber} failed: ${error.message}\n`);

      results.push({
        rowNumber,
        success: false,
        error: error.message
      });
    }
  }

  // Final summary
  const successCount = results.filter(r => r.success).length;
  console.log(`\n🎯 === FINAL SUMMARY ===`);
  console.log(`✅ Succeeded: ${successCount}/${rowNumbers.length}`);
  console.log(`❌ Failed: ${rowNumbers.length - successCount}/${rowNumbers.length}`);

  return results;
}

/**
 * Simple single-row test without the full processing
 */
async function debugRealRow(rowNumber) {
  console.log(`🔍 === DEBUG ROW ${rowNumber} ===\n`);

  try {
    // 1. Test Google Sheets
    console.log('1️⃣ Testing Google Sheets...');
    const csvData = await readCSVDataWithXMLFileName(rowNumber);
    console.log('✅ Google Sheets OK');
    console.log(`   Data: ${csvData.mc0} | ${csvData.xmlFileName}`);

    // 2. Test DigitalOcean
    console.log('\n2️⃣ Testing DigitalOcean...');
    const xmlContent = await fetchXMLFromDigitalOceanSimple(csvData.xmlFileName);
    console.log('✅ DigitalOcean OK');
    console.log(`   XML: ${xmlContent.length} characters`);
    console.log(`   Start: ${xmlContent.substring(0, 100)}...`);

    return { csvData, xmlContent };

  } catch (error) {
    console.error(`❌ Debug failed:`, error.message);
    throw error;
  }
}

// Command-line usage
if (require.main === module) {
  const args = process.argv.slice(2);

  if (args.includes('--help')) {
    console.log(`
Usage: node process_real.js [options] [rowNumber(s)]

Options:
  --help          Show this help
  --debug         Debug mode (no full processing)
  --multi         Process several rows (e.g. --multi 2,3,4)

Examples:
  node process_real.js 2               # Process row 2
  node process_real.js --debug 2       # Debug row 2 only
  node process_real.js --multi 2,3,4   # Process rows 2, 3 and 4
`);
    process.exit(0);
  }

  const isDebug = args.includes('--debug');
  const isMulti = args.includes('--multi');

  let targetRows = [];

  if (isMulti) {
    const multiIndex = args.indexOf('--multi');
    const rowsArg = args[multiIndex + 1];
    if (rowsArg) {
      targetRows = rowsArg.split(',').map(n => parseInt(n.trim()));
    }
  } else {
    const rowNumber = parseInt(args.find(arg => !arg.startsWith('--'))) || 2;
    targetRows = [rowNumber];
  }

  // Run the processing
  (async () => {
    try {
      if (isDebug) {
        for (const row of targetRows) {
          await debugRealRow(row);
        }
      } else if (isMulti) {
        await processMultipleRealRows(targetRows);
      } else {
        await processRealData(targetRows[0]);
      }

      console.log('\n🎉 Finished successfully!');

    } catch (error) {
      console.error('\n💥 Failure:', error.message);
      process.exit(1);
    }
  })();
}

module.exports = {
  processRealData,
  processMultipleRealRows,
  debugRealRow
};
434
server.js
Normal file
@@ -0,0 +1,434 @@
const express = require('express');
const cors = require('cors');
const path = require('path');
require('dotenv').config();

const { logSh } = require('./lib/ErrorReporting'); // Unified logSh from ErrorReporting

// Main workflow imports (simplified version for bootstrap)
const { handleFullWorkflow, testMainWorkflow } = require('./lib/SimpleMain');
const { getBrainConfig } = require('./lib/BrainConfig');
const { testLLMManagerComplete } = require('./lib/LLMManager');
const { triggerAutonomousWorkflow, testDigitalOceanConnection, readCSVDataWithXMLFileName, fetchXMLFromDigitalOceanSimple } = require('./lib/DigitalOceanWorkflow');

const app = express();
const PORT = process.env.PORT || 3000;

// Middleware
app.use(express.json());
app.use(cors());
app.use(express.static('public')); // Serve static files

// HTML dashboard
app.get('/', (req, res) => {
  res.send(`
<!DOCTYPE html>
<html>
<head>
  <title>SEO Generator Server - Dashboard</title>
  <style>
    body { font-family: Arial, sans-serif; margin: 40px; background: #f5f5f5; }
    .container { max-width: 1200px; margin: 0 auto; background: white; padding: 30px; border-radius: 10px; box-shadow: 0 2px 10px rgba(0,0,0,0.1); }
    h1 { color: #333; text-align: center; margin-bottom: 30px; }
    .section { margin: 20px 0; padding: 20px; border: 1px solid #ddd; border-radius: 5px; }
    .button { display: inline-block; padding: 12px 24px; margin: 10px 5px; background: #007cba; color: white; text-decoration: none; border-radius: 5px; border: none; cursor: pointer; font-size: 14px; }
    .button:hover { background: #005a87; }
    .button.secondary { background: #28a745; }
    .button.warning { background: #ffc107; color: #333; }
    .status { padding: 10px; margin: 10px 0; border-radius: 5px; }
    .success { background: #d4edda; color: #155724; border: 1px solid #c3e6cb; }
    .info { background: #d1ecf1; color: #0c5460; border: 1px solid #bee5eb; }
    .result { background: #f8f9fa; padding: 15px; margin: 10px 0; border-radius: 5px; border: 1px solid #dee2e6; max-height: 400px; overflow-y: auto; }
    pre { margin: 0; white-space: pre-wrap; word-wrap: break-word; }
  </style>
</head>
<body>
  <div class="container">
    <h1>🚀 SEO Generator Server Dashboard</h1>

    <div class="status success">
      <strong>Status:</strong> Server up for ${Math.floor(process.uptime())} seconds
      <br><strong>Node version:</strong> ${process.version}
      <br><strong>Timestamp:</strong> ${new Date().toISOString()}
    </div>

    <div class="section">
      <h2>🎯 Main Workflow</h2>
      <p>Automatic Google Sheets processing with SEO content generation.</p>
      <button class="button" onclick="checkAndProcess()">🔍 Check Google Sheets & Process</button>
      <button class="button secondary" onclick="testWorkflow()">🧪 Full Workflow Test</button>
      <div id="workflow-result" class="result" style="display:none;"></div>
    </div>

    <div class="section">
      <h2>🤖 LLM Tests</h2>
      <p>Check AI model connectivity and behavior.</p>
      <button class="button warning" onclick="testLLMs()">🌐 LLM Connectivity Test</button>
      <div id="llm-result" class="result" style="display:none;"></div>
    </div>

    <div class="section">
      <h2>📊 Configuration</h2>
      <p>Data and AI personality management.</p>
      <button class="button" onclick="testConfig()">⚙️ Configuration Test</button>
      <div id="config-result" class="result" style="display:none;"></div>
    </div>

    <div class="section">
      <h2>🌊 DigitalOcean Workflow</h2>
      <p>Fetch the XML from DigitalOcean and process automatically.</p>
      <button class="button" onclick="testDOConnection()">🧪 DigitalOcean Test</button>
      <button class="button secondary" onclick="processDOWorkflow()">🚀 Run DO Workflow</button>
      <div id="do-result" class="result" style="display:none;"></div>
    </div>

    <div class="section">
      <h2>🔗 Network Tests</h2>
      <button class="button" onclick="pingServices()">📡 Ping Services</button>
      <div id="ping-result" class="result" style="display:none;"></div>
    </div>
  </div>

  <script>
    async function apiCall(url, resultDiv) {
      const element = document.getElementById(resultDiv);
      element.style.display = 'block';
      element.innerHTML = '<pre>⏳ Loading...</pre>';

      try {
        const response = await fetch(url);
        const data = await response.json();
        element.innerHTML = '<pre>' + JSON.stringify(data, null, 2) + '</pre>';
      } catch (error) {
        element.innerHTML = '<pre style="color: red;">❌ Error: ' + error.message + '</pre>';
      }
    }

    function checkAndProcess() {
      apiCall('/api/check-and-process', 'workflow-result');
    }

    function testWorkflow() {
      apiCall('/api/test-workflow', 'workflow-result');
    }

    function testLLMs() {
      apiCall('/api/test-llm', 'llm-result');
    }

    function testConfig() {
      apiCall('/api/test-config', 'config-result');
    }

    function testDOConnection() {
      apiCall('/api/test-digitalocean', 'do-result');
    }

    function processDOWorkflow() {
      const rowNumber = prompt('Row number to process:', '2');
      if (rowNumber) {
        apiCall('/api/digitalocean-workflow/' + rowNumber, 'do-result');
      }
    }

    function pingServices() {
      apiCall('/ping-all', 'ping-result');
    }
  </script>
</body>
</html>
  `);
});

// API routes
app.get('/api/status', (req, res) => {
  res.json({
    success: true,
    status: 'running',
    uptime: process.uptime(),
    timestamp: new Date().toISOString(),
    node_version: process.version,
    memory: process.memoryUsage()
  });
});

// Main workflow test
app.get('/api/test-workflow', async (req, res) => {
  try {
    logSh('🧪 Testing main workflow...', 'INFO');
    const result = await testMainWorkflow();
    res.json({
      success: true,
      message: 'Workflow test finished successfully',
      result: result
    });
  } catch (error) {
    logSh('❌ Workflow test error: ' + error.message, 'ERROR');
    res.status(500).json({
      success: false,
      error: error.message,
      stack: error.stack
    });
  }
});

// LLM tests
app.get('/api/test-llm', async (req, res) => {
  try {
    logSh('🌐 Testing LLM connectivity...', 'INFO');
    const result = await testLLMManagerComplete();
    res.json({
      success: true,
      message: 'LLM test finished',
      result: result
    });
  } catch (error) {
    logSh('❌ LLM test error: ' + error.message, 'ERROR');
    res.status(500).json({
      success: false,
      error: error.message
    });
  }
});

// Configuration test
app.get('/api/test-config', async (req, res) => {
  try {
    logSh('⚙️ Testing configuration...', 'INFO');
    const result = await getBrainConfig(2);
    res.json({
      success: true,
      message: 'Configuration test finished',
      result: result
    });
  } catch (error) {
    logSh('❌ Config test error: ' + error.message, 'ERROR');
    res.status(500).json({
      success: false,
      error: error.message
    });
  }
});

// DigitalOcean connection test
app.get('/api/test-digitalocean', async (req, res) => {
  try {
    logSh('🧪 Testing DigitalOcean connection...', 'INFO');
    const result = await testDigitalOceanConnection();

    res.json({
      success: result,
      message: result ? 'DigitalOcean connection works' : 'DigitalOcean connection failed',
      timestamp: new Date().toISOString()
    });

  } catch (error) {
    logSh('❌ DO test error: ' + error.message, 'ERROR');
    res.status(500).json({
      success: false,
      error: error.message
    });
  }
});

// Trigger the DigitalOcean workflow for a specific row
app.get('/api/digitalocean-workflow/:rowNumber', async (req, res) => {
  try {
    const rowNumber = parseInt(req.params.rowNumber);

    if (!rowNumber || rowNumber < 2) {
      return res.status(400).json({
        success: false,
        error: 'Invalid row number (minimum 2)'
      });
    }

    logSh(`🌊 Triggering DigitalOcean workflow for row ${rowNumber}...`, 'INFO');

    const result = await triggerAutonomousWorkflow(rowNumber);

    res.json({
      success: true,
      message: `✅ DigitalOcean workflow for row ${rowNumber} finished`,
      rowNumber: rowNumber,
      result: result,
      source: 'digitalocean_autonomous'
    });

  } catch (error) {
    logSh('❌ DO workflow error: ' + error.message, 'ERROR');
    res.status(500).json({
      success: false,
      error: error.message,
      rowNumber: req.params.rowNumber,
      stack: error.stack
    });
  }
});
||||
|
||||
// Récupérer données CSV pour une ligne (debug)
|
||||
app.get('/api/digitalocean-csv/:rowNumber', async (req, res) => {
|
||||
try {
|
||||
const rowNumber = parseInt(req.params.rowNumber);
|
||||
|
||||
logSh(`📋 Récupération CSV ligne ${rowNumber}...`, 'INFO'); // Using logSh instead of console.log
|
||||
|
||||
const csvData = await readCSVDataWithXMLFileName(rowNumber);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'Données CSV récupérées',
|
||||
rowNumber: rowNumber,
|
||||
csvData: csvData
|
||||
});
|
||||
|
||||
} catch (error) {
|
||||
logSh('❌ Erreur CSV DO: ' + error.message, 'ERROR'); // Using logSh instead of console.error
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: error.message,
|
||||
rowNumber: req.params.rowNumber
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Récupérer XML depuis DigitalOcean (debug)
|
||||
app.get('/api/digitalocean-xml/:fileName', async (req, res) => {
|
||||
try {
|
||||
const fileName = req.params.fileName;
|
||||
|
||||
logSh(`📄 Récupération XML: ${fileName}`, 'INFO'); // Using logSh instead of console.log
|
||||
|
||||
const xmlContent = await fetchXMLFromDigitalOceanSimple(fileName);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: 'XML récupéré depuis DigitalOcean',
|
||||
fileName: fileName,
|
||||
contentLength: xmlContent.length,
|
||||
content: xmlContent.substring(0, 500) + '...' // Premier extrait
|
||||
});
|
||||
|
||||
} catch (error) {
|
||||
logSh('❌ Erreur XML DO: ' + error.message, 'ERROR'); // Using logSh instead of console.error
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: error.message,
|
||||
fileName: req.params.fileName
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// Vérifier Google Sheets et traiter
|
||||
app.get('/api/check-and-process', async (req, res) => {
|
||||
try {
|
||||
logSh('🔍 Vérification Google Sheets...', 'INFO'); // Using logSh instead of console.log
|
||||
|
||||
// TODO: Implémenter vérification Google Sheets
|
||||
// Pour l'instant, on simule avec des données test
|
||||
const testData = {
|
||||
csvData: {
|
||||
mc0: 'plaque signalétique professionnelle',
|
||||
t0: 'Découvrez nos plaques signalétiques sur mesure',
|
||||
personality: { nom: 'Marc', style: 'professionnel' },
|
||||
tMinus1: 'Signalétique entreprise',
|
||||
mcPlus1: 'plaque dibond,plaque aluminium,plaque gravée,signalétique bureau',
|
||||
tPlus1: 'Plaque Dibond,Plaque Aluminium,Plaque Gravée,Signalétique Bureau'
|
||||
},
|
||||
xmlTemplate: Buffer.from('<?xml version="1.0"?><article><h1>|Title_Main{{T0}}|</h1><p>|Content_Intro{{MC0}}|</p></article>').toString('base64'),
|
||||
source: 'node_server_check'
|
||||
};
|
||||
|
||||
const result = await handleFullWorkflow(testData);
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
message: '✅ Traitement Google Sheets terminé',
|
||||
processed: true,
|
||||
result: result
|
||||
});
|
||||
|
||||
} catch (error) {
|
||||
logSh('❌ Erreur check-and-process: ' + error.message, 'ERROR'); // Using logSh instead of console.error
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: error.message,
|
||||
stack: error.stack
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// 🆕 ROUTE PING MULTIPLE (Tests réseau)
|
||||
app.get('/ping-all', async (req, res) => {
|
||||
const targets = [
|
||||
'https://www.google.com',
|
||||
'https://api.openai.com',
|
||||
'https://api.anthropic.com',
|
||||
'https://generativelanguage.googleapis.com'
|
||||
];
|
||||
|
||||
logSh('🔍 Ping services réseau...', 'INFO'); // Using logSh instead of console.log
|
||||
|
||||
const results = await Promise.allSettled(
|
||||
targets.map(async (url) => {
|
||||
const startTime = Date.now();
|
||||
try {
|
||||
const response = await fetch(url, { method: 'HEAD', timeout: 5000 });
|
||||
return {
|
||||
url,
|
||||
success: true,
|
||||
status: response.status,
|
||||
duration_ms: Date.now() - startTime,
|
||||
message: `✅ ${url} accessible`
|
||||
};
|
||||
} catch (error) {
|
||||
return {
|
||||
url,
|
||||
success: false,
|
||||
error: error.message,
|
||||
duration_ms: Date.now() - startTime,
|
||||
message: `❌ ${url} inaccessible`
|
||||
};
|
||||
}
|
||||
})
|
||||
);
|
||||
|
||||
const successCount = results.filter(r => r.value?.success || r.status === 'fulfilled').length;
|
||||
|
||||
res.json({
|
||||
timestamp: new Date().toISOString(),
|
||||
summary: `${successCount}/${targets.length} services accessibles`,
|
||||
results: results.map(r => r.value || r.reason)
|
||||
});
|
||||
});
|
||||
|
||||
// Middleware de gestion d'erreurs global
|
||||
app.use((error, req, res, next) => {
|
||||
logSh('❌ Erreur serveur: ' + error.message, 'ERROR'); // Using logSh instead of console.error
|
||||
res.status(500).json({
|
||||
success: false,
|
||||
error: 'Erreur serveur interne',
|
||||
message: error.message,
|
||||
timestamp: new Date().toISOString()
|
||||
});
|
||||
});
|
||||
|
||||
// Route 404
|
||||
app.use('*', (req, res) => {
|
||||
res.status(404).json({
|
||||
success: false,
|
||||
error: 'Route non trouvée',
|
||||
path: req.originalUrl,
|
||||
message: 'Cette route n\'existe pas'
|
||||
});
|
||||
});
|
||||
|
||||
// Démarrage serveur
|
||||
app.listen(PORT, () => {
|
||||
logSh(`🚀 === SEO Generator Server Dashboard ===`, 'INFO'); // Using logSh instead of console.log
|
||||
logSh(`🌐 Interface Web: http://localhost:${PORT}`, 'INFO'); // Using logSh instead of console.log
|
||||
logSh(`📊 API Status: http://localhost:${PORT}/api/status`, 'INFO'); // Using logSh instead of console.log
|
||||
logSh(`🔗 Tests Réseau: http://localhost:${PORT}/ping-all`, 'INFO'); // Using logSh instead of console.log
|
||||
logSh(`✅ Serveur prêt à traiter les workflows SEO !`, 'INFO'); // Using logSh instead of console.log
|
||||
logSh(`🎯 Version: Phase 2 Anti-Détection Ready`, 'INFO'); // Using logSh instead of console.log
|
||||
});
|
||||
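A quick way to exercise the routes above once the server is up — a sketch only, assuming the server listens locally on PORT=3000:

// Illustrative smoke check of the routes defined above (assumes PORT=3000).
const base = 'http://localhost:3000';
for (const route of ['/api/status', '/api/test-digitalocean', '/ping-all']) {
  const data = await (await fetch(base + route)).json();
  console.log(route, JSON.stringify(data).slice(0, 120));
}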
15
tests/_helpers/env.js
Normal file
@ -0,0 +1,15 @@
import assert from 'node:assert';

export function requireEnv(keys) {
  const missing = [];
  for (const k of keys) {
    if (!process.env[k] || String(process.env[k]).trim() === '') missing.push(k);
  }
  return { ok: missing.length === 0, missing };
}

export const DEFAULT_REQUIRED = [
  // adapt to your project
  'OPENAI_API_KEY', 'ANTHROPIC_API_KEY', 'GOOGLE_API_KEY',
  'MAX_COST_PER_ARTICLE', 'TRACE_PATH'
];
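A minimal usage sketch for the helper above (illustrative, not part of the commit): gate a test on required variables and warn instead of failing hard.

// Illustrative usage of requireEnv; the import path assumes you are in tests/.
import { requireEnv, DEFAULT_REQUIRED } from './_helpers/env.js';

const { ok, missing } = requireEnv(DEFAULT_REQUIRED);
if (!ok) console.warn('[SKIP] missing env vars:', missing.join(', '));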
26
tests/_helpers/fakeLLMClient.js
Normal file
@ -0,0 +1,26 @@
export class FakeLLMClient {
  constructor(opts = {}) {
    this.delayMs = opts.delayMs ?? 10;
    this.plan = opts.plan ?? []; // sequence of events: 'ok' | {err:{statusCode:429|500|...}} | fn
    this.calls = [];
    this._i = 0;
    this.defaultUsage = { prompt_tokens: 100, completion_tokens: 150 };
    this.prefix = opts.prefix ?? 'FAKE';
  }
  async invoke(input, { model = 'fake-1' } = {}) {
    this.calls.push({ input, model, t: Date.now() });
    await new Promise(r => setTimeout(r, this.delayMs));
    const step = this.plan[this._i++] ?? 'ok';
    if (typeof step === 'function') return step(input, { model });
    if (step?.err) {
      const e = new Error(`fake error ${step.err.statusCode}`);
      e.statusCode = step.err.statusCode;
      throw e;
    }
    const text = typeof input === 'string' ? input : JSON.stringify(input);
    return {
      completion: `${this.prefix}[${model}]::` + text.slice(0, 80),
      usage: this.defaultUsage
    };
  }
}
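The plan array drives the fake call by call, which is what makes the retry and circuit-breaker tests below deterministic. A sketch of a plan that fails once with a 429 and then succeeds:

// Illustrative: the first call throws a 429, the second returns a completion.
const fake = new FakeLLMClient({ delayMs: 1, plan: [{ err: { statusCode: 429 } }, 'ok'] });

await fake.invoke('hello', { model: 'fake-1' }).catch(e => console.log(e.statusCode)); // 429
const out = await fake.invoke('hello', { model: 'fake-1' });
console.log(out.completion); // "FAKE[fake-1]::hello"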
10
tests/_helpers/memoryStorage.js
Normal file
@ -0,0 +1,10 @@
export class MemoryArticleStorage {
  constructor() {
    this._map = new Map(); // key -> {data, final}
  }
  async writeDraft(key, data) { this._map.set(key, { data, final: false }); }
  async writeFinal(key, data) { this._map.set(key, { data, final: true }); }
  async hasFinal(key) { return this._map.get(key)?.final === true; }
  async readFinal(key) { return this._map.get(key)?.data; }
  count() { return this._map.size; }
}
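A sketch of the draft/final lifecycle the store models; hasFinal only flips once writeFinal has run, which is what the idempotence checks below rely on.

const store = new MemoryArticleStorage();
await store.writeDraft('article-1', { html: '<p>draft</p>' });
console.log(await store.hasFinal('article-1')); // false
await store.writeFinal('article-1', { html: '<article>done</article>' });
console.log(await store.hasFinal('article-1')); // true
console.log(store.count()); // 1 — the final write replaced the draft under the same key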
24
tests/_helpers/mockLLMManager.js
Normal file
@ -0,0 +1,24 @@
/**
 * Mock LLMManager: returns deterministic outputs based on the input,
 * and can simulate transient errors (429/500) via flags.
 */
export class MockLLMManager {
  constructor({ failTimes = 0, failCode = 429 } = {}) {
    this.failTimes = failTimes;
    this.failCode = failCode;
    this.calls = [];
  }
  async callModel({ provider = 'mock', model = 'mock-1', input }) {
    this.calls.push({ provider, model, input });
    if (this.failTimes > 0) {
      this.failTimes--;
      const err = new Error(`Mock transient ${this.failCode}`);
      err.statusCode = this.failCode;
      throw err;
    }
    // simple deterministic output
    const text = (typeof input === 'string' ? input : JSON.stringify(input));
    const completion = `MOCK[${model}]::` + text.slice(0, 60);
    return { completion, usage: { prompt_tokens: 100, completion_tokens: 200 } };
  }
}
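Illustrative: a manager configured to fail once with a 500, showing the transient-then-success sequence the retry tests lean on.

const mock = new MockLLMManager({ failTimes: 1, failCode: 500 });
await mock.callModel({ input: 'x' }).catch(e => console.log(e.statusCode)); // 500
const ok = await mock.callModel({ input: 'x' });
console.log(ok.completion); // "MOCK[mock-1]::x"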
38
tests/_helpers/path.js
Normal file
@ -0,0 +1,38 @@
import fs from 'fs';
import path from 'path';
import { pathToFileURL } from 'url';
import { createRequire } from 'module';

const ROOT = process.cwd();
const CANDIDATES = ['', 'lib', 'src', '.'];
const req = createRequire(import.meta.url);

export function resolveModule(name) {
  for (const base of CANDIDATES) {
    const p = path.join(ROOT, base, `${name}.js`);
    if (fs.existsSync(p)) return p;
  }
  return null;
}

export async function safeImport(name) {
  const p = resolveModule(name);
  if (!p) return { ok: false, reason: `Module not found: ${name}` };
  try {
    const mod = await import(pathToFileURL(p).href);
    return normalize(mod, p);
  } catch (e1) {
    try {
      const mod = req(p);
      return normalize(mod, p);
    } catch (e2) {
      return { ok: false, reason: `Failed to load ${name}: ${e1.message} | ${e2.message}` };
    }
  }
}

function normalize(mod, p) {
  const m = mod?.default && typeof mod.default === 'object'
    ? { ...mod.default, ...mod }
    : mod;
  return { ok: true, mod: m, path: p };
}
43
tests/llm/llmmanager.circuitbreaker.test.js
Normal file
@ -0,0 +1,43 @@
import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';
import { FakeLLMClient } from '../_helpers/fakeLLMClient.js';

function skip(msg) { console.warn('[SKIP]', msg); }

test('LLMManager: circuit breaker opens after consecutive failures (optional)', async () => {
  const res = await safeImport('LLMManager');
  if (!res.ok) { skip(res.reason); return; }
  const L = res.mod;
  if (typeof L.__setClient !== 'function' || typeof L.__setRetryPolicy !== 'function') {
    skip('Missing __setClient/__setRetryPolicy hooks');
    return;
  }

  // Always simulate a 500
  const fakeFail = new FakeLLMClient({ plan: [{ err: { statusCode: 500 } }] });
  fakeFail.invoke = async () => { throw Object.assign(new Error('500'), { statusCode: 500 }); };
  L.__setClient('mock', fakeFail);

  L.__setRetryPolicy({
    retries: 0,
    baseMs: 1,
    isTransient: (code) => [500].includes(code),
    openAfter: 3, // <-- if your implementation supports this field
    cooldownMs: 100 // <-- likewise
  });

  let fails = 0;
  for (let i = 0; i < 3; i++) {
    try { await L.callModel({ provider: 'mock', model: 'fake-1', input: `try-${i}` }); }
    catch (e) { fails++; }
  }
  assert.equal(fails, 3);

  // After 3 failures the breaker should open (if implemented)
  let breakerThrew = false;
  try { await L.callModel({ provider: 'mock', model: 'fake-1', input: 'blocked' }); }
  catch (e) { breakerThrew = true; }
  // SKIP is acceptable if your lib does not implement it
  if (!breakerThrew) skip('Circuit breaker not implemented; test informational only');
});
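The test only probes semantics: after openAfter consecutive failures the breaker opens, then rejects fast until cooldownMs elapses. A minimal sketch of those semantics — a hypothetical helper, not the repo's implementation:

// Hypothetical circuit breaker matching the openAfter/cooldownMs fields above.
function makeBreaker({ openAfter = 3, cooldownMs = 100 } = {}) {
  let failures = 0;
  let openedAt = 0;
  return async function exec(fn) {
    if (openedAt && Date.now() - openedAt < cooldownMs) {
      throw new Error('circuit open'); // fail fast while cooling down
    }
    try {
      const out = await fn();
      failures = 0;
      openedAt = 0; // close the breaker on success
      return out;
    } catch (e) {
      if (++failures >= openAfter) openedAt = Date.now(); // open after N consecutive failures
      throw e;
    }
  };
}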
30
tests/llm/llmmanager.concurrency.test.js
Normal file
@ -0,0 +1,30 @@
import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';
import { FakeLLMClient } from '../_helpers/fakeLLMClient.js';

function skip(msg) { console.warn('[SKIP]', msg); }

test('LLMManager: enforces concurrency limit', async () => {
  const res = await safeImport('LLMManager');
  if (!res.ok) { skip(res.reason); return; }
  const L = res.mod;

  if (typeof L.__setClient !== 'function' || typeof L.__setConcurrency !== 'function') {
    skip('Missing __setClient/__setConcurrency hooks');
    return;
  }

  const fake = new FakeLLMClient({ delayMs: 50 });
  L.__setClient('mock', fake);
  L.__setConcurrency(2); // request at most 2 in flight

  const N = 5;
  const calls = Array.from({ length: N }, (_, i) => L.callModel({ provider: 'mock', model: 'fake-1', input: `#${i}` }));
  const t0 = Date.now();
  await Promise.all(calls);
  const t1 = Date.now();

  // With a 50ms delay and concurrency=2, 5 requests run in ~3 waves => duration >= 150ms
  // (we assert 120ms to leave some margin)
  assert.ok((t1 - t0) >= 120, `expected batching effect, got ${t1 - t0}ms`);
});
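What __setConcurrency(2) is expected to do under the hood is a promise pool: at most `limit` calls in flight, the rest queued. A minimal sketch (hypothetical helper, not the repo's code):

function makeLimiter(limit) {
  let active = 0;
  const queue = [];
  const pump = () => {
    if (active >= limit || queue.length === 0) return;
    active++;
    const { fn, resolve, reject } = queue.shift();
    fn().then(resolve, reject).finally(() => { active--; pump(); });
  };
  return (fn) => new Promise((resolve, reject) => {
    queue.push({ fn, resolve, reject });
    pump();
  });
}

// With limit=2 and 5 tasks of 50ms each, the pool drains in ~3 waves (~150ms),
// which is the timing effect the test asserts (with margin).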
27
tests/llm/llmmanager.contract.test.js
Normal file
@ -0,0 +1,27 @@
import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';
import { FakeLLMClient } from '../_helpers/fakeLLMClient.js';

function skip(msg) { console.warn('[SKIP]', msg); }

test('LLMManager: callModel happy path (mock client)', async () => {
  const res = await safeImport('LLMManager');
  if (!res.ok) { skip(res.reason); return; }
  const L = res.mod;

  // Hooks required for injection
  if (typeof L.__setClient !== 'function' || typeof L.callModel !== 'function') {
    skip('LLMManager test hooks not found (__setClient) or callModel missing');
    return;
  }

  const fake = new FakeLLMClient({ delayMs: 5, prefix: 'TEST' });
  L.__setClient('mock', fake);

  const out = await L.callModel({ provider: 'mock', model: 'fake-1', input: 'Hello' });
  assert.ok(out);
  const text = out.completion || out.text || out.output;
  assert.equal(typeof text, 'string');
  assert.ok(/TEST\[fake-1\]::/.test(text), 'completion should come from fake client');
});
29
tests/llm/llmmanager.cost.test.js
Normal file
@ -0,0 +1,29 @@
import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';
import { FakeLLMClient } from '../_helpers/fakeLLMClient.js';

function skip(msg) { console.warn('[SKIP]', msg); }

test('LLMManager: returns usage and computes cost if table provided', async () => {
  const res = await safeImport('LLMManager');
  if (!res.ok) { skip(res.reason); return; }
  const L = res.mod;

  if (typeof L.__setClient !== 'function' || typeof L.__setCosts !== 'function') {
    skip('Missing __setClient/__setCosts hooks');
    return;
  }

  const fake = new FakeLLMClient();
  L.__setClient('mock', fake);
  L.__setCosts({ 'mock:fake-1': { in: 0.001, out: 0.003 } }); // $/1k tokens

  const out = await L.callModel({ provider: 'mock', model: 'fake-1', input: 'price me' });
  const usage = out.usage || out.meta?.usage;
  assert.ok(usage, 'usage expected');
  const cost = out.__meta?.cost ?? out.cost;
  assert.ok(typeof cost === 'number', 'cost numeric expected');
  // 100 in, 150 out => 0.1k*0.001 + 0.15k*0.003 = 0.0001 + 0.00045 = 0.00055
  assert.ok(cost > 0 && cost < 0.001, `cost looks off: ${cost}`);
});
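The arithmetic the test checks, written out: rates are dollars per 1k tokens, so the cost is a linear combination of prompt and completion tokens. A sketch of the computation the manager is assumed to perform:

// Hypothetical cost computation matching the test's expectation ($ per 1k tokens).
function computeCost(usage, rate) {
  return (usage.prompt_tokens / 1000) * rate.in
       + (usage.completion_tokens / 1000) * rate.out;
}

computeCost({ prompt_tokens: 100, completion_tokens: 150 }, { in: 0.001, out: 0.003 });
// => 0.0001 + 0.00045 = 0.00055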
38
tests/llm/llmmanager.retry.test.js
Normal file
@ -0,0 +1,38 @@
import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';
import { FakeLLMClient } from '../_helpers/fakeLLMClient.js';

function skip(msg) { console.warn('[SKIP]', msg); }

test('LLMManager: retries on transient (429), not on logical (400)', async () => {
  const res = await safeImport('LLMManager');
  if (!res.ok) { skip(res.reason); return; }
  const L = res.mod;

  if (typeof L.__setClient !== 'function' || typeof L.__setRetryPolicy !== 'function') {
    skip('Missing __setClient/__setRetryPolicy hooks');
    return;
  }

  L.__setRetryPolicy({
    retries: 2,
    baseMs: 1,
    isTransient: (code) => [429, 500, 502, 503, 504].includes(code)
  });

  const fakeTransient = new FakeLLMClient({
    plan: [{ err: { statusCode: 429 } }, { err: { statusCode: 429 } }, 'ok']
  });
  L.__setClient('mock', fakeTransient);
  const ok = await L.callModel({ provider: 'mock', model: 'fake-1', input: 'X' });
  assert.ok(ok.completion, 'should succeed after retries');

  const fakeLogical = new FakeLLMClient({
    plan: [{ err: { statusCode: 400 } }]
  });
  L.__setClient('mock', fakeLogical);
  let threw = false;
  try { await L.callModel({ provider: 'mock', model: 'fake-1', input: 'Y' }); } catch (e) { threw = true; }
  assert.ok(threw, 'should not retry logical errors (400)');
});
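A minimal retry loop consistent with the policy shape above ({retries, baseMs, isTransient}): transient status codes are retried with exponential backoff, logical errors bubble immediately. Hypothetical helper, not the repo's implementation:

async function withRetries(fn, { retries = 2, baseMs = 1, isTransient = () => false } = {}) {
  for (let attempt = 0; ; attempt++) {
    try {
      return await fn();
    } catch (e) {
      if (attempt >= retries || !isTransient(e.statusCode)) throw e; // logical error or attempts exhausted
      await new Promise(r => setTimeout(r, baseMs * 2 ** attempt));  // back off, then retry
    }
  }
}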
25
tests/llm/llmmanager.timeout.test.js
Normal file
@ -0,0 +1,25 @@
import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';
import { FakeLLMClient } from '../_helpers/fakeLLMClient.js';

function skip(msg) { console.warn('[SKIP]', msg); }

test('LLMManager: supports abort/timeout behavior', async () => {
  const res = await safeImport('LLMManager');
  if (!res.ok) { skip(res.reason); return; }
  const L = res.mod;

  if (typeof L.__setClient !== 'function') { skip('Missing __setClient'); return; }

  const fake = new FakeLLMClient({ delayMs: 200 });
  L.__setClient('mock', fake);

  const ac = new AbortController();
  const p = L.callModel({ provider: 'mock', model: 'fake-1', input: 'slow', abortSignal: ac.signal });
  setTimeout(() => ac.abort(), 50);

  let aborted = false;
  try { await p; } catch (e) { aborted = true; }
  assert.ok(aborted, 'expected abort to be propagated');
});
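One way a manager can honor abortSignal without cooperation from the underlying client is to race the call against the signal; a sketch:

// Hypothetical abort wiring: reject as soon as the signal fires.
function raceWithAbort(promise, signal) {
  if (!signal) return promise;
  return Promise.race([
    promise,
    new Promise((_, reject) => {
      const onAbort = () => reject(new Error('aborted'));
      if (signal.aborted) return onAbort();
      signal.addEventListener('abort', onAbort, { once: true });
    })
  ]);
}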
81
tests/llm/pipeline-dryrun.test.js
Normal file
@ -0,0 +1,81 @@
import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';
import { MemoryArticleStorage } from '../_helpers/memoryStorage.js';
import { MockLLMManager } from '../_helpers/mockLLMManager.js';

function skip(msg) { console.warn('[SKIP]', msg); }

test('Pipeline dry-run with mock LLM returns structured article', async (t) => {
  // safeImport is async, so each lookup must be awaited
  const extr = await safeImport('ElementExtraction');
  const gen = await safeImport('ContentGeneration');
  const asm = await safeImport('ContentAssembly');
  const enh = await safeImport('SelectiveEnhancement');
  const miss = await safeImport('MissingKeywords');
  const utils = await safeImport('Utils');
  const stor = new MemoryArticleStorage();
  const llm = new MockLLMManager();

  if (![extr, gen, asm, enh, miss].every(r => r.ok)) {
    skip('One or more pipeline modules missing; dry-run skipped.');
    return;
  }

  const ElementExtraction = extr.mod;
  const ContentGeneration = gen.mod;
  const ContentAssembly = asm.mod;
  const SelectiveEnh = enh.mod;
  const MissingKeywords = miss.mod;

  // Minimal inputs (adapt to your contracts)
  const inputs = {
    instructions: 'Write about hunting jackets: materials, weatherproofing, sizes.',
    persona: { name: 'Persona A', tone: 'pragmatic' },
    template: '<article><h1>{{title}}</h1>{{body}}</article>',
    seed: 42
  };

  // Step 1: extraction
  const elements = await (ElementExtraction.extractElements
    ? ElementExtraction.extractElements(inputs)
    : { topics: ['materials', 'weatherproofing', 'sizes'] });

  assert.ok(elements, 'elements should be produced');

  // Step 2: generation (mocked via simple injection if the API supports a client)
  // If ContentGeneration accepts an LLM param we use it; otherwise simulate simply:
  const parts = await (ContentGeneration.generateArticleParts
    ? ContentGeneration.generateArticleParts({ inputs, elements, llm })
    : (Array.isArray(elements.topics) ? elements.topics : Object.keys(elements))
        .map(t => ({ key: String(t), text: `MOCK SECTION ${t}` })));

  assert.ok(parts && Array.isArray(parts), 'parts array expected');
  assert.ok(parts.length > 0, 'non-empty parts');

  // Step 3: selective enhancement (optional)
  const enhanced = await (SelectiveEnh.enhanceParts
    ? SelectiveEnh.enhanceParts({ parts, llm })
    : parts);

  assert.ok(enhanced && Array.isArray(enhanced), 'enhanced parts array');

  // Step 4: missing keywords
  const completed = await (MissingKeywords.fillMissingKeywords
    ? MissingKeywords.fillMissingKeywords({ parts: enhanced, targetKeywords: ['chasse', 'imperméable'], llm })
    : enhanced);

  // Step 5: assembly
  const html = await (ContentAssembly.assembleArticle
    ? ContentAssembly.assembleArticle({ template: inputs.template, parts: completed, meta: { title: 'Veste de chasse: guide complet' } })
    : `<article>${completed.map(p => `<section>${p.text}</section>`).join('')}</article>`);

  assert.equal(typeof html, 'string');
  assert.ok(html.includes('<article'), 'html should be article-like');

  // Simulated "final storage"
  const key = JSON.stringify(inputs); // simplified
  await stor.writeFinal(key, { html });

  assert.ok(await stor.hasFinal(key), 'final should be stored');
});
32
tests/llm/retry-logic.test.js
Normal file
@ -0,0 +1,32 @@
import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';
import { MockLLMManager } from '../_helpers/mockLLMManager.js';

test('LLM transient error is retried or bubbled cleanly', async () => {
  const mgrRes = await safeImport('LLMManager');
  if (!mgrRes.ok || typeof mgrRes.mod.callModel !== 'function') {
    console.warn('[SKIP] LLMManager.callModel missing');
    return;
  }
  // We test our own mock first to verify the desired behavior,
  // then call the "real" one and check at least that errors bubble up cleanly.
  const mock = new MockLLMManager({ failTimes: 2, failCode: 429 });
  await mock.callModel({ input: 'test' }).catch(e => e); // failure 1
  await mock.callModel({ input: 'test' }).catch(e => e); // failure 2
  // after 2 failures, the third call must pass:
  const ok2 = await mock.callModel({ input: 'test 2' });
  assert.ok(ok2.completion?.startsWith('MOCK'));

  // Real impl: at minimum, it should throw an Error with a statusCode when there is no retry
  let threw = false;
  try {
    await mgrRes.mod.callModel({ provider: 'X', model: 'Y', input: 'Z', __forceFail429: true });
  } catch (e) {
    threw = true;
    // a statusCode is not mandatory, but if present it should be a transient code
    if (e?.statusCode) {
      assert.ok([429, 500, 502, 503, 504].includes(e.statusCode));
    }
  }
  assert.ok(threw, 'real callModel should throw or handle gracefully when forced to fail');
});
13
tests/setup-env.js
Normal file
@ -0,0 +1,13 @@
// tests/setup-env.js
import fs from 'fs';
import path from 'path';

const p = path.join(process.cwd(), '.env');
if (fs.existsSync(p)) {
  // lazy dotenv without a dependency: Node 20+ does not load .env natively;
  // install 'dotenv' if you prefer.
  const lines = fs.readFileSync(p, 'utf8').split(/\r?\n/);
  for (const line of lines) {
    const m = line.match(/^\s*([A-Z0-9_]+)\s*=\s*(.*)\s*$/i);
    if (m) process.env[m[1]] = m[2].replace(/^"|"$/g, '');
  }
}
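The loose parse above strips surrounding quotes but keeps escape sequences literal, so multi-line secrets stored with \n escapes (a common pattern for private keys) stay single-line. A stricter value parse, as a sketch:

// Hypothetical stricter parse: strip matching quotes, then unescape \n.
const unquote = (v) => {
  const m = v.match(/^"(.*)"$/s);
  const raw = m ? m[1] : v;
  return raw.replace(/\\n/g, '\n');
};
// unquote('"-----BEGIN PRIVATE KEY-----\\n..."') yields real newlines in process.env.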
12
tests/smoke/config.test.js
Normal file
@ -0,0 +1,12 @@
import test from 'node:test';
import assert from 'node:assert';
import { requireEnv, DEFAULT_REQUIRED } from '../_helpers/env.js';

test('ENV: required variables present (soft)', () => {
  const res = requireEnv(DEFAULT_REQUIRED);
  if (!res.ok) {
    // soft fail: warn but do not stop
    console.warn('[ENV WARN] Missing env:', res.missing);
  }
  assert.ok(true, 'Soft check done');
});
34
tests/smoke/modules-shape.test.js
Normal file
@ -0,0 +1,34 @@
import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';

const EXPECTED = {
  'LLMManager': [['callModel', 'invoke', 'run']],
  'ContentGeneration': [['generateArticleParts', 'generate', 'run']],
  'ElementExtraction': [['extractElements', 'extract', 'run']],
  'ContentAssembly': [['assembleArticle', 'assemble', 'render']],
  'SelectiveEnhancement': [['enhanceParts', 'enhance', 'run']],
  'MissingKeywords': [['fillMissingKeywords', 'complete', 'run']],
  'ArticleStorage': [['workKey', 'makeKey', 'keyOf']],
  'DigitalOceanWorkflow': [['deployArticle', 'deploy', 'publish']],
  'Main': [['run', 'main', 'start']],
  'ManualTrigger': [['main', 'run', 'start']],
  'Utils': [[]],
  'trace-wrap': [[]]
};

for (const [name, variants] of Object.entries(EXPECTED)) {
  test(`Module ${name}: exists and has expected exports (soft)`, async () => {
    const res = await safeImport(name);
    if (!res.ok) { console.warn(`[SKIP] ${name}: ${res.reason}`); return; }
    const mod = res.mod;

    for (const group of variants) {
      if (group.length === 0) continue;
      const found = group.find(fn => typeof mod[fn] === 'function');
      if (!found) {
        console.warn(`[SKIP] ${name}: none of [${group.join(', ')}] found`);
      }
    }
  });
}
21
tests/storage/digitalocean.test.js
Normal file
@ -0,0 +1,21 @@
import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';

test('DigitalOceanWorkflow.deployArticle dry-run signature', async () => {
  const res = await safeImport('DigitalOceanWorkflow');
  if (!res.ok || typeof res.mod.deployArticle !== 'function') {
    console.warn('[SKIP] DigitalOceanWorkflow.deployArticle missing');
    return;
  }
  const { deployArticle } = res.mod;
  const payload = { path: '/articles/slug.html', html: '<article>OK</article>', dryRun: true };
  try {
    const out = await deployArticle(payload);
    // Several return shapes are accepted; we only check that dryRun does not blow up
    assert.ok(out !== undefined);
  } catch (e) {
    // If it fails because dryRun is not supported, soft-fail
    console.warn('[SKIP] deployArticle threw (no dryRun support?):', e.message);
  }
});
17
tests/storage/idempotence.test.js
Normal file
@ -0,0 +1,17 @@
import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';

test('ArticleStorage idempotence (workKey) prevents duplicates', async () => {
  const res = await safeImport('ArticleStorage');
  if (!res.ok || typeof res.mod.workKey !== 'function') {
    console.warn('[SKIP] ArticleStorage.workKey missing');
    return;
  }
  const { workKey } = res.mod;
  const A = { instructions: 'x', persona: { a: 1 }, template: 't', seed: 1 };
  const B = { instructions: 'x', persona: { a: 1 }, template: 't', seed: 1 };
  const k1 = workKey(A);
  const k2 = workKey(B);
  assert.equal(k1, k2, 'same inputs should yield same key');
});
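A workKey consistent with the contract this test asserts is a stable hash of the serialized inputs. A sketch (hypothetical, not the repo's implementation; note JSON.stringify is sensitive to key insertion order, so a canonicalizing serializer is safer in production):

import crypto from 'node:crypto';

// Hypothetical workKey: equal inputs yield equal keys.
function workKey(inputs) {
  return crypto.createHash('sha256').update(JSON.stringify(inputs)).digest('hex');
}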
288
tools/audit-unused.cjs
Normal file
@ -0,0 +1,288 @@
// tools/audit-unused.cjs
/*
  "Unused" audit:
  - Files unreachable from the entrypoints
  - Unreferenced exports
  - Orphan imports
  ⚠️ Best-effort static analysis: if you use dynamic requires, DI, or evals,
  add a @keep annotation to exempt an export:
  // @keep:export ExportName
*/

const fs = require('fs');
const path = require('path');

const ROOT = process.cwd().replace(/\\/g, '/');
const EXTS = ['.js', '.cjs', '.mjs', '.jsx'];
const IGNORE_DIRS = new Set(['node_modules', '.git', 'dist', 'build', 'out', '.next', '.vercel']);

const ENTRYPOINTS = [
  'lib/test-manual.js',
  'lib/Main.js',
].map(p => path.resolve(ROOT, p));

const files = [];
(function walk(dir) {
  for (const name of fs.readdirSync(dir, { withFileTypes: true })) {
    if (name.isDirectory()) {
      if (!IGNORE_DIRS.has(name.name)) walk(path.join(dir, name.name));
    } else {
      const ext = path.extname(name.name);
      if (EXTS.includes(ext)) files.push(path.join(dir, name.name));
    }
  }
})(ROOT);

const byNorm = new Map();
for (const f of files) byNorm.set(path.normalize(f), f);

function resolveImport(fromFile, spec) {
  // ignore packages ('openai', 'undici', etc.)
  if (!spec.startsWith('.') && !spec.startsWith('/')) return null;
  const base = path.resolve(path.dirname(fromFile), spec);
  const candidates = [
    base,
    base + '.js', base + '.cjs', base + '.mjs', base + '.jsx',
    path.join(base, 'index.js'), path.join(base, 'index.cjs'), path.join(base, 'index.mjs'),
  ];
  for (const c of candidates) {
    const n = path.normalize(c);
    if (byNorm.has(n)) return byNorm.get(n);
  }
  return null;
}

const RE = {
  // imports
  require: /require\s*\(\s*['"`]([^'"`]+)['"`]\s*\)/g,
  importFrom: /import\s+[^'"]+\s+from\s+['"`]([^'"`]+)['"`]/g,
  importOnly: /import\s+['"`]([^'"`]+)['"`]\s*;?/g,

  // CJS exports
  moduleExportsObj: /module\.exports\s*=\s*{([\s\S]*?)}/g,
  moduleExportsAssign: /module\.exports\.(\w+)\s*=/g,
  exportsAssign: /exports\.(\w+)\s*=/g,

  // ESM exports
  exportNamed: /export\s+(?:const|let|var|function|class)\s+(\w+)/g,
  exportList: /export\s*{\s*([^}]+)\s*}/g,

  // usage of members
  // const { a, b } = require('./m'); import { a } from './m'
  destructured: /(?:const|let|var|import)\s*{([^}]+)}\s*=\s*(?:require\(|from\s+['"`])/g,
  // require('./m').foo OR (await import('./m')).foo
  dotUse: /(?:require\(\s*['"`][^'"`]+['"`]\s*\)|import\(\s*['"`][^'"`]+['"`]\s*\))\.(\w+)/g,

  keep: /@keep:export\s+(\w+)/g,
};

function parseFile(file) {
  const txt = fs.readFileSync(file, 'utf8');

  const imports = [];
  for (const rx of [RE.require, RE.importFrom, RE.importOnly]) {
    let m; while ((m = rx.exec(txt))) imports.push(m[1]);
  }

  const exports = new Set();
  const keep = new Set();

  // CJS object export
  let m;
  while ((m = RE.moduleExportsObj.exec(txt))) {
    const inside = m[1];
    inside.split(',').forEach(p => {
      const n = (p.split(':')[0] || '').trim().replace(/\s+/g, '');
      if (n) exports.add(n);
    });
  }
  while ((m = RE.moduleExportsAssign.exec(txt))) exports.add(m[1]);
  while ((m = RE.exportsAssign.exec(txt))) exports.add(m[1]);

  // ESM named
  while ((m = RE.exportNamed.exec(txt))) exports.add(m[1]);
  while ((m = RE.exportList.exec(txt))) {
    m[1].split(',').forEach(s => {
      const n = s.split(' as ')[0].trim();
      if (n) exports.add(n);
    });
  }

  while ((m = RE.keep.exec(txt))) keep.add(m[1]);

  return { file, txt, imports, exports: [...exports], keep: [...keep] };
}

const parsed = files.map(parseFile);

// Build edges
const edges = new Map(); // from -> Set(to)
const importMap = new Map(); // from -> [{raw, resolved}]
for (const p of parsed) {
  const tos = new Set();
  const details = [];
  for (const spec of p.imports) {
    const resolved = resolveImport(p.file, spec);
    if (resolved) tos.add(resolved);
    details.push({ raw: spec, resolved });
  }
  edges.set(p.file, tos);
  importMap.set(p.file, details);
}

// Reachability from entrypoints
const reachable = new Set();
function dfs(start) {
  const stack = [start];
  while (stack.length) {
    const f = stack.pop();
    if (!byNorm.has(path.normalize(f))) continue;
    if (reachable.has(f)) continue;
    reachable.add(f);
    const tos = edges.get(f) || new Set();
    tos.forEach(t => stack.push(t));
  }
}
ENTRYPOINTS.forEach(dfs);

// Usage of exports (very simple heuristic)
const exportIndex = new Map(); // file -> Set(exportName)
parsed.forEach(p => exportIndex.set(p.file, new Set(p.exports)));

const used = new Map(); // file -> Set(usedName)
for (const p of parsed) {
  const txt = p.txt;

  // destructured usage — name-only pass; the actual per-file resolution
  // happens in collectImportedNames() below, so this loop is intentionally a no-op.
  let m;
  while ((m = RE.destructured.exec(txt))) {
    const names = m[1].split(',').map(s => s.split(':')[0].trim());
    names.forEach(n => {
      if (!n) return;
      // mark globally; we can't know which file exactly (static pass), mapped later by imports
    });
  }

  // dot usage; we can't resolve it to a file reliably; name-only aggregation only
}

function collectImportedNames(file) {
  // Return map: targetFile -> Set(names)
  const res = new Map();
  const txt = fs.readFileSync(file, 'utf8');

  // import {a,b} from './x'
  const importNamed = /import\s*{([^}]+)}\s*from\s*['"`]([^'"`]+)['"`]/g;
  let m;
  while ((m = importNamed.exec(txt))) {
    const names = m[1].split(',').map(s => s.split(' as ')[0].trim()).filter(Boolean);
    const target = resolveImport(file, m[2]);
    if (target) {
      if (!res.has(target)) res.set(target, new Set());
      names.forEach(n => res.get(target).add(n));
    }
  }

  // const {a,b} = require('./x')
  const reqNamed = /(?:const|let|var)\s*{([^}]+)}\s*=\s*require\(\s*['"`]([^'"`]+)['"`]\s*\)/g;
  while ((m = reqNamed.exec(txt))) {
    const names = m[1].split(',').map(s => s.split(':')[0].trim()).filter(Boolean);
    const target = resolveImport(file, m[2]);
    if (target) {
      if (!res.has(target)) res.set(target, new Set());
      names.forEach(n => res.get(target).add(n));
    }
  }

  // require('./x').foo
  const reqDot = /require\(\s*['"`]([^'"`]+)['"`]\s*\)\.(\w+)/g;
  while ((m = reqDot.exec(txt))) {
    const target = resolveImport(file, m[1]);
    const name = m[2];
    if (target) {
      if (!res.has(target)) res.set(target, new Set());
      res.get(target).add(name);
    }
  }

  // (await import('./x')).foo OR import('./x').then(m=>m.foo)
  const impDot = /import\(\s*['"`]([^'"`]+)['"`]\s*\)\.(\w+)/g;
  while ((m = impDot.exec(txt))) {
    const target = resolveImport(file, m[1]);
    const name = m[2];
    if (target) {
      if (!res.has(target)) res.set(target, new Set());
      res.get(target).add(name);
    }
  }

  return res;
}

const usageByTarget = new Map(); // targetFile -> Set(namesUsed)
for (const p of parsed) {
  const m = collectImportedNames(p.file);
  for (const [target, names] of m) {
    if (!usageByTarget.has(target)) usageByTarget.set(target, new Set());
    names.forEach(n => usageByTarget.get(target).add(n));
  }
}

function rel(p) { return path.relative(ROOT, p).replace(/\\/g, '/'); }

const report = {
  entrypoints: ENTRYPOINTS.filter(fs.existsSync).map(rel),
  unreachableFiles: [],
  brokenImports: [],
  unusedExports: [],
};

for (const f of files) {
  if (!reachable.has(f) && !ENTRYPOINTS.includes(f)) {
    report.unreachableFiles.push(rel(f));
  }
}

for (const [from, list] of importMap) {
  for (const { raw, resolved } of list) {
    if ((raw.startsWith('.') || raw.startsWith('/')) && !resolved) {
      report.brokenImports.push({ from: rel(from), spec: raw });
    }
  }
}

for (const p of parsed) {
  const exp = exportIndex.get(p.file);
  if (!exp || exp.size === 0) continue;

  const usedSet = usageByTarget.get(p.file) || new Set();
  const keepMarks = new Set(p.keep || []);

  const unused = [...exp].filter(n => !usedSet.has(n) && !keepMarks.has(n));
  if (unused.length > 0) {
    report.unusedExports.push({ file: rel(p.file), unused });
  }
}

console.log('=== UNUSED AUDIT REPORT ===');
console.log('');
console.log('Entrypoints:', report.entrypoints);
console.log('');

console.log('— Unreachable files (dead):');
if (report.unreachableFiles.length === 0) console.log('  ✔ none');
else report.unreachableFiles.sort().forEach(f => console.log('  •', f));
console.log('');

console.log('— Broken relative imports:');
if (report.brokenImports.length === 0) console.log('  ✔ none');
else report.brokenImports.forEach(i => console.log(`  • ${i.from} -> ${i.spec}`));
console.log('');

console.log('— Unused exports:');
if (report.unusedExports.length === 0) console.log('  ✔ none');
else report.unusedExports
  .sort((a, b) => a.file.localeCompare(b.file))
  .forEach(r => console.log(`  • ${r.file}: ${r.unused.join(', ')}`));

console.log('');
console.log('Tip: ajoute un commentaire "@keep:export Nom" au-dessus d’un export pour le protéger des faux positifs.');
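The audit takes no flags; run it from the repo root so ROOT and the hard-coded ENTRYPOINTS resolve correctly:

node tools/audit-unused.cjs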
155
tools/pack-lib.cjs
Normal file
@ -0,0 +1,155 @@
#!/usr/bin/env node
/* eslint-disable no-console */
const fs = require('fs');
const path = require('path');

const args = process.argv.slice(2);

// ---- CLI options -----------------------------------------------------------
function getArg(name, def) {
  const i = args.findIndex(a => a === `--${name}` || a.startsWith(`--${name}=`));
  if (i === -1) return def;
  const v = args[i].includes('=') ? args[i].split('=').slice(1).join('=') : args[i + 1];
  return v ?? true;
}
const ROOT = process.cwd();
const DIR = path.resolve(ROOT, getArg('dir', 'lib'));
const OUT = path.resolve(ROOT, getArg('out', 'code.js'));
const ENTRY = getArg('entry', 'lib/test-manual.js');
const ORDER = getArg('order', 'topo'); // 'topo' | 'alpha' | 'entry-first'

// ---- Helpers ---------------------------------------------------------------
function readFileSafe(p) {
  try { return fs.readFileSync(p, 'utf8'); }
  catch (e) { return null; }
}
function* walk(dir) {
  const list = fs.readdirSync(dir, { withFileTypes: true });
  for (const d of list) {
    const p = path.join(dir, d.name);
    if (d.isDirectory()) yield* walk(p);
    else if (d.isFile() && d.name.endsWith('.js')) yield p;
  }
}

// Normalize an imported path (./x, ../x, no extension)
function resolveImport(fromFile, spec) {
  if (!spec) return null;
  if (spec.startsWith('node:')) return null; // builtin
  if (!spec.startsWith('./') && !spec.startsWith('../')) return null; // external: ignore
  let target = path.resolve(path.dirname(fromFile), spec);
  if (!/\.js$/i.test(target)) {
    const withJs = `${target}.js`;
    if (fs.existsSync(withJs)) target = withJs;
    else if (fs.existsSync(path.join(target, 'index.js'))) target = path.join(target, 'index.js');
  }
  return target;
}

// Very simple import/require parser (sufficient for 95% of cases)
const RE_IMPORT_1 = /\bimport\s+[^'"]*\s+from\s+['"]([^'"]+)['"]/g; // import X from '...'
const RE_IMPORT_2 = /\bimport\s+['"]([^'"]+)['"]/g; // import '...'
const RE_REQUIRE = /\brequire\(\s*['"]([^'"]+)['"]\s*\)/g; // require('...')

function extractDeps(absPath, code) {
  const specs = new Set();
  let m;
  for (RE_IMPORT_1.lastIndex = 0; (m = RE_IMPORT_1.exec(code)); ) specs.add(m[1]);
  for (RE_IMPORT_2.lastIndex = 0; (m = RE_IMPORT_2.exec(code)); ) specs.add(m[1]);
  for (RE_REQUIRE.lastIndex = 0; (m = RE_REQUIRE.exec(code)); ) specs.add(m[1]);
  const deps = [];
  for (const s of specs) {
    const r = resolveImport(absPath, s);
    if (r) deps.push(r);
  }
  return deps;
}

function asciiBox(title, width = 70) {
  const clean = title.length > width - 6 ? title.slice(0, width - 9) + '…' : title;
  const line = '─'.repeat(width - 2);
  const pad = width - 6 - clean.length;
  return [
    `/*`,
    `┌${line}┐`,
    `│ ${clean}${' '.repeat(Math.max(0, pad))} │`,
    `└${line}┘`,
    `*/`
  ].join('\n');
}

// ---- File collection -------------------------------------------------------
if (!fs.existsSync(DIR) || !fs.statSync(DIR).isDirectory()) {
  console.error(`[pack-lib] Dossier introuvable: ${DIR}`);
  process.exit(1);
}
const allFiles = Array.from(walk(DIR)).map(p => path.resolve(p));

// ---- Dependency graph ------------------------------------------------------
/**
 * graph: Map<file, { deps: string[], code: string }>
 */
const graph = new Map();
for (const f of allFiles) {
  const code = readFileSafe(f) ?? '';
  const deps = extractDeps(f, code).filter(d => allFiles.includes(d));
  graph.set(f, { deps, code });
}

// ---- Ordering --------------------------------------------------------------
function topoSort(graph, entryAbs) {
  const visited = new Set();
  const temp = new Set();
  const order = [];

  const visit = (n) => {
    if (visited.has(n)) return;
    if (temp.has(n)) return; // cycle: skip it to avoid an infinite loop
    temp.add(n);
    const { deps = [] } = graph.get(n) || {};
    for (const d of deps) if (graph.has(d)) visit(d);
    temp.delete(n);
    visited.add(n);
    order.push(n);
  };

  if (entryAbs && graph.has(entryAbs)) visit(entryAbs);
  for (const n of graph.keys()) visit(n);
  return order;
}

let order;
const entryAbs = path.resolve(ROOT, ENTRY);
if (ORDER === 'alpha') {
  order = [...graph.keys()].sort((a, b) => a.localeCompare(b));
} else if (ORDER === 'entry-first') {
  order = [entryAbs, ...[...graph.keys()].filter(f => f !== entryAbs).sort((a, b) => a.localeCompare(b))]
    .filter(Boolean);
} else {
  order = topoSort(graph, entryAbs);
}

// ---- Bundle output ---------------------------------------------------------
const header = `/*
code.js — bundle concaténé
Généré: ${new Date().toISOString()}
Source: ${path.relative(ROOT, DIR)}
Fichiers: ${order.length}
Ordre: ${ORDER}
*/\n\n`;

let out = header;

for (const file of order) {
  const rel = path.relative(ROOT, file).replace(/\\/g, '/');
  const box = asciiBox(`File: ${rel}`);
  const code = (graph.get(file)?.code) ?? '';
  out += `${box}\n\n${code}\n\n`;
}

// Make sure the target directory exists before writing
fs.mkdirSync(path.dirname(OUT), { recursive: true });
fs.writeFileSync(OUT, out, 'utf8');

const relOut = path.relative(ROOT, OUT).replace(/\\/g, '/');
console.log(`[pack-lib] OK -> ${relOut} (${order.length} fichiers)`);
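Invocation sketch using the flags parsed by getArg above (the values shown are the defaults):

node tools/pack-lib.cjs --dir lib --out code.js --entry lib/test-manual.js --order topo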