import test from 'node:test';
import assert from 'node:assert';
import { safeImport } from '../_helpers/path.js';
import { FakeLLMClient } from '../_helpers/fakeLLMClient.js';
import { AutoReporter } from '../reporters/AutoReporter.js';

// Log a visible skip marker without failing the run (used when optional
// test hooks or the module under test are unavailable).
function skip(msg) {
  console.warn('[SKIP]', msg);
}

// Auto-Reporter configuration (instantiated for its side effects — presumably
// it registers itself as a reporter on construction; TODO confirm).
const autoReporter = new AutoReporter();

test('LLMManager: enforces concurrency limit', async () => {
  const res = await safeImport('LLMManager');
  if (!res.ok) {
    skip(res.reason);
    return;
  }

  const L = res.mod;
  // The test relies on injection hooks; bail out gracefully if absent.
  if (typeof L.__setClient !== 'function' || typeof L.__setConcurrency !== 'function') {
    skip('Missing __setClient/__setConcurrency hooks');
    return;
  }

  const fake = new FakeLLMClient({ delayMs: 50 });
  L.__setClient('mock', fake);
  L.__setConcurrency(2); // cap at 2 concurrent requests

  const N = 5;
  const calls = Array.from({ length: N }, (_, i) =>
    L.callModel({ provider: 'mock', model: 'fake-1', input: `#${i}` })
  );

  const t0 = Date.now();
  await Promise.all(calls);
  const elapsed = Date.now() - t0;

  // With a 50ms delay and concurrency=2, 5 requests run in ~3 waves,
  // so total duration should be >= 150ms. The lower bound (with margin)
  // proves requests were throttled rather than all fired in parallel.
  assert.ok(elapsed >= 120, `expected batching effect, got ${elapsed}ms`);

  // Upper bound: fully serial execution (an ignored limit, concurrency=1)
  // would take >= 250ms. Without this check the test passes even if the
  // limiter never runs anything in parallel.
  // NOTE(review): timing-based — widen the margin if CI proves too slow.
  assert.ok(elapsed < 240, `expected parallelism within the limit, got ${elapsed}ms`);
});