// tools/audit-unused.cjs
/*
Audit "unused":
- Fichiers injoignables depuis des entrypoints
- Exports non référencés
- Imports orphelins

⚠️ Static analysis best-effort: si tu fais des require dynamiques, DI, ou evals,
ajoute un @keep pour ignorer un export:

// @keep:export NomExport
*/
const fs = require('fs');
const path = require('path');

// Project root; Windows backslashes normalised to forward slashes for display.
const ROOT = process.cwd().replace(/\\/g, '/');
// Source-file extensions the audit scans.
const EXTS = ['.js', '.cjs', '.mjs', '.jsx'];
// Directory names never descended into during the walk.
const IGNORE_DIRS = new Set(['node_modules', '.git', 'dist', 'build', 'out', '.next', '.vercel']);

// Absolute paths of the files the reachability walk starts from.
const ENTRYPOINTS = [
  'lib/test-manual.js',
  'lib/Main.js',
].map(p => path.resolve(ROOT, p));
// Every source file found under ROOT (depth-first, readdir order).
const files = [];

// Recursively collect files whose extension is in EXTS, skipping IGNORE_DIRS.
function walk(dir) {
  for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
    const full = path.join(dir, entry.name);
    if (entry.isDirectory()) {
      if (IGNORE_DIRS.has(entry.name)) continue;
      walk(full);
    } else if (EXTS.includes(path.extname(entry.name))) {
      files.push(full);
    }
  }
}
walk(ROOT);
// Index of every discovered file keyed by its normalised path, so import
// resolution can match candidates regardless of separator/`.`/`..` noise.
const byNorm = new Map();
for (const f of files) byNorm.set(path.normalize(f), f);
/**
 * Resolve a relative/absolute import specifier to a known project file.
 * Bare specifiers ('openai', 'undici', …) are npm packages — returns null.
 * Tries the spec as-is, then each extension in EXTS, then a directory index
 * with each extension (previously `index.jsx` was missing even though `.jsx`
 * is in EXTS — candidates are now derived from EXTS so they cannot drift).
 * @param {string} fromFile absolute path of the importing file
 * @param {string} spec     the raw import specifier
 * @returns {string|null}   the resolved project file path, or null
 */
function resolveImport(fromFile, spec) {
  if (!spec.startsWith('.') && !spec.startsWith('/')) return null;
  const base = path.resolve(path.dirname(fromFile), spec);
  const candidates = [
    base,
    ...EXTS.map(ext => base + ext),
    ...EXTS.map(ext => path.join(base, 'index' + ext)),
  ];
  for (const c of candidates) {
    const n = path.normalize(c);
    if (byNorm.has(n)) return byNorm.get(n);
  }
  return null;
}
// All shared regexes. They use /g and are stateful (lastIndex); every
// consumer runs its exec-loop to exhaustion, which resets lastIndex to 0.
const RE = {
  // imports
  require: /require\s*\(\s*['"`]([^'"`]+)['"`]\s*\)/g,
  importFrom: /import\s+[^'"]+\s+from\s+['"`]([^'"`]+)['"`]/g,
  // side-effect-only import: import './x';
  importOnly: /import\s+['"`]([^'"`]+)['"`]\s*;?/g,

  // exports CJS (moduleExportsObj is non-greedy: stops at the first `}`,
  // so nested object literals are only partially captured — best effort)
  moduleExportsObj: /module\.exports\s*=\s*{([\s\S]*?)}/g,
  moduleExportsAssign: /module\.exports\.(\w+)\s*=/g,
  exportsAssign: /exports\.(\w+)\s*=/g,

  // exports ESM — now also matches `export async function foo`
  exportNamed: /export\s+(?:async\s+)?(?:const|let|var|function|class)\s+(\w+)/g,
  exportList: /export\s*{\s*([^}]+)\s*}/g,

  // usage of members
  // const { a, b } = require('./m'); import { a } from './m'
  destructured: /(?:const|let|var|import)\s*{([^}]+)}\s*=\s*(?:require\(|from\s+['"`])/g,
  // require('./m').foo / import('./m').foo
  dotUse: /(?:require\(\s*['"`][^'"`]+['"`]\s*\)|import\(\s*['"`][^'"`]+['"`]\s*\))\.(\w+)/g,

  // opt-out marker: // @keep:export Name
  keep: /@keep:export\s+(\w+)/g,
};
/**
 * Extract import specifiers, exported names, and @keep markers from one file.
 * @param {string} file absolute path
 * @returns {{file: string, txt: string, imports: string[], exports: string[], keep: string[]}}
 */
function parseFile(file) {
  const txt = fs.readFileSync(file, 'utf8');

  const imports = [];
  for (const rx of [RE.require, RE.importFrom, RE.importOnly]) {
    let m;
    while ((m = rx.exec(txt))) imports.push(m[1]);
  }

  const exports = new Set();
  const keep = new Set();

  // CJS object export: module.exports = { a, b: impl, ... }
  let m;
  while ((m = RE.moduleExportsObj.exec(txt))) {
    m[1].split(',').forEach(p => {
      const n = (p.split(':')[0] || '').trim().replace(/\s+/g, '');
      // Only keep plain identifiers; skips spread entries ('...x') and junk
      // that previously leaked into the export list as false positives.
      if (/^\w+$/.test(n)) exports.add(n);
    });
  }
  while ((m = RE.moduleExportsAssign.exec(txt))) exports.add(m[1]);
  while ((m = RE.exportsAssign.exec(txt))) exports.add(m[1]);

  // ESM named exports
  while ((m = RE.exportNamed.exec(txt))) exports.add(m[1]);
  while ((m = RE.exportList.exec(txt))) {
    m[1].split(',').forEach(s => {
      // `export { local as public }` exposes the name AFTER `as`; importers
      // reference the public name, so that is the one to record (the old
      // code took the local name, causing false "unused export" reports).
      const parts = s.split(' as ');
      const n = parts[parts.length - 1].trim();
      if (n) exports.add(n);
    });
  }

  while ((m = RE.keep.exec(txt))) keep.add(m[1]);

  return { file, txt, imports, exports: [...exports], keep: [...keep] };
}
// Parse every discovered file once.
const parsed = files.map((f) => parseFile(f));

// Build the import graph: `edges` (from -> Set of resolved target files) and
// `importMap` (from -> [{ raw, resolved }]) kept for broken-import reporting.
const edges = new Map();
const importMap = new Map();
for (const p of parsed) {
  const details = p.imports.map((spec) => ({
    raw: spec,
    resolved: resolveImport(p.file, spec),
  }));
  edges.set(p.file, new Set(details.map((d) => d.resolved).filter(Boolean)));
  importMap.set(p.file, details);
}
// Reachability: flood-fill the import graph starting from each entrypoint.
const reachable = new Set();

// Iterative depth-first traversal marking every file reachable from `start`.
function dfs(start) {
  const stack = [start];
  while (stack.length > 0) {
    const current = stack.pop();
    // Ignore paths that are not known project files.
    if (!byNorm.has(path.normalize(current))) continue;
    // Skip files already visited (also breaks import cycles).
    if (reachable.has(current)) continue;
    reachable.add(current);
    const deps = edges.get(current) || new Set();
    for (const dep of deps) stack.push(dep);
  }
}
ENTRYPOINTS.forEach(dfs);
// Per-file export index: file -> Set(exportName).
const exportIndex = new Map();
parsed.forEach(p => exportIndex.set(p.file, new Set(p.exports)));

const used = new Map(); // file -> Set(usedName)
// NOTE(review): the original pass here ran RE.destructured / RE.dotUse over
// every file but discarded every match (the callback body was empty), so it
// did no work. Name-to-file attribution is done properly by
// collectImportedNames() below; `used` is kept (empty) for shape stability.
/**
 * Collect which names `file` imports from each local module it references.
 * Best-effort static scan over four syntactic shapes; dynamic patterns such
 * as `(await import('./x')).foo` or `.then(m => m.foo)` are NOT matched.
 * @param {string} file absolute path (re-read from disk; cheap at this scale)
 * @returns {Map<string, Set<string>>} targetFile -> names imported from it
 */
function collectImportedNames(file) {
  const res = new Map();
  const txt = fs.readFileSync(file, 'utf8');

  // Attribute `names` to the project file `spec` resolves to (if any).
  const record = (spec, names) => {
    const target = resolveImport(file, spec);
    if (!target) return;
    if (!res.has(target)) res.set(target, new Set());
    names.forEach(n => res.get(target).add(n));
  };

  let m;

  // import { a, b as c } from './x'  — source-side names (before `as`)
  const importNamed = /import\s*{([^}]+)}\s*from\s*['"`]([^'"`]+)['"`]/g;
  while ((m = importNamed.exec(txt))) {
    record(m[2], m[1].split(',').map(s => s.split(' as ')[0].trim()).filter(Boolean));
  }

  // const { a, b: alias } = require('./x') — keys (before `:`)
  const reqNamed = /(?:const|let|var)\s*{([^}]+)}\s*=\s*require\(\s*['"`]([^'"`]+)['"`]\s*\)/g;
  while ((m = reqNamed.exec(txt))) {
    record(m[2], m[1].split(',').map(s => s.split(':')[0].trim()).filter(Boolean));
  }

  // require('./x').foo
  const reqDot = /require\(\s*['"`]([^'"`]+)['"`]\s*\)\.(\w+)/g;
  while ((m = reqDot.exec(txt))) record(m[1], [m[2]]);

  // import('./x').foo — a `.then` chained here records the harmless name
  // "then"; the original comment overstated what this pattern catches.
  const impDot = /import\(\s*['"`]([^'"`]+)['"`]\s*\)\.(\w+)/g;
  while ((m = impDot.exec(txt))) record(m[1], [m[2]]);

  return res;
}
// Aggregate, over every file in the project, which names each target file
// has imported from it: targetFile -> Set(namesUsed).
const usageByTarget = new Map();
for (const p of parsed) {
  for (const [target, names] of collectImportedNames(p.file)) {
    const bucket = usageByTarget.get(target) || new Set();
    names.forEach(n => bucket.add(n));
    usageByTarget.set(target, bucket);
  }
}
// Path relative to ROOT with forward slashes, for stable report output.
function rel(p) { return path.relative(ROOT, p).replace(/\\/g, '/'); }

// Accumulated findings; filled by the passes below, printed at the end.
const report = {
  entrypoints: ENTRYPOINTS.filter(fs.existsSync).map(rel),
  unreachableFiles: [],
  brokenImports: [],
  unusedExports: [],
};
// Dead files: present on disk but never reached from any entrypoint.
for (const f of files) {
  const isEntry = ENTRYPOINTS.includes(f);
  if (!isEntry && !reachable.has(f)) report.unreachableFiles.push(rel(f));
}

// Relative specifiers that resolved to nothing on disk.
for (const [from, list] of importMap) {
  for (const { raw, resolved } of list) {
    const isLocal = raw.startsWith('.') || raw.startsWith('/');
    if (isLocal && !resolved) {
      report.brokenImports.push({ from: rel(from), spec: raw });
    }
  }
}
// Exports never imported anywhere, minus names protected with @keep.
for (const p of parsed) {
  const exp = exportIndex.get(p.file);
  if (!exp || exp.size === 0) continue;

  const usedSet = usageByTarget.get(p.file) || new Set();
  const protectedNames = new Set(p.keep || []);

  const unused = [...exp].filter(n => !(usedSet.has(n) || protectedNames.has(n)));
  if (unused.length) report.unusedExports.push({ file: rel(p.file), unused });
}
// Print the report in fixed sections (output strings are load-bearing for
// anyone grepping the audit log — kept byte-identical).
console.log('=== UNUSED AUDIT REPORT ===');
console.log('');
console.log('Entrypoints:', report.entrypoints);
console.log('');

console.log('— Unreachable files (dead):');
if (report.unreachableFiles.length > 0) {
  report.unreachableFiles.sort().forEach(f => console.log(' •', f));
} else {
  console.log(' ✔ none');
}
console.log('');

console.log('— Broken relative imports:');
if (report.brokenImports.length > 0) {
  report.brokenImports.forEach(i => console.log(` • ${i.from} -> ${i.spec}`));
} else {
  console.log(' ✔ none');
}
console.log('');

console.log('— Unused exports:');
if (report.unusedExports.length > 0) {
  const ordered = [...report.unusedExports].sort((a, b) => a.file.localeCompare(b.file));
  ordered.forEach(r => console.log(` • ${r.file}: ${r.unused.join(', ')}`));
} else {
  console.log(' ✔ none');
}

console.log('');
console.log('Tip: ajoute un commentaire "@keep:export Nom" au-dessus d’un export pour le protéger des faux positifs.');