82 lines
2.8 KiB
JavaScript
82 lines
2.8 KiB
JavaScript
|
|
// Centralised hot-reload watcher.
|
||
|
|
//
|
||
|
|
// After the cron rewrites the in-memory data files (search chunks, IMDb
|
||
|
|
// mappings, IMDb ratings TSV), the existing per-call mtime checks already
|
||
|
|
// reload them lazily — but the FIRST request post-cron pays a 1-2 s penalty.
|
||
|
|
//
|
||
|
|
// This module watches all four data sources and proactively triggers a reload
|
||
|
|
// in the background as soon as the files change, so users never hit the cold
|
||
|
|
// path. Debounced because the cron writes ~10 files within seconds.
|
||
|
|
|
||
|
|
import { unwatchFile, watch, watchFile } from 'node:fs';
|
||
|
|
import { IMDB_RATINGS, TMDBINTEGRAL_DIR } from '../config.js';
|
||
|
|
import { preloadMappings } from './imdbMapping.js';
|
||
|
|
import { getRatings } from './imdbRatings.js';
|
||
|
|
import { imdbRatingsCount } from './metrics.js';
|
||
|
|
import { reloadAllPools } from './searchEngine.js';
|
||
|
|
|
||
|
|
// Quiet period after the last change event before a reload fires. The cron
// rewrites ~10 files within a few seconds, so events are coalesced into one
// reload per category.
const DEBOUNCE_MS = 5000;

// Pending setTimeout handles, one per reload category (see debounce()).
const timers = { chunks: null, mappings: null, ratings: null };

// Active watcher handles (anything exposing close()); drained by stopWatchers().
const watchers = [];
||
|
|
/**
 * Schedule `fn` to run once, DEBOUNCE_MS after the LAST call for `key`.
 * Repeated calls within the window reset the timer, so a burst of file-change
 * events triggers a single reload.
 *
 * @param {'chunks'|'mappings'|'ratings'} key - slot in the `timers` table
 * @param {() => (void|Promise<void>)} fn - reload action; may be sync or async
 */
function debounce(key, fn) {
  clearTimeout(timers[key]);
  timers[key] = setTimeout(() => {
    // Invoke via an async IIFE so a SYNCHRONOUS throw from fn() lands in the
    // same .catch() as a rejected promise. The previous Promise.resolve(fn())
    // form evaluated fn() before any wrapping, so a sync throw inside the
    // setTimeout callback would escape as an uncaught exception.
    (async () => fn())().catch((err) =>
      console.error(`Hot reload (${key}) failed:`, err.message),
    );
  }, DEBOUNCE_MS);
}
||
|
|
/**
 * Pre-warm the IMDb ratings cache after the TSV has been rewritten.
 * getRatings() compares the file mtime and rebuilds its Map when stale, so
 * calling it here means the next request finds a hot cache instead of paying
 * the rebuild cost itself.
 */
async function reloadRatings() {
  const ratings = await getRatings();
  imdbRatingsCount.set(ratings.size);
  console.log(`Hot reload: imdbratings.tsv (${ratings.size} entries)`);
}
||
|
|
/**
 * Rebuild the IMDb → TMDB mapping tables in the background and log the
 * resulting entry counts per media type.
 */
async function reloadMappings() {
  const counts = await preloadMappings();
  console.log(`Hot reload: IMDb mappings (movie=${counts.movie}, tv=${counts.tv})`);
}
||
|
|
// Proactively re-read the in-memory search chunk pools; the heavy lifting
// lives in reloadAllPools().
async function reloadChunks() {
  await reloadAllPools();
}
||
|
|
/**
 * Install filesystem watchers on every hot-reloadable data source so reloads
 * are triggered in the background the moment the cron rewrites the files.
 * Handles are collected in `watchers` for stopWatchers() to close later.
 */
export function startWatchers() {
  // One directory watch on tmdbintegral/ covers both the search chunks and
  // the imdb2{movie,tv}.json mapping files.
  try {
    const dirWatcher = watch(TMDBINTEGRAL_DIR, (_event, name) => {
      if (!name) return;
      if (/^search(movie|tv)\d+\.json$/.test(name)) {
        debounce('chunks', reloadChunks);
        return;
      }
      if (name === 'imdb2movie.json' || name === 'imdb2tv.json') {
        debounce('mappings', reloadMappings);
      }
    });
    dirWatcher.unref();
    watchers.push(dirWatcher);
  } catch (err) {
    console.warn(`Cannot watch ${TMDBINTEGRAL_DIR}:`, err.message);
  }

  // imdbratings.tsv is replaced via rename(tmp -> file), which swaps the
  // inode; fs.watch on the file is unreliable on Linux in that case.
  // fs.watchFile follows the *path* instead, at the cost of one stat()
  // per 10 s polling interval.
  watchFile(IMDB_RATINGS, { interval: 10_000 }, (curr, prev) => {
    if (curr.mtimeMs !== prev.mtimeMs) debounce('ratings', reloadRatings);
  });
  watchers.push({ close: () => unwatchFile(IMDB_RATINGS) });
}
||
|
|
/**
 * Close every active watcher and forget it. Safe to call more than once —
 * the list is drained as it is closed, so a second call is a no-op.
 */
export function stopWatchers() {
  while (watchers.length > 0) {
    const watcher = watchers.pop();
    try {
      watcher.close();
    } catch {
      /* ignore — the handle may already be closed */
    }
  }
}