singularity-forge/scripts/watch-resources.js

68 lines
2.2 KiB
JavaScript
Raw Normal View History

2026-03-14 18:47:03 +01:00
#!/usr/bin/env node
/**
* Watch src/resources/ and sync changes to dist/resources/.
*
* Runs alongside `tsc --watch` to ensure non-TS resources (prompts, agents,
* skills, workflow files) are kept in sync with the build output.
*
* This solves the `npm link` branch-drift problem: without dist/resources/,
* `initResources()` reads from src/resources/ which changes with git branch
* switches, causing stale extensions to be synced to ~/.gsd/agent/ for ALL
* projects using gsd.
*/
import { watch } from 'node:fs'
import { cpSync, mkdirSync, rmSync } from 'node:fs'
import { resolve, dirname } from 'node:path'
import { fileURLToPath } from 'node:url'
// ES modules have no __dirname; derive it from this module's URL.
const __dirname = dirname(fileURLToPath(import.meta.url))
// Source of truth for resources — tracks the checked-out git branch.
const src = resolve(__dirname, '..', 'src', 'resources')
// Build-output copy; per the header, initResources() should read from here.
const dest = resolve(__dirname, '..', 'dist', 'resources')
/**
 * Mirror the contents of one directory into another.
 *
 * The destination is removed entirely first so that deletions in the
 * source are reflected — a plain recursive copy would leave stale files
 * behind in the destination.
 *
 * @param {string} [from=src] - Source directory (defaults to src/resources/).
 * @param {string} [to=dest]  - Destination directory (defaults to dist/resources/).
 * @throws If the source does not exist or a copy fails (cpSync propagates).
 */
function sync(from = src, to = dest) {
  // Remove dest first to mirror deletions from src (prevents stale files)
  rmSync(to, { recursive: true, force: true })
  mkdirSync(to, { recursive: true })
  cpSync(from, to, { recursive: true, force: true })
}
// Kick off a first copy immediately so dist/resources/ exists before any
// file event fires.
sync()
process.stderr.write(`[watch-resources] Initial sync done\n`)

// Shared mutable handles; all three are released by the process 'exit'
// hook at the bottom of this script.
// fs.watch({ recursive: true }) works on macOS and Windows; on Linux with
// Node <20.13 it throws ERR_FEATURE_UNAVAILABLE_ON_PLATFORM, in which case
// we fall back to polling.
let timer = null
let fsWatcher = null
let pollInterval = null

// Debounce: collapse a burst of fs events into a single sync() fired
// 300ms after the last event in the burst.
const onChange = () => {
  clearTimeout(timer) // no-op when timer is null
  timer = setTimeout(() => {
    sync()
    process.stderr.write(`[watch-resources] Synced at ${new Date().toLocaleTimeString()}\n`)
  }, 300)
}
try {
  // Preferred path: a single OS-level recursive watcher over the tree.
  fsWatcher = watch(src, { recursive: true }, onChange)
} catch {
  // Fallback: poll every 2s (Linux without recursive watch support)
  process.stderr.write(`[watch-resources] fs.watch recursive not supported, falling back to polling\n`)
  pollInterval = setInterval(() => {
    // Best-effort: a transient failure (e.g. mid branch-switch) just
    // skips this tick — the next poll retries.
    try {
      sync()
    } catch {
      /* ignore — retried on next tick */
    }
  }, 2000)
}
// Release the debounce timer, the OS watcher handle, and the poll interval
// when the process exits, so no handle outlives the script.
process.on('exit', () => {
  // All three calls tolerate null-ish handles, so no guards are needed;
  // 'exit' handlers must be synchronous, which these are.
  clearTimeout(timer)
  fsWatcher?.close()
  clearInterval(pollInterval)
})

process.stderr.write(`[watch-resources] Watching src/resources/ → dist/resources/\n`)