perf: parallelize I/O, add runtime cache, extend nix devenv

- unit-context-composer: resolve artifact keys in parallel (Promise.all)
- unit-runtime: add in-memory cache to avoid repeated disk reads per dispatch
- auto-timers: share 15s idle watchdog tick with context-pressure check
- auto-prompts: 1s TTL budget cache to coalesce repeated loadEffectiveSFPreferences calls
- native-git-bridge: extend nativeHasChanges TTL 10s→30s
- auto-dashboard: remove pulsing dot animation (CPU churn, no UX value)
- flake.nix: add nodePackages.typescript to dev shell

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Mikael Hugo 2026-04-25 10:12:32 +02:00
parent 12aabd863e
commit 6cb6de4fd2
10 changed files with 369 additions and 269 deletions

View file

@ -24,6 +24,7 @@
clippy
git
nodejs_24
nodePackages.typescript
protobuf
rust-analyzer
rustc

View file

@ -576,7 +576,6 @@ export function updateProgressWidget(
const effectiveServiceTier = getEffectiveServiceTier();
ctx.ui.setWidget("sf-progress", (tui, theme) => {
let pulseBright = true;
let cachedLines: string[] | undefined;
let cachedWidth: number | undefined;
let cachedRtkLabel: string | null | undefined;
@ -594,12 +593,6 @@ export function updateProgressWidget(
refreshRtkLabel();
const pulseTimer = setInterval(() => {
pulseBright = !pulseBright;
cachedLines = undefined;
tui.requestRender();
}, 800);
// Refresh progress cache from disk every 15s so the widget reflects
// task/slice completion mid-unit. Without this, the progress bar only
// updates at dispatch time, appearing frozen during long-running units.
@ -635,7 +628,7 @@ export function updateProgressWidget(
// ── Line 1: Top bar ───────────────────────────────────────────────
lines.push(...ui.bar());
const dot = pulseBright
const dot = Math.floor(Date.now() / 2000) % 2 === 0
? theme.fg("accent", GLYPH.statusActive)
: theme.fg("dim", GLYPH.statusPending);
const elapsed = formatAutoElapsed(accessors.getAutoStartTime());
@ -950,7 +943,6 @@ export function updateProgressWidget(
cachedWidth = undefined;
},
dispose() {
clearInterval(pulseTimer);
if (progressRefreshTimer) clearInterval(progressRefreshTimer);
},
};

View file

@ -49,6 +49,14 @@ import { warnIfManifestHasMissingSkills } from "./skill-manifest.js";
*/
const MAX_PREAMBLE_CHARS = 30_000;
// Module-scope budget cache: `loadEffectiveSFPreferences` does existsSync +
// readFileSync on every call, which is expensive when `resolvePromptBudgets`
// is called multiple times per prompt build (capPreamble + resolveSummaryBudgetChars).
// A 1-second TTL coalesces all calls within a single dispatch tick without
// holding stale values across tick boundaries. Preferences change on human
// timescales, not sub-second timescales.
let _budgetCache: { value: ReturnType<typeof computeBudgets>; expiresAt: number } | null = null;
/**
* Resolve prompt budgets from the configured executor context window.
*
@ -60,14 +68,20 @@ const MAX_PREAMBLE_CHARS = 30_000;
* n_ctx is not discoverable through the model registry. Issue #4435.
*/
function resolvePromptBudgets(): ReturnType<typeof computeBudgets> {
const now = Date.now();
if (_budgetCache && now < _budgetCache.expiresAt) return _budgetCache.value;
try {
const prefs = loadEffectiveSFPreferences();
const sessionWindow = prefs?.preferences.context_window_override;
const windowTokens = resolveExecutorContextWindow(undefined, prefs?.preferences, sessionWindow);
return computeBudgets(windowTokens);
const value = computeBudgets(windowTokens);
_budgetCache = { value, expiresAt: now + 1_000 };
return value;
} catch (e) {
logWarning("prompt", `resolvePromptBudgets failed: ${(e as Error).message}`);
return computeBudgets(200_000);
const value = computeBudgets(200_000);
_budgetCache = { value, expiresAt: now + 1_000 };
return value;
}
}
@ -370,20 +384,20 @@ export async function inlineDependencySummaries(
}
}
const sections: string[] = [];
const seen = new Set<string>();
for (const dep of depends) {
if (seen.has(dep)) continue;
seen.add(dep);
const summaryFile = resolveSliceFile(base, mid, dep, "SUMMARY");
const summaryContent = summaryFile ? await loadFile(summaryFile) : null;
const relPath = relSliceFile(base, mid, dep, "SUMMARY");
if (summaryContent) {
sections.push(`#### ${dep} Summary\nSource: \`${relPath}\`\n\n${summaryContent.trim()}`);
} else {
sections.push(`- \`${relPath}\` _(not found)_`);
}
}
// Deduplicate deps while preserving order, then load all summaries in parallel.
const uniqueDeps = [...new Set(depends)];
const sections = await Promise.all(
uniqueDeps.map(async (dep) => {
const summaryFile = resolveSliceFile(base, mid, dep, "SUMMARY");
const summaryContent = summaryFile ? await loadFile(summaryFile) : null;
const relPath = relSliceFile(base, mid, dep, "SUMMARY");
if (summaryContent) {
return `#### ${dep} Summary\nSource: \`${relPath}\`\n\n${summaryContent.trim()}`;
} else {
return `- \`${relPath}\` _(not found)_`;
}
}),
);
const result = sections.join("\n\n");
if (budgetChars !== undefined && result.length > budgetChars) {
@ -1713,14 +1727,53 @@ export async function buildExecuteTaskPrompt(
// Inject phase handoff anchor from planning phase (if available)
const planAnchor = readPhaseAnchor(base, mid, "plan-slice");
const priorSummaries = opts.carryForwardPaths ?? await getPriorTaskSummaryPaths(mid, sid, tid, base);
const priorLines = priorSummaries.length > 0
? priorSummaries.map(p => `- \`${p}\``).join("\n")
: "- (no prior tasks)";
// Resolve paths before the parallel fetch so closures capture stable values.
const taskPlanPath = resolveTaskFile(base, mid, sid, tid, "PLAN");
const taskPlanContent = taskPlanPath ? await loadFile(taskPlanPath) : null;
const taskPlanRelPath = relSlicePath(base, mid, sid) + `/tasks/${tid}-PLAN.md`;
const slicePlanPath = resolveSliceFile(base, mid, sid, "PLAN");
const continueFile = resolveSliceFile(base, mid, sid, "CONTINUE");
const legacyContinueDir = resolveSlicePath(base, mid, sid);
const legacyContinuePath = legacyContinueDir ? join(legacyContinueDir, "continue.md") : null;
const continueRelPath = relSliceFile(base, mid, sid, "CONTINUE");
const knowledgeAbsPath = resolveSfRootFile(base, "KNOWLEDGE");
const runtimePath = resolveRuntimeFile(base);
// Fan out all independent I/O in parallel: task plan, slice plan, continue
// file, runtime, knowledge, graph subgraph, overrides, prior summary paths.
const [
taskPlanContent,
slicePlanContent,
continueContent,
runtimeContent,
knowledgeInlineET,
graphBlockET,
activeOverrides,
priorSummaries,
] = await Promise.all([
taskPlanPath ? loadFile(taskPlanPath) : Promise.resolve(null),
slicePlanPath ? loadFile(slicePlanPath) : Promise.resolve(null),
continueFile ? loadFile(continueFile) : Promise.resolve(null),
existsSync(runtimePath) ? loadFile(runtimePath) : Promise.resolve(null),
existsSync(knowledgeAbsPath)
? inlineFileSmart(
knowledgeAbsPath,
relSfRootFile("KNOWLEDGE"),
"Project Knowledge",
`${tTitle} ${sTitle}`,
)
: Promise.resolve(null),
inlineGraphSubgraph(base, `${tid} ${tTitle}`, { budget: 2000 }),
loadActiveOverrides(base),
opts.carryForwardPaths
? Promise.resolve(opts.carryForwardPaths)
: getPriorTaskSummaryPaths(mid, sid, tid, base),
]);
// Legacy continue file only needed when the primary continue file was absent.
const legacyContinueContent = !continueContent && legacyContinuePath
? await loadFile(legacyContinuePath)
: null;
const taskPlanInline = taskPlanContent
? [
"## Inlined Task Plan (authoritative local execution contract)",
@ -1733,17 +1786,8 @@ export async function buildExecuteTaskPrompt(
`Task plan not found at dispatch time. Read \`${taskPlanRelPath}\` before executing.`,
].join("\n");
const slicePlanPath = resolveSliceFile(base, mid, sid, "PLAN");
const slicePlanContent = slicePlanPath ? await loadFile(slicePlanPath) : null;
const slicePlanExcerpt = extractSliceExecutionExcerpt(slicePlanContent, relSliceFile(base, mid, sid, "PLAN"));
// Check for continue file (new naming or legacy)
const continueFile = resolveSliceFile(base, mid, sid, "CONTINUE");
const legacyContinueDir = resolveSlicePath(base, mid, sid);
const legacyContinuePath = legacyContinueDir ? join(legacyContinueDir, "continue.md") : null;
const continueContent = continueFile ? await loadFile(continueFile) : null;
const legacyContinueContent = !continueContent && legacyContinuePath ? await loadFile(legacyContinuePath) : null;
const continueRelPath = relSliceFile(base, mid, sid, "CONTINUE");
const resumeSection = buildResumeSection(
continueContent,
legacyContinueContent,
@ -1751,28 +1795,19 @@ export async function buildExecuteTaskPrompt(
legacyContinuePath ? `${relSlicePath(base, mid, sid)}/continue.md` : null,
);
const priorLines = priorSummaries.length > 0
? priorSummaries.map(p => `- \`${p}\``).join("\n")
: "- (no prior tasks)";
// For minimal inline level, only carry forward the most recent prior summary
const effectivePriorSummaries = inlineLevel === "minimal" && priorSummaries.length > 1
? priorSummaries.slice(-1)
: priorSummaries;
const carryForwardSection = await buildCarryForwardSection(effectivePriorSummaries, base);
// Inline project knowledge if available (smart-chunked for relevance)
const knowledgeAbsPath = resolveSfRootFile(base, "KNOWLEDGE");
const knowledgeInlineET = existsSync(knowledgeAbsPath)
? await inlineFileSmart(
knowledgeAbsPath,
relSfRootFile("KNOWLEDGE"),
"Project Knowledge",
`${tTitle} ${sTitle}`, // use task + slice title as relevance query
)
: null;
// Only include if it has content (not a "not found" result)
// Only include knowledge if it has content (not a "not found" result)
const knowledgeContent = knowledgeInlineET && !knowledgeInlineET.includes("not found") ? knowledgeInlineET : null;
// Knowledge graph: tight subgraph for this task (graceful — skipped if no graph.json)
const graphBlockET = await inlineGraphSubgraph(base, `${tid} ${tTitle}`, { budget: 2000 });
const inlinedTemplates = inlineLevel === "minimal"
? inlineTemplate("task-summary", "Task Summary")
: [
@ -1784,9 +1819,12 @@ export async function buildExecuteTaskPrompt(
const taskSummaryPath = join(base, `${relSlicePath(base, mid, sid)}/tasks/${tid}-SUMMARY.md`);
const activeOverrides = await loadActiveOverrides(base);
const overridesSection = formatOverridesSection(activeOverrides);
const runtimeContext = runtimeContent
? `### Runtime Context\nSource: \`.gsd/RUNTIME.md\`\n\n${runtimeContent.trim()}`
: "";
// Compute verification budget for the executor's context window (issue #707)
const prefs = loadEffectiveSFPreferences();
const contextWindow = resolveExecutorContextWindow(opts.modelRegistry, prefs?.preferences, opts.sessionContextWindow);
@ -1800,13 +1838,6 @@ export async function buildExecuteTaskPrompt(
finalCarryForward = truncateAtSectionBoundary(carryForwardSection, carryForwardBudget).content;
}
// Inline RUNTIME.md if present
const runtimePath = resolveRuntimeFile(base);
const runtimeContent = existsSync(runtimePath) ? await loadFile(runtimePath) : null;
const runtimeContext = runtimeContent
? `### Runtime Context\nSource: \`.gsd/RUNTIME.md\`\n\n${runtimeContent.trim()}`
: "";
let phaseAnchorSection = planAnchor ? formatAnchorForPrompt(planAnchor) : "";
// ADR-011 Phase 2: inject any resolved-but-unapplied escalation override
@ -1868,6 +1899,7 @@ export async function buildExecuteTaskPrompt(
taskTitle: tTitle,
taskPlanContent,
extraContext: [taskPlanInline, slicePlanExcerpt, finalCarryForward, resumeSection],
preferences: prefs?.preferences,
}),
});
}
@ -1906,15 +1938,19 @@ export async function buildCompleteSlicePrompt(
const tDir = resolveTasksDir(base, mid, sid);
if (!tDir) return null;
const summaryFiles = resolveTaskFiles(tDir, "SUMMARY").sort();
if (summaryFiles.length === 0) return null;
const sRel = relSlicePath(base, mid, sid);
const blocks: string[] = [];
for (const file of summaryFiles) {
const absPath = join(tDir, file);
const content = await loadFile(absPath);
if (!content) continue;
const relPath = `${sRel}/tasks/${file}`;
blocks.push(`### Task Summary: ${file.replace(/-SUMMARY\.md$/i, "")}\nSource: \`${relPath}\`\n\n${content.trim()}`);
}
// Load all task summaries in parallel — independent reads.
const entries = await Promise.all(
summaryFiles.map(async (file) => {
const absPath = join(tDir, file);
const content = await loadFile(absPath);
if (!content) return null;
const relPath = `${sRel}/tasks/${file}`;
return `### Task Summary: ${file.replace(/-SUMMARY\.md$/i, "")}\nSource: \`${relPath}\`\n\n${content.trim()}`;
}),
);
const blocks = entries.filter((b): b is string => b !== null);
return blocks.length > 0 ? blocks.join("\n\n---\n\n") : null;
}
case "templates": {
@ -2026,17 +2062,23 @@ export async function buildCompleteMilestonePrompt(
sliceIds = parseRoadmap(roadmapContent).slices.map(s => s.id);
}
}
const seenSlices = new Set<string>();
// Deduplicate slice IDs while preserving order.
const uniqueSliceIds = [...new Set(sliceIds)];
// Load all slice summary excerpts in parallel — independent reads.
const sliceSummaryResults = await Promise.all(
uniqueSliceIds.map(async (sid) => {
const summaryPath = resolveSliceFile(base, mid, sid, "SUMMARY");
const summaryRel = relSliceFile(base, mid, sid, "SUMMARY");
// Compact excerpt instead of full inline (#4780). Closer reads the
// full file on-demand when synthesizing LEARNINGS narrative.
const excerpt = await buildSliceSummaryExcerpt(summaryPath, summaryRel, sid);
return { sid, summaryRel, excerpt };
}),
);
const summaryRelPaths: string[] = [];
for (const sid of sliceIds) {
if (seenSlices.has(sid)) continue;
seenSlices.add(sid);
const summaryPath = resolveSliceFile(base, mid, sid, "SUMMARY");
const summaryRel = relSliceFile(base, mid, sid, "SUMMARY");
for (const { summaryRel, excerpt } of sliceSummaryResults) {
summaryRelPaths.push(summaryRel);
// Compact excerpt instead of full inline (#4780). Closer reads the
// full file on-demand when synthesizing LEARNINGS narrative.
inlined.push(await buildSliceSummaryExcerpt(summaryPath, summaryRel, sid));
inlined.push(excerpt);
}
if (summaryRelPaths.length > 0) {
const pathList = summaryRelPaths.map(p => `- \`${p}\``).join("\n");
@ -2143,31 +2185,50 @@ export async function buildValidateMilestonePrompt(
valSliceIds = parseRoadmap(roadmapContent).slices.map(s => s.id);
}
}
const seenValSlices = new Set<string>();
for (const sid of valSliceIds) {
if (seenValSlices.has(sid)) continue;
seenValSlices.add(sid);
const summaryPath = resolveSliceFile(base, mid, sid, "SUMMARY");
const summaryRel = relSliceFile(base, mid, sid, "SUMMARY");
inlined.push(await inlineFile(summaryPath, summaryRel, `${sid} Summary`));
// Single parallel pass per slice: load summary + assessment, derive inline
// blocks AND outstanding-items extraction in one read (previously two loops
// that each called loadFile on every SUMMARY).
const uniqueValSliceIds = [...new Set(valSliceIds)];
const valSliceResults = await Promise.all(
uniqueValSliceIds.map(async (sid) => {
const summaryPath = resolveSliceFile(base, mid, sid, "SUMMARY");
const summaryRel = relSliceFile(base, mid, sid, "SUMMARY");
const assessmentPath = resolveSliceFile(base, mid, sid, "ASSESSMENT");
const assessmentRel = relSliceFile(base, mid, sid, "ASSESSMENT");
const assessmentPath = resolveSliceFile(base, mid, sid, "ASSESSMENT");
const assessmentRel = relSliceFile(base, mid, sid, "ASSESSMENT");
const assessmentInline = await inlineFileOptional(assessmentPath, assessmentRel, `${sid} Assessment`);
if (assessmentInline) inlined.push(assessmentInline);
}
const [summaryContent, assessmentInline] = await Promise.all([
summaryPath ? loadFile(summaryPath) : Promise.resolve(null),
inlineFileOptional(assessmentPath, assessmentRel, `${sid} Assessment`),
]);
// Aggregate unresolved follow-ups and known limitations across slices
const summaryInline = summaryContent
? `### ${sid} Summary\nSource: \`${summaryRel}\`\n\n${summaryContent.trim()}`
: `### ${sid} Summary\nSource: \`${summaryRel}\`\n\n_(not found — file does not exist yet)_`;
// Derive outstanding items from the same content we just loaded.
const outstandingLines: string[] = [];
if (summaryContent) {
try {
const summary = parseSummary(summaryContent);
if (summary.followUps) outstandingLines.push(`- **${sid} Follow-ups:** ${summary.followUps.trim()}`);
if (summary.knownLimitations) outstandingLines.push(`- **${sid} Known Limitations:** ${summary.knownLimitations.trim()}`);
} catch {
// parseSummary failure — skip outstanding items for this slice
}
}
return { summaryInline, assessmentInline, outstandingLines };
}),
);
// Push inline blocks in order; collect outstanding items across all slices.
const outstandingItems: string[] = [];
for (const sid of valSliceIds) {
const summaryPath = resolveSliceFile(base, mid, sid, "SUMMARY");
if (!summaryPath) continue;
const content = await loadFile(summaryPath);
if (!content) continue;
const summary = parseSummary(content);
if (summary.followUps) outstandingItems.push(`- **${sid} Follow-ups:** ${summary.followUps.trim()}`);
if (summary.knownLimitations) outstandingItems.push(`- **${sid} Known Limitations:** ${summary.knownLimitations.trim()}`);
for (const { summaryInline, assessmentInline, outstandingLines } of valSliceResults) {
inlined.push(summaryInline);
if (assessmentInline) inlined.push(assessmentInline);
outstandingItems.push(...outstandingLines);
}
if (outstandingItems.length > 0) {
inlined.push(`### Outstanding Items (aggregated from slice summaries)\n\nThese follow-ups and known limitations were documented during slice completion but have not been resolved.\n\n${outstandingItems.join('\n')}`);
}
@ -2489,41 +2550,44 @@ export async function buildReactiveExecutePrompt(
...graphLines,
].join("\n");
// Build individual subagent prompts for each ready task
const subagentSections: string[] = [];
const readyTaskListLines: string[] = [];
// Build individual subagent prompts for each ready task in parallel.
const modelSuffix = subagentModel ? ` with model: "${subagentModel}"` : "";
const taskResults = await Promise.all(
readyTaskIds.map(async (tid) => {
const node = graph.find((n) => n.id === tid);
const tTitle = node?.title ?? tid;
for (const tid of readyTaskIds) {
const node = graph.find((n) => n.id === tid);
const tTitle = node?.title ?? tid;
readyTaskListLines.push(`- **${tid}: ${tTitle}**`);
// Build dependency-scoped carry-forward paths for this task.
const depPaths = await getDependencyTaskSummaryPaths(
mid, sid, tid, node?.dependsOn ?? [], base,
);
// Build dependency-scoped carry-forward paths for this task
const depPaths = await getDependencyTaskSummaryPaths(
mid, sid, tid, node?.dependsOn ?? [], base,
);
// Build a full execute-task prompt with dependency-based carry-forward.
const taskPrompt = await buildExecuteTaskPrompt(
mid, sid, sTitle, tid, tTitle, base,
{
carryForwardPaths: depPaths,
sessionContextWindow: opts?.sessionContextWindow,
modelRegistry: opts?.modelRegistry,
},
);
// Build a full execute-task prompt with dependency-based carry-forward
const taskPrompt = await buildExecuteTaskPrompt(
mid, sid, sTitle, tid, tTitle, base,
{
carryForwardPaths: depPaths,
sessionContextWindow: opts?.sessionContextWindow,
modelRegistry: opts?.modelRegistry,
},
);
const section = [
`### ${tid}: ${tTitle}`,
"",
`Use this as the prompt for a \`subagent\` call${modelSuffix}:`,
"",
"```",
taskPrompt,
"```",
].join("\n");
const modelSuffix = subagentModel ? ` with model: "${subagentModel}"` : "";
subagentSections.push([
`### ${tid}: ${tTitle}`,
"",
`Use this as the prompt for a \`subagent\` call${modelSuffix}:`,
"",
"```",
taskPrompt,
"```",
].join("\n"));
}
return { tid, tTitle, section };
}),
);
const readyTaskListLines = taskResults.map(({ tid, tTitle }) => `- **${tid}: ${tTitle}**`);
const subagentSections = taskResults.map(({ section }) => section);
const inlinedTemplates = inlineTemplate("task-summary", "Task Summary");
@ -2595,21 +2659,22 @@ export async function buildParallelResearchSlicesPrompt(
basePath: string,
subagentModel?: string,
): Promise<string> {
// Build individual research-slice prompts for each slice
const subagentSections: string[] = [];
// Build individual research-slice prompts for each slice in parallel.
const modelSuffix = subagentModel ? ` with model: "${subagentModel}"` : "";
for (const slice of slices) {
const slicePrompt = await buildResearchSlicePrompt(mid, midTitle, slice.id, slice.title, basePath);
subagentSections.push([
`### ${slice.id}: ${slice.title}`,
"",
`Use this as the prompt for a \`subagent\` call${modelSuffix} (agent: \`gsd-executor\` or the default agent):`,
"",
"```",
slicePrompt,
"```",
].join("\n"));
}
const subagentSections = await Promise.all(
slices.map(async (slice) => {
const slicePrompt = await buildResearchSlicePrompt(mid, midTitle, slice.id, slice.title, basePath);
return [
`### ${slice.id}: ${slice.title}`,
"",
`Use this as the prompt for a \`subagent\` call${modelSuffix} (agent: \`gsd-executor\` or the default agent):`,
"",
"```",
slicePrompt,
"```",
].join("\n");
}),
);
return loadPrompt("parallel-research-slices", {
mid,

View file

@ -145,11 +145,64 @@ export function startUnitSupervision(sctx: SupervisionContext): void {
);
}, softTimeoutMs);
// ── 2. Idle watchdog ──
// ── 2. Idle watchdog (shared 15s tick — also runs context-pressure check) ──
if (s.continueHereHandle) {
clearInterval(s.continueHereHandle);
s.continueHereHandle = null;
}
const executorContextWindow = resolveExecutorContextWindow(
ctx.modelRegistry as Parameters<typeof resolveExecutorContextWindow>[0],
prefs as Parameters<typeof resolveExecutorContextWindow>[1],
ctx.model?.contextWindow,
);
const continueHereThreshold = computeBudgets(executorContextWindow).continueThresholdPercent;
s.idleWatchdogHandle = setInterval(async () => {
try {
if (!s.active || !s.currentUnit) return;
// Read runtime record ONCE and share it between both checks.
const runtime = readUnitRuntimeRecord(s.basePath, unitType, unitId);
// ── 2a. Context-pressure / continue-here check ──
// Runs first so it fires even when the agent is still making progress.
if (s.cmdCtx && runtime && !runtime.continueHereFired) {
const contextUsage = s.cmdCtx.getContextUsage();
if (contextUsage && contextUsage.percent != null && contextUsage.percent >= continueHereThreshold) {
writeUnitRuntimeRecord(s.basePath, unitType, unitId, s.currentUnit.startedAt, {
continueHereFired: true,
});
if (s.verbose) {
ctx.ui.notify(
`Context at ${contextUsage.percent}% (threshold: ${continueHereThreshold}%) — sending wrap-up signal.`,
"info",
);
}
// Only trigger a new turn if no tools are currently in flight (#3512).
const contextTrigger = getInFlightToolCount() === 0;
pi.sendMessage(
{
customType: "sf-auto-wrapup",
display: s.verbose,
content: [
"**CONTEXT BUDGET WARNING — wrap up this unit now.**",
`Context window is at ${contextUsage.percent}% (threshold: ${continueHereThreshold}%).`,
"The next unit needs a fresh context to work effectively. Wrap up now:",
"1. Finish any in-progress file writes",
"2. Write or update the required durable artifacts (summary, checkboxes)",
"3. Mark task state on disk correctly",
"4. Leave precise resume notes if anything remains unfinished",
"Do NOT start new sub-tasks or investigations.",
].join("\n"),
},
{ triggerTurn: contextTrigger },
);
}
}
// ── 2b. Idle watchdog check ──
if (!runtime) return;
if (Date.now() - runtime.lastProgressAt < idleTimeoutMs) return;
@ -268,60 +321,5 @@ export function startUnitSupervision(sctx: SupervisionContext): void {
}
}, hardTimeoutMs);
// ── 4. Context-pressure continue-here monitor ──
if (s.continueHereHandle) {
clearInterval(s.continueHereHandle);
s.continueHereHandle = null;
}
const executorContextWindow = resolveExecutorContextWindow(
ctx.modelRegistry as Parameters<typeof resolveExecutorContextWindow>[0],
prefs as Parameters<typeof resolveExecutorContextWindow>[1],
ctx.model?.contextWindow,
);
const continueHereThreshold = computeBudgets(executorContextWindow).continueThresholdPercent;
s.continueHereHandle = setInterval(() => {
if (!s.active || !s.currentUnit || !s.cmdCtx) return;
const runtime = readUnitRuntimeRecord(s.basePath, unitType, unitId);
if (runtime?.continueHereFired) return;
const contextUsage = s.cmdCtx.getContextUsage();
if (!contextUsage || contextUsage.percent == null || contextUsage.percent < continueHereThreshold) return;
writeUnitRuntimeRecord(s.basePath, unitType, unitId, s.currentUnit!.startedAt, {
continueHereFired: true,
});
if (s.verbose) {
ctx.ui.notify(
`Context at ${contextUsage.percent}% (threshold: ${continueHereThreshold}%) — sending wrap-up signal.`,
"info",
);
}
// Only trigger a new turn if no tools are currently in flight (#3512).
const contextTrigger = getInFlightToolCount() === 0;
pi.sendMessage(
{
customType: "sf-auto-wrapup",
display: s.verbose,
content: [
"**CONTEXT BUDGET WARNING — wrap up this unit now.**",
`Context window is at ${contextUsage.percent}% (threshold: ${continueHereThreshold}%).`,
"The next unit needs a fresh context to work effectively. Wrap up now:",
"1. Finish any in-progress file writes",
"2. Write or update the required durable artifacts (summary, checkboxes)",
"3. Mark task state on disk correctly",
"4. Leave precise resume notes if anything remains unfinished",
"Do NOT start new sub-tasks or investigations.",
].join("\n"),
},
{ triggerTurn: contextTrigger },
);
if (s.continueHereHandle) {
clearInterval(s.continueHereHandle);
s.continueHereHandle = null;
}
}, 15_000);
}

View file

@ -87,7 +87,7 @@ export class SFDashboardOverlay {
this.refreshTimer = setInterval(() => {
this.scheduleRefresh();
}, 2000);
}, 10_000);
}
private scheduleRefresh(initial = false): void {

View file

@ -254,23 +254,19 @@ export function nativeWorkingTreeStatus(basePath: string): string {
return gitExec(basePath, ["status", "--porcelain"], true);
}
// ─── nativeHasChanges fallback cache (10s TTL) ────────────────────────
// ─── nativeHasChanges cache (30s TTL, applies to both native and fallback)
let _hasChangesCachedResult: boolean = false;
let _hasChangesCachedAt: number = 0;
let _hasChangesCachedPath: string = "";
const HAS_CHANGES_CACHE_TTL_MS = 10_000; // 10 seconds
const HAS_CHANGES_CACHE_TTL_MS = 30_000; // 30 seconds
/**
* Quick check: any staged or unstaged changes?
* Native: libgit2 status check (single syscall).
* Fallback: `git status --short` (cached for 10s per basePath).
* Fallback: `git status --short`.
* Result is cached for 30s per basePath regardless of which path is taken.
*/
export function nativeHasChanges(basePath: string): boolean {
const native = loadNative();
if (native) {
return native.gitHasChanges(basePath);
}
const now = Date.now();
if (
basePath === _hasChangesCachedPath &&
@ -279,8 +275,14 @@ export function nativeHasChanges(basePath: string): boolean {
return _hasChangesCachedResult;
}
const result = gitExec(basePath, ["status", "--short"], true);
const hasChanges = result !== "";
const native = loadNative();
let hasChanges: boolean;
if (native) {
hasChanges = native.gitHasChanges(basePath);
} else {
const result = gitExec(basePath, ["status", "--short"], true);
hasChanges = result !== "";
}
_hasChangesCachedResult = hasChanges;
_hasChangesCachedAt = now;

View file

@ -10,7 +10,7 @@
import { existsSync, statSync, readFileSync, openSync, readSync, closeSync, readdirSync } from "node:fs";
import { join } from "node:path";
import { spawnSync } from "node:child_process";
import { spawn } from "node:child_process";
import type { Theme } from "@singularity-forge/pi-coding-agent";
import { truncateToWidth, visibleWidth, matchesKey, Key } from "@singularity-forge/pi-tui";
@ -18,6 +18,24 @@ import { truncateToWidth, visibleWidth, matchesKey, Key } from "@singularity-for
import { formatDuration, STATUS_GLYPH, STATUS_COLOR } from "../shared/mod.js";
import { formattedShortcutPair } from "./shortcut-defs.js";
// ─── Async SQLite Helper ──────────────────────────────────────────────────
/**
 * Run a query through the `sqlite3` CLI without blocking the event loop
 * (replaces a spawnSync call that could stall the TUI for up to 3s).
 *
 * @param dbPath Path to the SQLite database file.
 * @param sql    SQL statement passed as a single CLI argument.
 * @returns stdout of the query on success; "" on any failure (non-zero exit,
 *          spawn error, or the 3s timeout killing a hung child).
 */
function runSqliteAsync(dbPath: string, sql: string): Promise<string> {
  return new Promise((resolve) => {
    // Ignore stdin and stderr: with the default pipes, an unread stderr
    // stream can fill its OS buffer and wedge the child until the timeout
    // fires. We only ever consume stdout.
    const child = spawn("sqlite3", [dbPath, sql], {
      timeout: 3000,
      stdio: ["ignore", "pipe", "ignore"],
    });
    const chunks: Buffer[] = [];
    child.stdout?.on("data", (chunk: Buffer) => chunks.push(chunk));
    child.on("error", () => resolve(""));
    child.on("close", (code) => {
      // `code` is null when the child was killed (e.g. by the timeout);
      // treat anything other than a clean exit as "no result".
      // Concat buffers before decoding so multi-byte UTF-8 sequences split
      // across chunk boundaries are decoded correctly.
      resolve(code === 0 ? Buffer.concat(chunks).toString("utf-8") : "");
    });
  });
}
// ─── Types ────────────────────────────────────────────────────────────────
interface StatusJson {
@ -125,15 +143,14 @@ function discoverWorkers(basePath: string): string[] {
return [...mids].sort();
}
function querySliceProgress(basePath: string, mid: string): SliceProgress[] {
async function querySliceProgress(basePath: string, mid: string): Promise<SliceProgress[]> {
const dbPath = join(basePath, ".sf", "worktrees", mid, ".sf", "sf.db");
if (!existsSync(dbPath)) return [];
try {
const sql = `SELECT s.id, s.status, COUNT(t.id), SUM(CASE WHEN t.status='complete' THEN 1 ELSE 0 END) FROM slices s LEFT JOIN tasks t ON s.milestone_id=t.milestone_id AND s.id=t.slice_id WHERE s.milestone_id='${mid}' GROUP BY s.id ORDER BY s.id`;
const result = spawnSync("sqlite3", [dbPath, sql], { timeout: 3000, encoding: "utf-8" });
const out = (result.stdout || "").trim();
if (!out || result.status !== 0) return [];
const out = (await runSqliteAsync(dbPath, sql)).trim();
if (!out) return [];
return out.split("\n").map((line) => {
const [id, status, total, done] = line.split("|");
return { id, status, total: parseInt(total, 10), done: parseInt(done || "0", 10) };
@ -165,14 +182,13 @@ function extractCostFromNdjson(basePath: string, mid: string): number {
}
}
function queryRecentCompletions(basePath: string, mid: string): string[] {
async function queryRecentCompletions(basePath: string, mid: string): Promise<string[]> {
const dbPath = join(basePath, ".sf", "worktrees", mid, ".sf", "sf.db");
if (!existsSync(dbPath)) return [];
try {
const sql = `SELECT id, slice_id, one_liner FROM tasks WHERE milestone_id='${mid}' AND status='complete' AND completed_at IS NOT NULL ORDER BY completed_at DESC LIMIT 5`;
const result = spawnSync("sqlite3", [dbPath, sql], { timeout: 3000, encoding: "utf-8" });
const out = (result.stdout || "").trim();
if (!out || result.status !== 0) return [];
const out = (await runSqliteAsync(dbPath, sql)).trim();
if (!out) return [];
return out.split("\n").map((line) => {
const [taskId, sliceId, oneLiner] = line.split("|");
return `${mid}/${sliceId}/${taskId}${oneLiner ? ": " + oneLiner : ""}`;
@ -182,15 +198,19 @@ function queryRecentCompletions(basePath: string, mid: string): string[] {
}
}
function collectWorkerData(basePath: string): WorkerView[] {
async function collectWorkerData(basePath: string): Promise<WorkerView[]> {
const mids = discoverWorkers(basePath);
const parallelDir = join(basePath, ".sf", "parallel");
const allSlices = await Promise.all(mids.map((mid) => querySliceProgress(basePath, mid)));
const workers: WorkerView[] = [];
for (const mid of mids) {
for (let i = 0; i < mids.length; i++) {
const mid = mids[i];
const slices = allSlices[i];
const status = readJsonSafe<StatusJson>(join(parallelDir, `${mid}.status.json`));
const lock = readJsonSafe<AutoLock>(join(basePath, ".sf", "worktrees", mid, ".sf", "auto.lock"));
const slices = querySliceProgress(basePath, mid);
const pid = lock?.pid || status?.pid || 0;
const alive = pid ? isPidAlive(pid) : false;
@ -319,16 +339,21 @@ export class ParallelMonitorOverlay {
process.stdout.on("resize", this.resizeHandler);
this.refresh();
this.refreshTimer = setInterval(() => this.refresh(), 5000);
this.refreshTimer = setInterval(() => this.refresh(), 15_000);
}
private refresh(): void {
private async refresh(): Promise<void> {
if (this.disposed) return;
this.workers = collectWorkerData(this.basePath);
const workers = await collectWorkerData(this.basePath);
if (this.disposed) return;
this.workers = workers;
// Collect completion events
for (const wk of this.workers) {
const completions = queryRecentCompletions(this.basePath, wk.mid);
// Collect completion events in parallel across workers
const allCompletions = await Promise.all(
this.workers.map((wk) => queryRecentCompletions(this.basePath, wk.mid)),
);
if (this.disposed) return;
for (const completions of allCompletions) {
for (const evt of completions) {
if (!this.events.includes(evt)) this.events.push(evt);
}
@ -393,7 +418,7 @@ export class ParallelMonitorOverlay {
lines.push(
t.fg("muted", ` ${now}${aliveCount}/${this.workers.length} alive │ Total: `) +
t.bold(`$${totalCost.toFixed(2)}`) +
t.fg("muted", " │ 5s refresh"),
t.fg("muted", " │ 15s refresh"),
);
lines.push(t.fg("muted", "─".repeat(w)));

View file

@ -72,13 +72,10 @@ export async function composeInlinedContext(
const manifest: UnitContextManifest | null = resolveManifest(unitType);
if (!manifest) return "";
const blocks: string[] = [];
for (const key of manifest.artifacts.inline) {
const body = await resolveArtifact(key);
if (body !== null && body.length > 0) {
blocks.push(body);
}
}
// Resolve all artifact keys in parallel — each resolver is an independent
// DB read or file load, so sequential awaiting just serializes I/O for no gain.
const results = await Promise.all(manifest.artifacts.inline.map(resolveArtifact));
const blocks = results.filter((b): b is string => b !== null && b.length > 0);
return blocks.join("\n\n---\n\n");
}
@ -153,20 +150,24 @@ export async function composeUnitContext(
const manifest: UnitContextManifest | null = resolveManifest(unitType);
if (!manifest) return { prepend: "", inline: "" };
const prependBlocks = await runComputed(manifest.prepend ?? [], opts);
const inlineBlocks: string[] = [];
// Resolve prepend computed artifacts, inline keys, excerpt keys, and inline
// computed artifacts all in parallel — they are independent I/O operations.
const [prependBlocks, inlineResolved, excerptResolved, computedBlocks] = await Promise.all([
runComputed(manifest.prepend ?? [], opts),
opts.resolveArtifact
? Promise.all(manifest.artifacts.inline.map(opts.resolveArtifact))
: Promise.resolve([] as (string | null)[]),
opts.resolveExcerpt
? Promise.all(manifest.artifacts.excerpt.map(opts.resolveExcerpt))
: Promise.resolve([] as (string | null)[]),
runComputed(manifest.artifacts.computed ?? [], opts),
]);
for (const key of manifest.artifacts.inline) {
if (!opts.resolveArtifact) break;
const body = await opts.resolveArtifact(key);
if (body && body.length > 0) inlineBlocks.push(body);
}
for (const key of manifest.artifacts.excerpt) {
if (!opts.resolveExcerpt) break;
const body = await opts.resolveExcerpt(key);
if (body && body.length > 0) inlineBlocks.push(body);
}
inlineBlocks.push(...await runComputed(manifest.artifacts.computed ?? [], opts));
const inlineBlocks: string[] = [
...inlineResolved.filter((b): b is string => !!b && b.length > 0),
...excerptResolved.filter((b): b is string => !!b && b.length > 0),
...computedBlocks,
];
return {
prepend: prependBlocks.join(SECTION_SEPARATOR),
@ -184,14 +185,16 @@ async function runComputed(
opts: ComposeUnitContextOptions,
): Promise<string[]> {
if (ids.length === 0 || !opts.computed) return [];
const out: string[] = [];
for (const id of ids) {
const entry = opts.computed[id] as
| { build: (i: unknown, b: BaseResolverContext) => Promise<string | null>; inputs: unknown }
| undefined;
if (!entry) continue;
const body = await entry.build(entry.inputs, opts.base);
if (body && body.length > 0) out.push(body);
}
return out;
// Computed builders are independent — run them in parallel and filter
// nulls/empties while preserving the manifest's declared order.
const results = await Promise.all(
ids.map(async (id) => {
const entry = opts.computed![id] as
| { build: (i: unknown, b: BaseResolverContext) => Promise<string | null>; inputs: unknown }
| undefined;
if (!entry) return null;
return entry.build(entry.inputs, opts.base);
}),
);
return results.filter((b): b is string => !!b && b.length > 0);
}

View file

@ -57,6 +57,19 @@ function runtimePath(basePath: string, unitType: string, unitId: string): string
return join(runtimeDir(basePath), `${sanitizedUnitType}-${sanitizedUnitId}.json`);
}
// ─── In-memory runtime record cache ─────────────────────────────────────────
// Avoids repeated disk reads for the same unit within a single dispatch cycle.
// Keyed by the record's on-disk path. Entries are refreshed on every write and
// evicted by clearUnitRuntimeRecord; reads populate the cache on a disk hit.
// NOTE(review): process-local only — once a path is cached, external edits to
// the underlying file are not observed by this process.
const _runtimeCache = new Map<string, AutoUnitRuntimeRecord>();
/**
 * Load a unit runtime record straight from disk, bypassing the cache.
 *
 * Returns null when the file does not exist or does not parse as JSON — a
 * corrupt runtime file must never crash the caller, it just reads as absent.
 */
function readUnitRuntimeRecordFromDisk(path: string): AutoUnitRuntimeRecord | null {
  if (!existsSync(path)) {
    return null;
  }
  try {
    const raw = readFileSync(path, "utf-8");
    return JSON.parse(raw) as AutoUnitRuntimeRecord;
  } catch {
    // Malformed JSON / transient read failure → treat as "no record".
    return null;
  }
}
export function writeUnitRuntimeRecord(
basePath: string,
unitType: string,
@ -67,7 +80,7 @@ export function writeUnitRuntimeRecord(
const dir = runtimeDir(basePath);
mkdirSync(dir, { recursive: true });
const path = runtimePath(basePath, unitType, unitId);
const prev = readUnitRuntimeRecord(basePath, unitType, unitId);
const prev = _runtimeCache.get(path) ?? null;
const next: AutoUnitRuntimeRecord = {
version: 1,
unitType,
@ -86,21 +99,22 @@ export function writeUnitRuntimeRecord(
lastRecoveryReason: updates.lastRecoveryReason ?? prev?.lastRecoveryReason,
};
writeFileSync(path, JSON.stringify(next, null, 2) + "\n", "utf-8");
_runtimeCache.set(path, next);
return next;
}
/**
 * Read the runtime record for a unit, preferring the in-memory cache.
 *
 * Cache-miss falls back to disk and populates the cache on a successful
 * parse. A missing or corrupt file yields null and is deliberately NOT
 * cached, so a record written later is still picked up on the next read.
 *
 * Fix: the previous text contained both the old disk-read body and the new
 * cache-aware body concatenated (merge/diff residue); every path of the old
 * body returned, leaving the cache logic unreachable. Only the cache-aware
 * implementation is kept.
 */
export function readUnitRuntimeRecord(basePath: string, unitType: string, unitId: string): AutoUnitRuntimeRecord | null {
  const path = runtimePath(basePath, unitType, unitId);
  const cached = _runtimeCache.get(path);
  if (cached !== undefined) return cached;
  const record = readUnitRuntimeRecordFromDisk(path);
  if (record !== null) _runtimeCache.set(path, record);
  return record;
}
export function clearUnitRuntimeRecord(basePath: string, unitType: string, unitId: string): void {
const path = runtimePath(basePath, unitType, unitId);
_runtimeCache.delete(path);
if (existsSync(path)) unlinkSync(path);
}

View file

@ -119,7 +119,7 @@ export class SFVisualizerOverlay {
this.invalidate();
this.tui.requestRender();
}).catch(() => {}); // retry on next interval
}, 5000);
}, 30_000);
}
private parseSGRMouse(data: string): { button: number; x: number; y: number; press: boolean } | null {