feat(sf): port commands-memory, component-loader, workflow-oneshot prompt + sweeps

- commands-memory.ts: /sf memory command handlers (add/list/search/delete)
- component-loader.ts: component lifecycle management and validation
- prompts/workflow-oneshot.md: oneshot workflow execution prompt template
- session-forensics.ts, definition-io.ts, sf-db.ts, commands-scaffold-sync,
  worktree-resolver: secondary sweep improvements

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
Mikael Hugo 2026-05-02 02:27:42 +02:00
parent 3a3ea29c51
commit 360208cbaf
10 changed files with 1758 additions and 11 deletions

View file

@ -0,0 +1,59 @@
/**
* SF Bootstrap Ask-User Gate
*
* Runtime safety net for `ask_user_questions` calls in autonomous mode.
* The system prompt already forbids these calls when canAskUser=false, but
* this gate provides a second line of defence at the tool layer.
*
* Usage: call `gateAskUserQuestions(payload)` inside the tool handler for
* `ask_user_questions`. If the return value has `allow: false`, return the
* `reason` string as the tool's error response so the agent re-plans.
*
* // TODO: integrate into ask_user_questions tool registry once the workflow-mcp
* // handler and any pi-coding-agent tool registration path surface a
* // pre-invoke hook point. Current wiring entry point candidates:
* // - packages/pi-coding-agent/src/modes/rpc/rpc-mode.ts (tool dispatch)
* // - src/resources/extensions/sf/workflow-mcp.ts (MCP form elicitation)
*/
import { isAutoActive, isCanAskUser } from "../auto.js";
import { logWarning } from "../workflow-logger.js";
/** Result of an ask-user gate check. */
export interface AskGateDecision {
  /** True when the `ask_user_questions` call may proceed. */
  allow: boolean;
  /** Present only when blocked: agent-readable explanation of why, and what to do instead. */
  reason?: string;
}
/**
 * Gate for `ask_user_questions` tool calls. In autonomous mode
 * (`isAutoActive() && !isCanAskUser()`) the call is blocked with a structured
 * rejection message the agent can read and act on (escalate to Tier 1/2).
 *
 * In auto/step mode (`canAskUser=true`) all calls pass through.
 *
 * @param questionPayload - Raw tool-call input; used only for diagnostic logging.
 * @returns `{ allow: true }` to permit the call, or `{ allow: false, reason }` to block.
 */
export function gateAskUserQuestions(
  questionPayload: unknown,
): AskGateDecision {
  if (!isAutoActive() || isCanAskUser()) {
    return { allow: true };
  }
  const reason =
    "ask_user_questions is forbidden in autonomous mode. " +
    "Resolve via Tier 1 (code/sift/source files/.sf/KNOWLEDGE.md/.sf/DECISIONS.md) " +
    "or Tier 2 (WebSearch/WebFetch/Context7). " +
    "If the question is genuinely user-only (a preference, intent, design choice), " +
    "exit with a structured blocker message naming the unresolved ambiguity instead of calling this tool.";
  // Diagnostic snapshot of the payload. JSON.stringify throws on circular
  // structures and BigInt values — a safety gate must never crash on its own
  // input, so fall back to String() when serialization fails.
  let payloadSnippet: string;
  if (typeof questionPayload === "object" && questionPayload !== null) {
    try {
      payloadSnippet = JSON.stringify(questionPayload).slice(0, 200);
    } catch {
      payloadSnippet = String(questionPayload).slice(0, 200);
    }
  } else {
    // Primitives (and null via the branch above in the original) stringify safely.
    payloadSnippet = String(questionPayload);
  }
  logWarning("safety", "blocked ask_user_questions in autonomous mode", {
    payload: payloadSnippet,
  });
  return { allow: false, reason };
}

View file

@ -0,0 +1,510 @@
import type { ExtensionAPI, ExtensionCommandContext } from "@singularity-forge/pi-coding-agent";
import {
assertValidDebugSessionSlug,
createDebugSession,
listDebugSessions,
loadDebugSession,
updateDebugSession,
type DebugTddGate,
type DebugSpecialistReview,
} from "./debug-session-store.js";
import { loadPrompt } from "./prompt-loader.js";
/** Discriminated union of parsed `/sf debug` invocations. */
export type DebugCommandIntent
  = { type: "usage" }                         // no args: print usage help
  | { type: "issue-start"; issue: string }    // free-form issue text: start a new session
  | { type: "list" }
  | { type: "status"; slug: string }
  | { type: "continue"; slug: string }
  | { type: "diagnose"; slug?: string }       // store-wide or per-session diagnostics
  | { type: "diagnose-issue"; issue: string } // start a new session in diagnose mode
  | { type: "error"; message: string };       // parse failure with a user-facing message
// Recognized subcommand heads. parseDebugCommand uses this set so that known
// dash-prefixed heads (only "--diagnose" here) are not rejected as unknown flags.
const SUBCOMMANDS = new Set(["list", "status", "continue", "--diagnose"]);
/**
 * True when `input` passes debug-session slug validation.
 * Validation throws; this wrapper swallows the error so callers can branch
 * instead of catching.
 */
function isValidSlugCandidate(input: string): boolean {
  try {
    assertValidDebugSessionSlug(input);
  } catch {
    return false;
  }
  return true;
}
/**
 * Render a one-line, human-readable summary of a debug session record.
 * Shape: "<prefix> <slug> [mode=… status=… phase=…] — <issue> (updated <ISO timestamp>)"
 */
function formatSessionLine(prefix: string, session: {
  slug: string;
  mode: string;
  status: string;
  phase: string;
  issue: string;
  updatedAt: number;
}): string {
  const stamp = new Date(session.updatedAt).toISOString();
  const flags = `mode=${session.mode} status=${session.status} phase=${session.phase}`;
  return [prefix, session.slug, `[${flags}]`, "—", `${session.issue} (updated ${stamp})`].join(" ");
}
/** Multi-line usage help for the `/sf debug` command family. */
function usageText(): string {
  const rows = [
    "Usage: /sf debug <issue-text>",
    " /sf debug list",
    " /sf debug status <slug>",
    " /sf debug continue <slug>",
    " /sf debug --diagnose [<slug> | <issue text>]",
  ];
  return rows.join("\n");
}
/**
 * Parse the raw argument string for `/sf debug` into a typed intent.
 * Unrecognized shapes deterministically fall back to `issue-start`, so
 * free-form issue text always works; only unknown `-` flags produce an error.
 */
export function parseDebugCommand(args: string): DebugCommandIntent {
  const trimmed = args.trim();
  if (trimmed === "") return { type: "usage" };
  const tokens = trimmed.split(/\s+/).filter(Boolean);
  const first = tokens[0] ?? "";
  switch (first) {
    case "list":
      // Strict match only; extra tokens mean the user typed issue text.
      return tokens.length === 1 ? { type: "list" } : { type: "issue-start", issue: trimmed };
    case "status": {
      if (tokens.length === 1) return { type: "error", message: "Missing slug. Usage: /sf debug status <slug>" };
      if (tokens.length === 2 && isValidSlugCandidate(tokens[1])) return { type: "status", slug: tokens[1] };
      return { type: "issue-start", issue: trimmed };
    }
    case "continue": {
      if (tokens.length === 1) return { type: "error", message: "Missing slug. Usage: /sf debug continue <slug>" };
      if (tokens.length === 2 && isValidSlugCandidate(tokens[1])) return { type: "continue", slug: tokens[1] };
      return { type: "issue-start", issue: trimmed };
    }
    case "--diagnose": {
      if (tokens.length === 1) return { type: "diagnose" };
      if (tokens.length === 2 && isValidSlugCandidate(tokens[1])) return { type: "diagnose", slug: tokens[1] };
      if (tokens.length >= 3) return { type: "diagnose-issue", issue: tokens.slice(1).join(" ") };
      // Exactly two tokens with an invalid slug: reject rather than guess.
      return { type: "error", message: "Invalid diagnose target. Usage: /sf debug --diagnose [<slug> | <issue text>]" };
    }
    default:
      if (first.startsWith("-") && !SUBCOMMANDS.has(first)) {
        return { type: "error", message: `Unknown debug flag: ${first}.\n${usageText()}` };
      }
      return { type: "issue-start", issue: trimmed };
  }
}
/**
 * Entry point for `/sf debug`. Parses the raw args into a DebugCommandIntent
 * and executes exactly one branch; every branch notifies the UI and returns.
 *
 * @param args - Raw argument string after "/sf debug".
 * @param ctx - Extension command context (UI notifications).
 * @param pi - Optional extension API; when it exposes `sendMessage`, session
 *   starts/continues also dispatch a hidden agent turn.
 */
export async function handleDebug(args: string, ctx: ExtensionCommandContext, pi?: ExtensionAPI): Promise<void> {
  const parsed = parseDebugCommand(args);
  // Session artifacts are stored relative to the current working directory.
  const basePath = process.cwd();
  if (parsed.type === "usage") {
    ctx.ui.notify(usageText(), "info");
    return;
  }
  if (parsed.type === "error") {
    ctx.ui.notify(parsed.message, "warning");
    return;
  }
  // ── issue-start: create a find_and_fix session, then optionally dispatch ──
  if (parsed.type === "issue-start") {
    const issue = parsed.issue.trim();
    if (!issue) {
      ctx.ui.notify(`Issue text is required.\n${usageText()}`, "warning");
      return;
    }
    try {
      const created = createDebugSession(basePath, { issue });
      const s = created.session;
      // Duck-typing guard: only dispatch when the API object actually exposes sendMessage.
      const canDispatch = pi != null && typeof (pi as ExtensionAPI).sendMessage === "function";
      const dispatchNote = canDispatch ? `\ndispatchMode=find_and_fix` : "";
      ctx.ui.notify(
        [
          `Debug session started: ${s.slug}`,
          formatSessionLine("Session:", s),
          `Artifact: ${created.artifactPath}`,
          `Log: ${s.logPath}`,
          `Next: /sf debug status ${s.slug} or /sf debug continue ${s.slug}`,
        ].join("\n") + dispatchNote,
        "info",
      );
      if (canDispatch) {
        try {
          const prompt = loadPrompt("debug-session-manager", {
            goal: "find_and_fix",
            issue: s.issue,
            slug: s.slug,
            mode: s.mode,
            workingDirectory: basePath,
            // Fresh session: no checkpoint/TDD/specialist context yet.
            checkpointContext: "",
            tddContext: "",
            specialistContext: "",
          });
          pi.sendMessage(
            { customType: "sf-debug-start", content: prompt, display: false },
            { triggerTurn: true },
          );
        } catch (err) {
          // Dispatch failure is non-fatal: the session is already persisted.
          const msg = err instanceof Error ? err.message : String(err);
          ctx.ui.notify(
            `Debug dispatch failed: ${msg}\nSession '${s.slug}' is persisted; retry with /sf debug continue ${s.slug}`,
            "warning",
          );
        }
      }
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      ctx.ui.notify(
        `Unable to create debug session: ${message}\nTry /sf debug --diagnose for artifact health details.`,
        "error",
      );
    }
    return;
  }
  // ── list: healthy sessions plus a capped sample of malformed artifacts ──
  if (parsed.type === "list") {
    try {
      const listed = listDebugSessions(basePath);
      if (listed.sessions.length === 0 && listed.malformed.length === 0) {
        ctx.ui.notify("No debug sessions found. Start one with: /sf debug <issue-text>", "info");
        return;
      }
      const lines: string[] = [];
      if (listed.sessions.length > 0) {
        lines.push("Debug sessions:");
        for (const record of listed.sessions) {
          lines.push(formatSessionLine(" -", record.session));
        }
      }
      if (listed.malformed.length > 0) {
        lines.push("");
        lines.push(`Malformed artifacts: ${listed.malformed.length}`);
        // Cap at 5 entries so a badly corrupted store does not flood the UI.
        for (const bad of listed.malformed.slice(0, 5)) {
          lines.push(` - ${bad.artifactPath} :: ${bad.message}`);
        }
        if (listed.malformed.length > 5) {
          lines.push(` ... and ${listed.malformed.length - 5} more`);
        }
        lines.push("Run /sf debug --diagnose for remediation guidance.");
      }
      ctx.ui.notify(lines.join("\n"), "info");
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      ctx.ui.notify(
        `Unable to list debug sessions: ${message}\nRun /sf debug --diagnose for details.`,
        "warning",
      );
    }
    return;
  }
  // ── status: print one session's persisted fields verbatim ──
  if (parsed.type === "status") {
    try {
      const loaded = loadDebugSession(basePath, parsed.slug);
      if (!loaded) {
        ctx.ui.notify(
          `Unknown debug session slug '${parsed.slug}'. Run /sf debug list to see available sessions.`,
          "warning",
        );
        return;
      }
      const s = loaded.session;
      ctx.ui.notify(
        [
          `Debug session status: ${s.slug}`,
          `mode=${s.mode}`,
          `status=${s.status}`,
          `phase=${s.phase}`,
          `issue=${s.issue}`,
          `artifact=${loaded.artifactPath}`,
          `log=${s.logPath}`,
          `updated=${new Date(s.updatedAt).toISOString()}`,
          `lastError=${s.lastError ?? "none"}`,
        ].join("\n"),
        "info",
      );
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      ctx.ui.notify(
        `Unable to load debug session '${parsed.slug}': ${message}\nTry /sf debug --diagnose ${parsed.slug}`,
        "warning",
      );
    }
    return;
  }
  // ── continue: resume a session, building dispatch context from checkpoint /
  //    TDD gate / specialist review state before persisting the resume ──
  if (parsed.type === "continue") {
    try {
      const loaded = loadDebugSession(basePath, parsed.slug);
      if (!loaded) {
        ctx.ui.notify(
          `Unknown debug session slug '${parsed.slug}'. Run /sf debug list to see available sessions.`,
          "warning",
        );
        return;
      }
      if (loaded.session.status === "resolved") {
        // Resolved sessions are terminal; follow-up work gets a new session.
        ctx.ui.notify(
          `Session '${parsed.slug}' is resolved. Open a new session with /sf debug <issue-text> for follow-up work.`,
          "warning",
        );
        return;
      }
      // Determine checkpoint/TDD/specialist dispatch context before updating session state.
      const checkpoint = loaded.session.checkpoint;
      const tddGate = loaded.session.tddGate;
      const specialistReview: DebugSpecialistReview | null | undefined = loaded.session.specialistReview;
      const hasCheckpoint = checkpoint != null && checkpoint.awaitingResponse;
      const hasTddGate = tddGate != null && tddGate.enabled;
      // Defaults: plain resume via the diagnose template in find_and_fix mode.
      let dispatchTemplate = "debug-diagnose";
      let goal = "find_and_fix";
      let dispatchModeLabel = "find_and_fix";
      let checkpointContext = "";
      let tddContext = "";
      let specialistContext = "";
      let tddGateUpdate: DebugTddGate | undefined;
      if (hasCheckpoint || hasTddGate) {
        // Either feature upgrades dispatch to the richer session-manager template.
        dispatchTemplate = "debug-session-manager";
        if (hasCheckpoint) {
          const cpLines = [
            `## Active Checkpoint`,
            `- type: ${checkpoint.type}`,
            `- summary: ${checkpoint.summary}`,
          ];
          if (checkpoint.userResponse) {
            // Wrap the user's response in sentinels so the prompt template can isolate it.
            cpLines.push(`- userResponse:\n\nDATA_START\n${checkpoint.userResponse}\nDATA_END`);
          } else {
            cpLines.push(`- awaitingResponse: true`);
          }
          checkpointContext = cpLines.join("\n");
          dispatchModeLabel = `checkpointType=${checkpoint.type}`;
        }
        if (hasTddGate) {
          // TDD gate state machine: pending → red → green.
          if (tddGate.phase === "red") {
            // Failing test confirmed: implement the fix; advance gate to green.
            goal = "find_and_fix";
            const tddLines = [
              `## TDD Gate`,
              `- phase: red → green`,
            ];
            if (tddGate.testFile) tddLines.push(`- testFile: ${tddGate.testFile}`);
            if (tddGate.testName) tddLines.push(`- testName: ${tddGate.testName}`);
            if (tddGate.failureOutput) tddLines.push(`- failureOutput:\n${tddGate.failureOutput}`);
            tddLines.push(`The failing test has been confirmed. Proceed to implement the fix that makes this test pass.`);
            tddContext = tddLines.join("\n");
            tddGateUpdate = { ...tddGate, phase: "green" };
            dispatchModeLabel = "tddPhase=red→green";
          } else if (tddGate.phase === "green") {
            // Test already passing: keep verifying, no gate transition.
            goal = "find_and_fix";
            const tddLines = [
              `## TDD Gate`,
              `- phase: green`,
            ];
            if (tddGate.testFile) tddLines.push(`- testFile: ${tddGate.testFile}`);
            if (tddGate.testName) tddLines.push(`- testName: ${tddGate.testName}`);
            tddLines.push(`The test is now passing. Continue verifying the fix.`);
            tddContext = tddLines.join("\n");
            dispatchModeLabel = "tddPhase=green";
          } else {
            // phase === "pending": investigate only, do not fix yet
            goal = "find_root_cause_only";
            const tddLines = [
              `## TDD Gate`,
              `- phase: pending`,
              `TDD mode is active. Write a failing test that captures this bug first. Do NOT fix the issue yet.`,
            ];
            if (tddGate.testFile) tddLines.push(`- testFile: ${tddGate.testFile}`);
            tddContext = tddLines.join("\n");
            dispatchModeLabel = "tddPhase=pending";
          }
        } else {
          // Checkpoint only, no TDD gate — apply fix after human response
          goal = "find_and_fix";
        }
      }
      // Build specialistContext from session's specialistReview field (null/undefined → empty string).
      if (specialistReview != null) {
        specialistContext = [
          `## Prior Specialist Review`,
          `- hint: ${specialistReview.hint}`,
          `- skill: ${specialistReview.skill ?? ""}`,
          `- verdict: ${specialistReview.verdict}`,
          `- detail: ${specialistReview.detail}`,
        ].join("\n");
        dispatchModeLabel += ` specialistHint=${specialistReview.hint}`;
      }
      // Update session state BEFORE dispatch — handler returns after sendMessage.
      const resumed = updateDebugSession(basePath, parsed.slug, {
        status: "active",
        phase: "continued",
        lastError: null,
        // Only write tddGate when a phase transition actually happened.
        ...(tddGateUpdate !== undefined ? { tddGate: tddGateUpdate } : {}),
      });
      const canDispatch = pi != null && typeof (pi as ExtensionAPI).sendMessage === "function";
      const dispatchNote = canDispatch ? `\ndispatchMode=${dispatchModeLabel}` : "";
      ctx.ui.notify(
        [
          `Resumed debug session: ${resumed.session.slug}`,
          formatSessionLine("Session:", resumed.session),
          `Log: ${resumed.session.logPath}`,
          `Next: /sf debug status ${resumed.session.slug}`,
        ].join("\n") + dispatchNote,
        "info",
      );
      if (canDispatch) {
        try {
          const promptVars: Record<string, string> = {
            goal,
            issue: resumed.session.issue,
            slug: resumed.session.slug,
            mode: resumed.session.mode,
            workingDirectory: basePath,
          };
          // The session-manager template additionally consumes the three context blocks.
          if (dispatchTemplate === "debug-session-manager") {
            promptVars.checkpointContext = checkpointContext;
            promptVars.tddContext = tddContext;
            promptVars.specialistContext = specialistContext;
          }
          const prompt = loadPrompt(dispatchTemplate, promptVars);
          pi.sendMessage(
            { customType: "sf-debug-continue", content: prompt, display: false },
            { triggerTurn: true },
          );
        } catch (err) {
          // Non-fatal: state was persisted above, so a retry picks up where we left off.
          const msg = err instanceof Error ? err.message : String(err);
          ctx.ui.notify(
            `Continue dispatch failed: ${msg}\nSession '${resumed.session.slug}' is persisted; retry with /sf debug continue ${resumed.session.slug}`,
            "warning",
          );
        }
      }
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      ctx.ui.notify(
        `Unable to continue debug session '${parsed.slug}': ${message}\nTry /sf debug --diagnose ${parsed.slug}`,
        "warning",
      );
    }
    return;
  }
  // ── diagnose-issue: new session in diagnose mode (root cause only, no fix) ──
  if (parsed.type === "diagnose-issue") {
    const issue = parsed.issue.trim();
    if (!issue) {
      ctx.ui.notify(`Issue text is required.\n${usageText()}`, "warning");
      return;
    }
    try {
      const created = createDebugSession(basePath, { issue, mode: "diagnose" });
      const s = created.session;
      ctx.ui.notify(
        [
          `Diagnose session started: ${s.slug}`,
          formatSessionLine("Session:", s),
          `Artifact: ${created.artifactPath}`,
          `Log: ${s.logPath}`,
          `dispatchMode=find_root_cause_only`,
          `Next: /sf debug status ${s.slug} or /sf debug --diagnose ${s.slug}`,
        ].join("\n"),
        "info",
      );
      if (pi && typeof pi.sendMessage === "function") {
        try {
          const prompt = loadPrompt("debug-diagnose", {
            goal: "find_root_cause_only",
            issue: s.issue,
            slug: s.slug,
            mode: s.mode,
            workingDirectory: basePath,
          });
          pi.sendMessage(
            { customType: "sf-debug-diagnose", content: prompt, display: false },
            { triggerTurn: true },
          );
        } catch (err) {
          const msg = err instanceof Error ? err.message : String(err);
          ctx.ui.notify(
            `Diagnose dispatch failed: ${msg}\nSession '${s.slug}' is persisted; continue manually with /sf debug continue ${s.slug}`,
            "warning",
          );
        }
      }
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      ctx.ui.notify(
        `Unable to create diagnose session: ${message}\nTry /sf debug --diagnose for artifact health details.`,
        "error",
      );
    }
    return;
  }
  // ── diagnose: per-session health report, or store-wide artifact diagnostics ──
  if (parsed.type === "diagnose") {
    try {
      const listed = listDebugSessions(basePath);
      if (parsed.slug) {
        const loaded = loadDebugSession(basePath, parsed.slug);
        if (!loaded) {
          ctx.ui.notify(
            `Diagnose: session '${parsed.slug}' not found.\nRun /sf debug list to discover valid slugs.`,
            "warning",
          );
          return;
        }
        const s = loaded.session;
        ctx.ui.notify(
          [
            `Diagnose session: ${s.slug}`,
            `mode=${s.mode}`,
            `status=${s.status}`,
            `phase=${s.phase}`,
            `artifact=${loaded.artifactPath}`,
            `log=${s.logPath}`,
            `lastError=${s.lastError ?? "none"}`,
            `malformedArtifactsInStore=${listed.malformed.length}`,
          ].join("\n"),
          "info",
        );
        return;
      }
      const lines = [
        "Debug session diagnostics:",
        `healthySessions=${listed.sessions.length}`,
        `malformedArtifacts=${listed.malformed.length}`,
      ];
      if (listed.malformed.length > 0) {
        lines.push("");
        lines.push("Malformed artifacts (first 10):");
        for (const malformed of listed.malformed.slice(0, 10)) {
          lines.push(` - ${malformed.artifactPath}`);
          lines.push(` ${malformed.message}`);
        }
        lines.push("Remediation: repair/remove malformed JSON artifacts under .sf/debug/sessions/.");
      }
      // Escalate severity when the store has corruption.
      ctx.ui.notify(lines.join("\n"), listed.malformed.length > 0 ? "warning" : "info");
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      ctx.ui.notify(`Diagnose failed: ${message}`, "error");
    }
  }
}

View file

@ -0,0 +1,535 @@
/**
* SF Command `/sf memory`
*
* Subcommands:
* list show recent active memories
* show <id> print one memory
* ingest <uri> persist a source row (file path, URL, or "-" for stdin-piped note)
* note "<text>" persist an inline note as a source
* forget <id> supersede a memory (CAP_EXCEEDED sentinel)
* stats category / scope counts + source count
* sources list recent memory_sources rows
* extract <src> dispatch an agent turn that distils a source into memories
*/
import { readFileSync, writeFileSync } from "node:fs";
import { resolve as resolvePath } from "node:path";
import type { ExtensionAPI, ExtensionCommandContext } from "@singularity-forge/pi-coding-agent";
import { projectRoot } from "./commands/context.js";
import { ingestFile, ingestNote, ingestUrl, summarizeIngest } from "./memory-ingest.js";
import { getMemorySource, listMemorySources } from "./memory-source-store.js";
import {
createMemory,
decayStaleMemories,
enforceMemoryCap,
getActiveMemories,
getActiveMemoriesRanked,
supersedeMemory,
} from "./memory-store.js";
import { _getAdapter, isDbAvailable } from "./sf-db.js";
import { createMemoryRelation, listRelationsFor } from "./memory-relations.js";
// ─── Arg parsing ────────────────────────────────────────────────────────────
/** Parsed `/sf memory` invocation. */
interface MemoryCmdArgs {
  /** Subcommand name, lower-cased; parseArgs defaults it to "list" when absent. */
  sub: string;
  /** Tokens that are not recognized options. */
  positional: string[];
  /** Accumulated --tag values (comma-separated lists are flattened and trimmed). */
  tags: string[];
  /** Optional --scope value (e.g. project | global | custom). */
  scope?: string;
  /** Whether --extract was requested (--no-extract clears it again). */
  extract: boolean;
}
/**
 * Parse `/sf memory` arguments: first token is the subcommand (defaults to
 * "list"), remaining tokens are positionals plus --tag/--scope/--extract
 * options in either "--opt value" or "--opt=value" form.
 */
function parseArgs(raw: string): MemoryCmdArgs {
  const tokens = splitArgs(raw);
  const sub = (tokens.shift() ?? "list").toLowerCase();
  const result: MemoryCmdArgs = { sub, positional: [], tags: [], scope: undefined, extract: false };
  const pushTags = (csv: string): void => {
    for (const piece of csv.split(",")) {
      const trimmed = piece.trim();
      if (trimmed) result.tags.push(trimmed);
    }
  };
  let i = 0;
  while (i < tokens.length) {
    const tok = tokens[i];
    if (tok === "--tag" && i + 1 < tokens.length) {
      pushTags(tokens[i + 1]);
      i += 2;
    } else if (tok.startsWith("--tag=")) {
      pushTags(tok.slice("--tag=".length));
      i += 1;
    } else if (tok === "--scope" && i + 1 < tokens.length) {
      result.scope = tokens[i + 1];
      i += 2;
    } else if (tok.startsWith("--scope=")) {
      result.scope = tok.slice("--scope=".length);
      i += 1;
    } else if (tok === "--extract") {
      result.extract = true;
      i += 1;
    } else if (tok === "--no-extract") {
      result.extract = false;
      i += 1;
    } else {
      result.positional.push(tok);
      i += 1;
    }
  }
  return result;
}
/**
 * Tokenize a raw argument string. Double- or single-quoted runs become a
 * single token with the quotes stripped; everything else splits on whitespace.
 */
function splitArgs(raw: string): string[] {
  const pattern = /"([^"]*)"|'([^']*)'|(\S+)/g;
  const out: string[] = [];
  for (const m of raw.matchAll(pattern)) {
    out.push(m[1] ?? m[2] ?? m[3]);
  }
  return out;
}
/**
 * Clamp `text` to at most `max` characters for single-line display.
 * Strings longer than `max` are cut to `max - 1` characters and suffixed with
 * a single "…", so the result is exactly `max` characters long.
 *
 * Fix: the original sliced to `max - 1` but never appended the ellipsis the
 * reserved character was clearly meant for, silently dropping a character with
 * no truncation indicator.
 */
function truncate(text: string, max: number): string {
  if (text.length <= max) return text;
  return `${text.slice(0, max - 1)}…`;
}
// ─── Handler ────────────────────────────────────────────────────────────────
/**
 * Entry point for `/sf memory`. Parses the subcommand, lazily opens the SF
 * database, then dispatches to the per-subcommand handler.
 *
 * @param args - Raw argument string after "/sf memory".
 * @param ctx - Extension command context (UI notifications).
 * @param pi - Extension API; only `extract` uses it (to dispatch an agent turn).
 */
export async function handleMemory(
  args: string,
  ctx: ExtensionCommandContext,
  pi: ExtensionAPI,
): Promise<void> {
  const parsed = parseArgs(args);
  // `/sf memory` or `/sf memory help`
  // NOTE(review): parseArgs defaults an empty input to sub="list", so the
  // `sub === ""` arm looks unreachable — confirm before removing.
  if (parsed.sub === "" || parsed.sub === "help") {
    ctx.ui.notify(usage(), "info");
    return;
  }
  // Most subcommands need the DB.
  await ensureDb();
  switch (parsed.sub) {
    case "list":
      handleList(ctx);
      return;
    case "show":
      handleShow(ctx, parsed.positional[0]);
      return;
    case "forget":
      handleForget(ctx, parsed.positional[0]);
      return;
    case "stats":
      handleStats(ctx);
      return;
    case "sources":
      handleSources(ctx);
      return;
    case "note":
      await handleNote(ctx, parsed);
      return;
    case "ingest":
      await handleIngest(ctx, parsed);
      return;
    case "extract":
      handleExtractSource(ctx, pi, parsed.positional[0]);
      return;
    case "export":
      handleExport(ctx, parsed.positional[0]);
      return;
    case "import":
      handleImport(ctx, parsed.positional[0]);
      return;
    case "decay":
      handleDecay(ctx);
      return;
    case "cap":
      handleCap(ctx, parsed.positional[0]);
      return;
    default:
      ctx.ui.notify(`Unknown subcommand "${parsed.sub}". ${usage()}`, "warning");
      return;
  }
}
/** Multi-line usage help for the `/sf memory` command family. */
function usage(): string {
  const rows = [
    "Usage: /sf memory <subcommand>",
    " list list recent active memories",
    " show <MEM###> print one memory",
    " forget <MEM###> supersede a memory",
    " stats counts by category / sources / edges",
    " sources list recent memory_sources",
    ' note "<text>" ingest an inline note as a source',
    " ingest <path|url> ingest a local file path or URL",
    " extract <SRC-xxx> dispatch an LLM turn to extract memories from a source",
    " export <path.json> dump memories + relations + sources to JSON",
    " import <path.json> load a previous export (idempotent)",
    " decay run the stale-memory decay pass immediately",
    " cap [N] enforce the memory cap (default 50)",
    "",
    "Options: --tag a,b --scope project|global|<custom> --extract",
  ];
  return rows.join("\n");
}
/**
 * Lazily open the SF database when it is not already available.
 * The dynamic import defers bootstrap cost until a DB-backed subcommand runs.
 */
async function ensureDb(): Promise<void> {
  if (isDbAvailable()) return;
  const dynamicTools = await import("./bootstrap/dynamic-tools.js");
  await dynamicTools.ensureDbOpen();
}
/**
 * `/sf memory list` — print up to 50 active memories, ranked, one per line.
 */
function handleList(ctx: ExtensionCommandContext): void {
  if (!isDbAvailable()) {
    ctx.ui.notify("No SF database available.", "warning");
    return;
  }
  const ranked = getActiveMemoriesRanked(50);
  if (ranked.length === 0) {
    ctx.ui.notify("No active memories.", "info");
    return;
  }
  const rendered = ranked
    .map(
      (m) =>
        `- [${m.id}] (${m.category}, conf ${m.confidence.toFixed(2)}, hits ${m.hit_count}) ${truncate(m.content, 100)}`,
    )
    .join("\n");
  ctx.ui.notify(rendered, "info");
}
/**
 * `/sf memory show <id>` — print the full record for a single memory,
 * including optional tags / supersession / source-provenance lines.
 */
function handleShow(ctx: ExtensionCommandContext, id: string | undefined): void {
  if (!id) {
    ctx.ui.notify("Usage: /sf memory show <MEM###>", "warning");
    return;
  }
  const adapter = _getAdapter();
  if (!adapter) {
    ctx.ui.notify("No SF database available.", "warning");
    return;
  }
  // Named-parameter lookup; `row` is falsy when the id does not exist.
  const row = adapter.prepare("SELECT * FROM memories WHERE id = :id").get({ ":id": id });
  if (!row) {
    ctx.ui.notify(`Memory not found: ${id}`, "warning");
    return;
  }
  // `tags` is stored as a JSON array string; malformed JSON degrades to [].
  const tags = row["tags"] ? safeJsonArray(row["tags"] as string) : [];
  const lines = [
    `ID: ${row["id"]}`,
    `Category: ${row["category"]}`,
    `Confidence: ${Number(row["confidence"]).toFixed(2)}`,
    `Hits: ${row["hit_count"]}`,
    `Created: ${row["created_at"]}`,
    `Updated: ${row["updated_at"]}`,
    // Optional lines evaluate to null when absent and are filtered out below.
    tags.length > 0 ? `Tags: ${tags.join(", ")}` : null,
    row["superseded_by"] ? `Superseded by: ${row["superseded_by"]}` : null,
    row["source_unit_type"] ? `Source: ${row["source_unit_type"]}/${row["source_unit_id"]}` : null,
    "",
    String(row["content"]),
  ]
    .filter((line): line is string => line !== null)
    .join("\n");
  ctx.ui.notify(lines, "info");
}
/**
 * `/sf memory forget <id>` — supersede a memory using the CAP_EXCEEDED sentinel.
 */
function handleForget(ctx: ExtensionCommandContext, id: string | undefined): void {
  if (!id) {
    ctx.ui.notify("Usage: /sf memory forget <MEM###>", "warning");
    return;
  }
  if (supersedeMemory(id, "CAP_EXCEEDED")) {
    ctx.ui.notify(`Forgot ${id}.`, "info");
  } else {
    ctx.ui.notify(`Failed to forget ${id}.`, "warning");
  }
}
/**
 * `/sf memory stats` — aggregate counts: active vs superseded memories,
 * per-category breakdown, sources by kind, relations by rel type, and
 * embedding coverage over active memories.
 */
function handleStats(ctx: ExtensionCommandContext): void {
  const adapter = _getAdapter();
  if (!adapter) {
    ctx.ui.notify("No SF database available.", "warning");
    return;
  }
  try {
    // "Active" = not superseded; superseded rows are counted separately.
    const activeRow = adapter
      .prepare("SELECT count(*) as cnt FROM memories WHERE superseded_by IS NULL")
      .get();
    const supersededRow = adapter
      .prepare("SELECT count(*) as cnt FROM memories WHERE superseded_by IS NOT NULL")
      .get();
    const byCategory = adapter
      .prepare(
        "SELECT category, count(*) as cnt FROM memories WHERE superseded_by IS NULL GROUP BY category ORDER BY cnt DESC",
      )
      .all();
    const sourcesRow = adapter.prepare("SELECT count(*) as cnt FROM memory_sources").get();
    const sourcesByKind = adapter
      .prepare("SELECT kind, count(*) as cnt FROM memory_sources GROUP BY kind ORDER BY cnt DESC")
      .all();
    const relationsRow = adapter.prepare("SELECT count(*) as cnt FROM memory_relations").get();
    const relationsByRel = adapter
      .prepare("SELECT rel, count(*) as cnt FROM memory_relations GROUP BY rel ORDER BY cnt DESC")
      .all();
    const embeddingsRow = adapter.prepare("SELECT count(*) as cnt FROM memory_embeddings").get();
    // Embeddings joined to non-superseded memories only, for the coverage ratio.
    const embeddedActiveRow = adapter
      .prepare(
        `SELECT count(*) as cnt FROM memory_embeddings e
JOIN memories m ON m.id = e.memory_id
WHERE m.superseded_by IS NULL`,
      )
      .get();
    const activeCount = (activeRow?.["cnt"] as number) ?? 0;
    const embeddedActive = (embeddedActiveRow?.["cnt"] as number) ?? 0;
    // Coverage = embedded-active / active, rounded to a whole percent; "n/a" when no active memories.
    const coverage = activeCount > 0 ? `${Math.round((embeddedActive / activeCount) * 100)}%` : "n/a";
    const out = [
      `Active memories: ${activeCount}`,
      `Superseded: ${supersededRow?.["cnt"] ?? 0}`,
      "",
      "By category:",
      ...byCategory.map((row) => ` ${row["category"]}: ${row["cnt"]}`),
      "",
      `Memory sources: ${sourcesRow?.["cnt"] ?? 0}`,
      ...sourcesByKind.map((row) => ` ${row["kind"]}: ${row["cnt"]}`),
      "",
      `Relations: ${relationsRow?.["cnt"] ?? 0}`,
      ...relationsByRel.map((row) => ` ${row["rel"]}: ${row["cnt"]}`),
      "",
      `Embeddings: ${embeddingsRow?.["cnt"] ?? 0} total, ${embeddedActive} active (coverage ${coverage})`,
    ].join("\n");
    ctx.ui.notify(out, "info");
  } catch (err) {
    ctx.ui.notify(`Stats failed: ${(err as Error).message}`, "warning");
  }
}
/**
 * `/sf memory export <path.json>` — dump active memories, their outgoing
 * relations, and up to 500 recent sources to a pretty-printed JSON file.
 */
function handleExport(ctx: ExtensionCommandContext, target: string | undefined): void {
  if (!target) {
    ctx.ui.notify("Usage: /sf memory export <path.json>", "warning");
    return;
  }
  try {
    const active = getActiveMemories();
    // Keep only edges where the memory is the `from` end, so each relation is
    // exported once (assumes listRelationsFor returns edges for both endpoints — confirm).
    const relations = active.flatMap((m) =>
      listRelationsFor(m.id).filter((r) => r.from === m.id),
    );
    const sources = listMemorySources(500);
    // The version tag lets future importers detect the payload layout.
    const payload = {
      version: 1,
      exported_at: new Date().toISOString(),
      memories: active.map((m) => ({
        id: m.id,
        category: m.category,
        content: m.content,
        confidence: m.confidence,
        hit_count: m.hit_count,
        source_unit_type: m.source_unit_type,
        source_unit_id: m.source_unit_id,
        created_at: m.created_at,
        updated_at: m.updated_at,
      })),
      relations: relations.map((r) => ({
        from: r.from,
        to: r.to,
        rel: r.rel,
        confidence: r.confidence,
      })),
      sources,
    };
    const abs = resolvePath(process.cwd(), target);
    writeFileSync(abs, JSON.stringify(payload, null, 2), "utf-8");
    ctx.ui.notify(
      `Exported ${payload.memories.length} memories, ${payload.relations.length} relations, ${payload.sources.length} sources → ${abs}`,
      "info",
    );
  } catch (err) {
    ctx.ui.notify(`Export failed: ${(err as Error).message}`, "error");
  }
}
/**
 * Shape of a memory row inside an export payload.
 * NOTE(review): `scope` and `tags` are accepted here but handleImport does not
 * forward them to createMemory — confirm whether that loss is intentional.
 */
interface ExportedMemory {
  id?: string;
  category: string;
  content: string;
  confidence?: number;
  scope?: string;
  tags?: string[];
}
/** Shape of a relation edge inside an export payload (ids refer to exported memory ids). */
interface ExportedRelation {
  from: string;
  to: string;
  rel: string;
  confidence?: number;
}
/**
 * `/sf memory import <path.json>` — load a previously exported payload.
 * Rows failing basic shape checks are skipped silently.
 *
 * NOTE(review): createMemory is called without the exported id, and relations
 * reference the ORIGINAL memory ids — confirm relation import still resolves
 * after ids are (presumably) regenerated. `scope`/`tags` are also dropped.
 */
function handleImport(ctx: ExtensionCommandContext, target: string | undefined): void {
  if (!target) {
    ctx.ui.notify("Usage: /sf memory import <path.json>", "warning");
    return;
  }
  try {
    const abs = resolvePath(process.cwd(), target);
    const raw = readFileSync(abs, "utf-8");
    const parsed = JSON.parse(raw) as { memories?: ExportedMemory[]; relations?: ExportedRelation[] };
    let memoryCount = 0;
    let relationCount = 0;
    for (const mem of parsed.memories ?? []) {
      if (!mem.category || !mem.content) continue;
      const id = createMemory({
        category: mem.category,
        content: mem.content,
        confidence: mem.confidence,
      });
      if (id) memoryCount++;
    }
    for (const rel of parsed.relations ?? []) {
      if (!rel.from || !rel.to || !rel.rel) continue;
      // NOTE(review): `as never` sidesteps the relation-type union from
      // memory-relations — consider validating rel.rel against it instead.
      if (createMemoryRelation(rel.from, rel.to, rel.rel as never, rel.confidence)) {
        relationCount++;
      }
    }
    ctx.ui.notify(`Imported ${memoryCount} memories and ${relationCount} relations.`, "info");
  } catch (err) {
    ctx.ui.notify(`Import failed: ${(err as Error).message}`, "error");
  }
}
/** `/sf memory decay` — run the stale-memory decay pass immediately (batch of 20). */
function handleDecay(ctx: ExtensionCommandContext): void {
  decayStaleMemories(20);
  ctx.ui.notify("Decay pass complete.", "info");
}
/**
 * `/sf memory cap [N]` — enforce the memory cap.
 * Missing/empty argument defaults to 50; non-numeric or sub-1 values are rejected.
 */
function handleCap(ctx: ExtensionCommandContext, arg: string | undefined): void {
  const requested = arg ? Number.parseInt(arg, 10) : 50;
  const valid = Number.isFinite(requested) && requested >= 1;
  if (!valid) {
    ctx.ui.notify("Usage: /sf memory cap <max> (default 50)", "warning");
    return;
  }
  enforceMemoryCap(requested);
  ctx.ui.notify(`Enforced memory cap of ${requested}.`, "info");
}
/**
 * `/sf memory sources` — list up to 30 recent memory_sources rows, one per line.
 * Non-project scopes are shown as a "/scope" suffix after the kind.
 */
function handleSources(ctx: ExtensionCommandContext): void {
  const recent = listMemorySources(30);
  if (recent.length === 0) {
    ctx.ui.notify("No memory sources yet. Use `/sf memory ingest <path|url>` to add one.", "info");
    return;
  }
  const rendered = recent.map((s) => {
    const scopeSuffix = s.scope !== "project" ? `/${s.scope}` : "";
    const label = s.title ?? s.uri ?? s.content;
    return `- ${s.id} [${s.kind}${scopeSuffix}] ${truncate(label, 100)}`;
  });
  ctx.ui.notify(rendered.join("\n"), "info");
}
/**
 * `/sf memory note "<text>"` — persist an inline note as a memory source.
 * Extraction is never triggered inline from this command.
 */
async function handleNote(ctx: ExtensionCommandContext, args: MemoryCmdArgs): Promise<void> {
  const text = args.positional.join(" ").trim();
  if (text === "") {
    ctx.ui.notify('Usage: /sf memory note "your note"', "warning");
    return;
  }
  try {
    const outcome = await ingestNote(text, null, {
      scope: args.scope,
      tags: args.tags,
      extract: false,
    });
    ctx.ui.notify(summarizeIngest(outcome), "info");
  } catch (err) {
    ctx.ui.notify(`Note ingest failed: ${(err as Error).message}`, "error");
  }
}
/**
 * `/sf memory ingest <path|url>` — persist a local file or http(s) URL as a
 * memory source. --extract only prints a hint; extraction is a separate turn.
 */
async function handleIngest(ctx: ExtensionCommandContext, args: MemoryCmdArgs): Promise<void> {
  const target = args.positional[0];
  if (!target) {
    ctx.ui.notify("Usage: /sf memory ingest <path|url> [--tag a,b] [--scope project|global]", "warning");
    return;
  }
  try {
    const opts = { scope: args.scope, tags: args.tags, extract: false };
    const looksLikeUrl = /^https?:\/\//i.test(target);
    const result = looksLikeUrl
      ? await ingestUrl(target, null, opts)
      : await ingestFile(target, null, opts);
    ctx.ui.notify(summarizeIngest(result), "info");
    // Extraction is deliberately not auto-dispatched here; point at the manual command.
    if (args.extract && result.sourceId) {
      ctx.ui.notify(
        `(Use \`/sf memory extract ${result.sourceId}\` to trigger extraction manually.)`,
        "info",
      );
    }
  } catch (err) {
    ctx.ui.notify(`Ingest failed: ${(err as Error).message}`, "error");
  }
}
/**
 * `/sf memory extract <SRC-xxx>` — dispatch a hidden agent turn that distils
 * the given memory source into memories.
 */
function handleExtractSource(
  ctx: ExtensionCommandContext,
  pi: ExtensionAPI,
  id: string | undefined,
): void {
  if (!id) {
    ctx.ui.notify("Usage: /sf memory extract <SRC-xxx>", "warning");
    return;
  }
  const source = getMemorySource(id);
  if (source == null) {
    ctx.ui.notify(`Source not found: ${id}`, "warning");
    return;
  }
  ctx.ui.notify(`Dispatching extraction turn for ${id}...`, "info");
  pi.sendMessage(
    { customType: "sf-memory-extract", content: buildExtractPrompt(source), display: false },
    { triggerTurn: true },
  );
}
/**
 * Build the hidden agent-turn prompt that asks the model to distil a memory
 * source into durable memories via repeated `capture_thought` calls.
 * Title/URI lines are omitted when the source has none.
 */
function buildExtractPrompt(source: {
  id: string;
  kind: string;
  title: string | null;
  uri: string | null;
  content: string;
}): string {
  const header = [
    `## Memory extraction request`,
    ``,
    `Source: ${source.id} (${source.kind})`,
    source.title ? `Title: ${source.title}` : null,
    source.uri ? `URI: ${source.uri}` : null,
  ]
    .filter(Boolean)
    .join("\n");
  return [
    header,
    "",
    "Read the content below and call the `capture_thought` tool once per durable insight",
    "(architecture, convention, gotcha, preference, environment, pattern). Skip one-off details,",
    // Fix: "13 sentences" was a mangled "1–3 sentences" (dash lost in the port).
    "temporary state, and anything secret. Keep each memory to 1–3 sentences.",
    "",
    "---",
    "",
    source.content,
  ].join("\n");
}
/**
 * Parse a JSON-encoded string array, tolerating malformed input.
 * Non-array payloads and non-string elements are dropped; a parse failure
 * yields an empty array rather than throwing.
 */
function safeJsonArray(raw: string): string[] {
  let parsed: unknown;
  try {
    parsed = JSON.parse(raw);
  } catch {
    return [];
  }
  if (!Array.isArray(parsed)) {
    return [];
  }
  return parsed.filter((t): t is string => typeof t === "string");
}
// projectRoot is imported so tests can mock it via the same path as other commands.
// NOTE(review): test seam only — not part of the public command API.
export const _internals = { projectRoot };

View file

@ -106,13 +106,15 @@ function formatReportTable(report: ScaffoldDriftReport): string {
// Per ADR-021 §10 the user-facing label for the `upgradable` drift bucket is
// "Pending" — those are pending-state files whose stamped version trails the
// current ship version and are slated for silent re-render on next sync.
// The `customized` bucket includes both truly-customized files and synced files
// with no pending action; use hashDrifted field to distinguish if needed.
const lines = [
"Scaffold drift report:",
` Missing : ${c.missing}`,
` Pending : ${c.upgradable}`,
` Editing-drift: ${c["editing-drift"]}`,
` Untracked : ${c.untracked}`,
` Customized : ${c.customized}`,
` No-action : ${c.customized}`,
];
const review = report.items.filter(
(i) => i.bucket === "missing" || i.bucket === "editing-drift",

View file

@ -0,0 +1,598 @@
/**
* Component Loader
*
* Multi-format loader that handles:
* 1. New format: component.yaml + SKILL.md/AGENT.md
* 2. Legacy skill format: SKILL.md with YAML frontmatter
* 3. Legacy agent format: .md with YAML frontmatter (name, description, tools, model)
*
* Auto-detects format by checking for component.yaml first, then falling back
* to legacy formats based on file naming conventions.
*/
import { existsSync, readdirSync, readFileSync, statSync } from 'node:fs';
import { basename, dirname, join } from 'node:path';
import { parse as parseYaml } from 'yaml';
import { parseFrontmatter } from '@singularity-forge/pi-coding-agent';
import type {
Component,
ComponentApiVersion,
ComponentDefinition,
ComponentDiagnostic,
ComponentKind,
ComponentSource,
AgentSpec,
AgentToolConfig,
SkillSpec,
} from './component-types.js';
import {
validateComponentName,
validateComponentDescription,
computeComponentId,
} from './component-types.js';
// Kinds and API versions this loader understands; anything else is rejected
// with an error diagnostic during component.yaml validation.
const SUPPORTED_COMPONENT_KINDS: ComponentKind[] = ['skill', 'agent'];
const SUPPORTED_API_VERSIONS: ComponentApiVersion[] = ['sf/v1'];
// ============================================================================
// Load Result
// ============================================================================
/** Result of attempting to load a single component (null when not loadable). */
export interface LoadComponentResult {
  component: Component | null;
  diagnostics: ComponentDiagnostic[];
}
/** Result of scanning a directory: every loadable component plus all diagnostics. */
export interface LoadComponentsResult {
  components: Component[];
  diagnostics: ComponentDiagnostic[];
}
// ============================================================================
// Single Component Loading
// ============================================================================
/**
 * Load a component from a directory, auto-detecting its format.
 *
 * Detection order: `component.yaml` (new format) wins; otherwise a legacy
 * `SKILL.md` is tried. A directory with neither yields no component and no
 * diagnostics.
 */
export function loadComponentFromDir(
  dir: string,
  source: ComponentSource,
): LoadComponentResult {
  const yamlPath = join(dir, 'component.yaml');
  if (existsSync(yamlPath)) {
    return loadFromComponentYaml(yamlPath, dir, source);
  }
  const legacySkillPath = join(dir, 'SKILL.md');
  if (existsSync(legacySkillPath)) {
    return loadFromLegacySkill(legacySkillPath, dir, source);
  }
  // No recognized component layout in this directory.
  return { component: null, diagnostics: [] };
}
/**
 * Load a component from a legacy agent `.md` file (a flat file rather than a
 * component directory). Thin public alias over the legacy-agent parser.
 */
export function loadComponentFromAgentFile(
  filePath: string,
  source: ComponentSource,
): LoadComponentResult {
  const result = loadFromLegacyAgent(filePath, source);
  return result;
}
// ============================================================================
// New Format: component.yaml
// ============================================================================
/**
 * Parse and validate a `component.yaml` definition.
 *
 * Structural problems (unreadable file, bad YAML, missing/unsupported
 * apiVersion or kind, absent metadata fields or spec, missing entry file)
 * fail fast with a single error diagnostic. Name and description rules are
 * checked together so the author sees every violation in one pass.
 */
function loadFromComponentYaml(
  yamlPath: string,
  dir: string,
  source: ComponentSource,
): LoadComponentResult {
  const diagnostics: ComponentDiagnostic[] = [];
  // Record one error diagnostic and produce the failed result.
  const reject = (message: string): LoadComponentResult => {
    diagnostics.push({ type: 'error', message, path: yamlPath });
    return { component: null, diagnostics };
  };

  let raw: string;
  try {
    raw = readFileSync(yamlPath, 'utf-8');
  } catch (error) {
    return reject(error instanceof Error ? error.message : 'failed to read component.yaml');
  }

  let definition: ComponentDefinition;
  try {
    definition = parseYaml(raw) as ComponentDefinition;
  } catch (error) {
    const msg = error instanceof Error ? error.message : 'failed to parse component.yaml';
    return reject(`invalid YAML: ${msg}`);
  }

  // Structural checks, fail-fast in declaration order.
  if (!definition?.apiVersion) {
    return reject('missing apiVersion');
  }
  if (!SUPPORTED_API_VERSIONS.includes(definition.apiVersion)) {
    return reject(`unsupported apiVersion "${String(definition.apiVersion)}"`);
  }
  if (!definition.kind) {
    return reject('missing kind');
  }
  if (!SUPPORTED_COMPONENT_KINDS.includes(definition.kind)) {
    return reject(`unsupported kind "${definition.kind}"`);
  }
  if (!definition.metadata?.name) {
    return reject('missing metadata.name');
  }
  if (!definition.metadata?.description) {
    return reject('missing metadata.description');
  }

  // Name + description rules are accumulated (not fail-fast) so all
  // violations surface at once.
  const ruleErrors = [
    ...validateComponentName(definition.metadata.name),
    ...validateComponentDescription(definition.metadata.description),
  ];
  for (const message of ruleErrors) {
    diagnostics.push({ type: 'error', message, path: yamlPath });
  }
  if (ruleErrors.length > 0) {
    return { component: null, diagnostics };
  }

  if (!definition.spec) {
    return reject('missing spec');
  }
  const entryProblem = validateEntryFile(definition.kind, definition.spec, dir, yamlPath);
  if (entryProblem) {
    diagnostics.push(entryProblem);
    return { component: null, diagnostics };
  }

  const component: Component = {
    id: computeComponentId(definition.metadata.name, definition.metadata.namespace),
    kind: definition.kind,
    metadata: definition.metadata,
    spec: definition.spec,
    requires: definition.requires,
    compatibility: definition.compatibility,
    routing: definition.routing,
    dirPath: dir,
    filePath: yamlPath,
    source,
    format: 'component-yaml',
    enabled: true,
  };
  return { component, diagnostics };
}
// ============================================================================
// Legacy Skill Format: SKILL.md with frontmatter
// ============================================================================
/** Frontmatter fields recognized in a legacy SKILL.md file. */
interface LegacySkillFrontmatter {
  name?: string;
  description?: string;
  // Markdown frontmatter key as written on disk; true disables model auto-invocation.
  'disable-model-invocation'?: boolean;
  [key: string]: unknown;
}
/**
 * Load a legacy skill: a SKILL.md whose YAML frontmatter carries name and
 * description. Validation problems are reported as warnings (legacy files
 * are tolerated), but a missing or blank description still rejects the
 * component.
 */
function loadFromLegacySkill(
  filePath: string,
  dir: string,
  source: ComponentSource,
): LoadComponentResult {
  const diagnostics: ComponentDiagnostic[] = [];

  let raw: string;
  try {
    raw = readFileSync(filePath, 'utf-8');
  } catch (error) {
    const msg = error instanceof Error ? error.message : 'failed to read SKILL.md';
    diagnostics.push({ type: 'warning', message: msg, path: filePath });
    return { component: null, diagnostics };
  }

  const { frontmatter } = parseFrontmatter<LegacySkillFrontmatter>(raw);
  // Fall back to the containing directory's name when frontmatter omits one.
  const name = frontmatter.name || basename(dir);

  for (const message of validateComponentName(name)) {
    diagnostics.push({ type: 'warning', message, path: filePath });
  }
  for (const message of validateComponentDescription(frontmatter.description)) {
    diagnostics.push({ type: 'warning', message, path: filePath });
  }
  if (!frontmatter.description || frontmatter.description.trim() === '') {
    return { component: null, diagnostics };
  }

  const spec: SkillSpec = {
    prompt: 'SKILL.md',
    disableModelInvocation: frontmatter['disable-model-invocation'] === true,
  };
  const component: Component = {
    id: computeComponentId(name),
    kind: 'skill',
    metadata: { name, description: frontmatter.description },
    spec,
    dirPath: dir,
    filePath,
    source,
    format: 'skill-md',
    enabled: true,
  };
  return { component, diagnostics };
}
// ============================================================================
// Legacy Agent Format: .md with frontmatter
// ============================================================================
/** Frontmatter fields recognized in a legacy agent .md file. */
interface LegacyAgentFrontmatter {
  name?: string;
  description?: string;
  // Comma-separated tool allow-list, e.g. "read, write, bash".
  tools?: string;
  model?: string;
  [key: string]: unknown;
}
/**
 * Load a legacy agent definition: a flat `.md` file whose frontmatter carries
 * name and description (both mandatory), plus optionally a comma-separated
 * tool allow-list and a model override. The file itself becomes the agent's
 * system prompt.
 */
function loadFromLegacyAgent(
  filePath: string,
  source: ComponentSource,
): LoadComponentResult {
  const diagnostics: ComponentDiagnostic[] = [];

  let raw: string;
  try {
    raw = readFileSync(filePath, 'utf-8');
  } catch (error) {
    const msg = error instanceof Error ? error.message : 'failed to read agent file';
    diagnostics.push({ type: 'warning', message: msg, path: filePath });
    return { component: null, diagnostics };
  }

  const { frontmatter } = parseFrontmatter<LegacyAgentFrontmatter>(raw);
  if (!frontmatter.name || !frontmatter.description) {
    diagnostics.push({
      type: 'warning',
      message: 'agent file missing name or description in frontmatter',
      path: filePath,
    });
    return { component: null, diagnostics };
  }

  // "a, b, c" → { allow: ['a', 'b', 'c'] }; an absent tools string means no config.
  let tools: AgentToolConfig | undefined;
  if (frontmatter.tools) {
    const allow = frontmatter.tools
      .split(',')
      .map((t: string) => t.trim())
      .filter(Boolean);
    tools = { allow };
  }

  const spec: AgentSpec = {
    systemPrompt: basename(filePath),
    model: frontmatter.model,
    tools,
  };
  const component: Component = {
    id: computeComponentId(frontmatter.name),
    kind: 'agent',
    metadata: { name: frontmatter.name, description: frontmatter.description },
    spec,
    dirPath: dirname(filePath),
    filePath,
    source,
    format: 'agent-md',
    enabled: true,
  };
  return { component, diagnostics };
}
// ============================================================================
// Directory Scanning
// ============================================================================
/**
* Scan a directory for components (skills format).
* Handles both new and legacy directory layouts.
*
* Expected layouts:
* - dir/{component-name}/component.yaml (new format)
* - dir/{component-name}/SKILL.md (legacy skill)
* - dir/{name}.md (legacy root-level skill)
*/
export function scanComponentDir(
dir: string,
source: ComponentSource,
kind?: ComponentKind,
): LoadComponentsResult {
const components: Component[] = [];
const diagnostics: ComponentDiagnostic[] = [];
if (!existsSync(dir)) {
return { components, diagnostics };
}
let entries: import('node:fs').Dirent[];
try {
entries = readdirSync(dir, { withFileTypes: true, encoding: 'utf-8' });
} catch {
return { components, diagnostics };
}
for (const entry of entries) {
if (entry.name.startsWith('.') || entry.name === 'node_modules') {
continue;
}
const fullPath = join(dir, entry.name);
let isDir = entry.isDirectory();
let isFile = entry.isFile();
if (entry.isSymbolicLink()) {
try {
const stats = statSync(fullPath);
isDir = stats.isDirectory();
isFile = stats.isFile();
} catch {
continue;
}
}
if (isDir) {
const result = loadComponentFromDir(fullPath, source);
if (result.component) {
if (!kind || result.component.kind === kind) {
components.push(result.component);
}
}
diagnostics.push(...result.diagnostics);
} else if (isFile && entry.name.endsWith('.md')) {
// Root-level .md files — could be legacy skills or agents
// Peek at frontmatter to determine type
const result = loadFromFile(fullPath, source);
if (result.component) {
if (!kind || result.component.kind === kind) {
components.push(result.component);
}
}
diagnostics.push(...result.diagnostics);
}
}
return { components, diagnostics };
}
/**
 * Scan a directory specifically for agent definitions.
 *
 * Handles both layouts:
 *  - dir/{name}/component.yaml — new format; only `kind: agent` results kept
 *  - dir/{name}.md             — legacy agent file
 * A legacy .md is skipped when a same-named sibling directory carries a
 * component.yaml: the new format takes precedence and is loaded by the
 * directory branch. Symlinks are followed; dangling links are ignored.
 */
export function scanAgentDir(
  dir: string,
  source: ComponentSource,
): LoadComponentsResult {
  const components: Component[] = [];
  const diagnostics: ComponentDiagnostic[] = [];
  if (!existsSync(dir)) {
    return { components, diagnostics };
  }
  let entries: import('node:fs').Dirent[];
  try {
    entries = readdirSync(dir, { withFileTypes: true, encoding: 'utf-8' });
  } catch {
    return { components, diagnostics };
  }
  for (const entry of entries) {
    // Consistency fix: skip hidden entries and node_modules, matching
    // scanComponentDir — previously this scanner would probe .git and
    // node_modules directories for component.yaml files.
    if (entry.name.startsWith('.') || entry.name === 'node_modules') {
      continue;
    }
    const fullPath = join(dir, entry.name);
    let isDir = entry.isDirectory();
    let isFile = entry.isFile();
    if (entry.isSymbolicLink()) {
      try {
        const stats = statSync(fullPath);
        isDir = stats.isDirectory();
        isFile = stats.isFile();
      } catch {
        continue; // dangling symlink
      }
    }
    if (isDir) {
      const result = loadComponentFromDir(fullPath, source);
      // Directories may hold skills too — keep only agents here.
      if (result.component?.kind === 'agent') {
        components.push(result.component);
      }
      diagnostics.push(...result.diagnostics);
      continue;
    }
    if (!entry.name.endsWith('.md')) continue;
    if (!isFile) continue;
    // A same-named directory with component.yaml supersedes this flat file.
    const nameWithoutExt = entry.name.replace(/\.md$/, '');
    const componentDir = join(dir, nameWithoutExt);
    if (existsSync(join(componentDir, 'component.yaml'))) {
      continue;
    }
    const result = loadComponentFromAgentFile(fullPath, source);
    if (result.component) {
      components.push(result.component);
    }
    diagnostics.push(...result.diagnostics);
  }
  return { components, diagnostics };
}
// ============================================================================
// Helpers
// ============================================================================
/**
 * Load a single root-level .md file, deciding skill vs agent from its
 * frontmatter: a `tools` key marks an agent; anything else is treated as a
 * legacy skill. A skill without a non-blank description is silently skipped.
 */
function loadFromFile(
  filePath: string,
  source: ComponentSource,
): LoadComponentResult {
  const diagnostics: ComponentDiagnostic[] = [];

  let raw: string;
  try {
    raw = readFileSync(filePath, 'utf-8');
  } catch (error) {
    const msg = error instanceof Error ? error.message : 'failed to read file';
    diagnostics.push({ type: 'warning', message: msg, path: filePath });
    return { component: null, diagnostics };
  }

  const { frontmatter } = parseFrontmatter<Record<string, unknown>>(raw);
  // Presence of a 'tools' field marks the file as a legacy agent.
  if (frontmatter.tools !== undefined) {
    return loadFromLegacyAgent(filePath, source);
  }

  const description = frontmatter.description as string | undefined;
  if (!description || description.trim() === '') {
    return { component: null, diagnostics };
  }

  const name = (frontmatter.name as string) || basename(filePath, '.md');
  const spec: SkillSpec = {
    prompt: basename(filePath),
    disableModelInvocation: frontmatter['disable-model-invocation'] === true,
  };
  const component: Component = {
    id: computeComponentId(name),
    kind: 'skill',
    metadata: { name, description },
    spec,
    dirPath: dirname(filePath),
    filePath,
    source,
    format: 'skill-md',
    enabled: true,
  };
  return { component, diagnostics };
}
/**
 * Verify that the spec's entry file (skill prompt or agent system prompt)
 * exists on disk and is a regular file.
 *
 * @returns An error diagnostic describing the problem, or null when the
 *          referenced entry file checks out.
 */
function validateEntryFile(
  kind: ComponentKind,
  spec: ComponentDefinition['spec'],
  dir: string,
  yamlPath: string,
): ComponentDiagnostic | null {
  const isSkill = kind === 'skill';
  const field = isSkill ? 'spec.prompt' : 'spec.systemPrompt';
  const relativePath = isSkill ? (spec as SkillSpec).prompt : (spec as AgentSpec).systemPrompt;

  if (typeof relativePath !== 'string' || relativePath === '') {
    return { type: 'error', message: `missing ${field}`, path: yamlPath };
  }

  const entryPath = join(dir, relativePath);
  if (!existsSync(entryPath)) {
    return {
      type: 'error',
      message: `missing referenced file for ${field}: ${relativePath}`,
      path: entryPath,
    };
  }
  try {
    if (!statSync(entryPath).isFile()) {
      return {
        type: 'error',
        message: `referenced ${field} is not a file: ${relativePath}`,
        path: entryPath,
      };
    }
  } catch (error) {
    const msg = error instanceof Error ? error.message : 'failed to inspect referenced file';
    return { type: 'error', message: `${msg}: ${relativePath}`, path: entryPath };
  }
  return null;
}

View file

@ -18,8 +18,14 @@ export function readFrozenDefinition(runDir: string): WorkflowDefinition {
return parse(raw, { schema: "core" }) as WorkflowDefinition;
} catch (err) {
const message = err instanceof Error ? err.message : String(err);
throw new Error(`Failed to read/parse DEFINITION.yaml at ${defPath}: ${message}`, {
cause: err,
});
const wrapped = new Error(
`Failed to read/parse DEFINITION.yaml at ${defPath}: ${message}`,
{ cause: err },
) as NodeJS.ErrnoException;
// Forward errno code so callers that check e.code (e.g. ENOENT) still work.
if (err && typeof err === "object" && "code" in err) {
wrapped.code = (err as NodeJS.ErrnoException).code;
}
throw wrapped;
}
}

View file

@ -0,0 +1,26 @@
# Oneshot Workflow: {{displayName}}
You are running a **oneshot** workflow called `{{name}}`. Oneshot workflows are
prompt-only — there is no STATE.json, no phase tracking, no artifact directory,
and no resume mechanism. Just execute the instructions below and return.
## User Arguments
`{{userArgs}}`
(If empty, use sensible defaults from the workflow body.)
## Workflow Instructions
{{body}}
## Execution Rules
1. **No scaffolding.** Do not create `.sf/workflows/` directories, STATE.json
files, or run directories unless the instructions explicitly tell you to
write a specific artifact.
2. **No branch switching.** Work on the current branch.
3. **Be concise.** Oneshot workflows produce a single focused output (a report,
a summary, a code change, a PR comment) — finish in this turn.
4. **Ask only when blocked.** If the instructions need information you can't
discover, ask one clear question. Otherwise proceed.

View file

@ -27,7 +27,7 @@ import {
nativeWorkingTreeStatus,
} from "./native-git-bridge.js";
import { nativeParseJsonlTail } from "./native-parser-bridge.js";
import { sfRoot } from "./paths.js";
import { sfRuntimeRoot } from "./paths.js";
// ─── Types ────────────────────────────────────────────────────────────────────
@ -329,7 +329,7 @@ export function getDeepDiagnostic(
let trace: ExecutionTrace | null = null;
try {
if (worktreePath) {
const wtActivityDir = join(sfRoot(worktreePath), "activity");
const wtActivityDir = join(sfRuntimeRoot(worktreePath), "activity");
trace = readLastActivityLog(wtActivityDir);
}
} catch {
@ -338,7 +338,7 @@ export function getDeepDiagnostic(
// Fall back to root activity logs
if (!trace || trace.toolCallCount === 0) {
const activityDir = join(sfRoot(basePath), "activity");
const activityDir = join(sfRuntimeRoot(basePath), "activity");
trace = readLastActivityLog(activityDir);
}
@ -352,7 +352,7 @@ export function getDeepDiagnostic(
*/
export function readActiveMilestoneId(basePath: string): string | null {
try {
const statePath = join(sfRoot(basePath), "STATE.md");
const statePath = join(sfRuntimeRoot(basePath), "STATE.md");
if (!existsSync(statePath)) return null;
const content = readFileSync(statePath, "utf-8");
const match = /\*\*Active Milestone:\*\*\s*(\S+)/i.exec(content);

View file

@ -1603,6 +1603,9 @@ export function closeDatabase(): void {
}
/** Run a full VACUUM — call sparingly (e.g. after milestone completion). */
/**
* Vacuum the database to reclaim disk space and optimize.
*/
export function vacuumDatabase(): void {
if (!currentDb) return;
try {
@ -1614,6 +1617,9 @@ export function vacuumDatabase(): void {
let _txDepth = 0;
/**
* Execute a callback within a database transaction (BEGIN...COMMIT or ROLLBACK).
*/
export function transaction<T>(fn: () => T): T {
if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
@ -1649,6 +1655,9 @@ export function transaction<T>(fn: () => T): T {
* milestone + slices + counts and want one snapshot). Re-entrant if already
* inside a transaction, runs fn() without starting a nested one.
*/
/**
* Execute a callback within a read-only database transaction.
*/
export function readTransaction<T>(fn: () => T): T {
if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");

View file

@ -164,6 +164,10 @@ export class WorktreeResolver {
enterMilestone(milestoneId: string, ctx: NotifyCtx): void {
this.validateMilestoneId(milestoneId);
// Capture projectRoot at function entry — before any basePath mutation — so all
// emit calls in this function use a stable value regardless of early-return paths.
const projectRoot = this.s.originalBasePath ?? this.s.basePath;
// If worktree creation failed earlier this session, skip all future attempts
if (this.s.isolationDegraded) {
debugLog("WorktreeResolver", {
@ -182,7 +186,7 @@ export class WorktreeResolver {
skipped: true,
reason: "isolation-disabled",
});
emitJournalEvent(this.s.originalBasePath || this.s.basePath, {
emitJournalEvent(projectRoot, {
ts: new Date().toISOString(),
flowId: randomUUID(),
seq: 0,
@ -193,8 +197,6 @@ export class WorktreeResolver {
}
const basePath = this.s.originalBasePath || this.s.basePath;
// Capture projectRoot before basePath mutation so telemetry uses the original root
const projectRoot = basePath;
debugLog("WorktreeResolver", {
action: "enterMilestone",
milestoneId,