fix: make sift the codebase indexer

This commit is contained in:
Mikael Hugo 2026-05-05 14:27:03 +02:00
parent 3ba2f8a501
commit 3af4185b20
18 changed files with 4357 additions and 4247 deletions

View file

@ -7,11 +7,14 @@
},
"files": {
"includes": [
"**",
"**/*.{js,cjs,mjs,ts,tsx,json,jsonc,css,html}",
"!!.sf",
"!!.omg",
"!!**/dist",
"!!**/dist-test",
"!!**/rust-engine/npm",
"!!src/resources/skills/create-sf-extension/templates/**"
"!!**/*.min.js",
"!!src/resources/skills/create-sf-extension/templates"
]
},
"formatter": {
@ -48,6 +51,11 @@
"quoteStyle": "double"
}
},
"css": {
"parser": {
"tailwindDirectives": true
}
},
"assist": {
"enabled": true,
"actions": {

View file

@ -0,0 +1,23 @@
/**
* shell-env.test.ts Regression coverage for automated shell environment.
*
* Purpose: keep agent-run git commands non-interactive so operations such as
* `git rebase --continue` cannot hang by opening an editor.
*/
import assert from "node:assert/strict";
import { describe, it } from "vitest";
import { getShellEnv } from "./shell.js";
describe("getShellEnv", () => {
  it("getShellEnv_when_git_rebase_continues_disables_editor_prompts", () => {
    const env = getShellEnv();
    // Every editor/credential hook git consults must be neutralized so that
    // agent-run git commands never block waiting for interactive input.
    const expected: Record<string, string> = {
      GIT_TERMINAL_PROMPT: "0",
      GIT_EDITOR: "true",
      GIT_SEQUENCE_EDITOR: "true",
      GIT_ASKPASS: "",
      VISUAL: "true",
      EDITOR: "true",
    };
    for (const [name, value] of Object.entries(expected)) {
      assert.equal(env[name], value);
    }
  });
});

View file

@ -1,6 +1,6 @@
import { spawn, spawnSync } from "node:child_process";
import { existsSync } from "node:fs";
import { delimiter } from "node:path";
import { spawn, spawnSync } from "child_process";
import { getBinDir, getSettingsPath } from "../config.js";
import { SettingsManager } from "../core/settings-manager.js";
@ -13,7 +13,10 @@ function findBashOnPath(): string | null {
if (process.platform === "win32") {
// Windows: Use 'where' and verify file exists (where can return non-existent paths)
try {
const result = spawnSync("where", ["bash.exe"], { encoding: "utf-8", timeout: 5000 });
const result = spawnSync("where", ["bash.exe"], {
encoding: "utf-8",
timeout: 5000,
});
if (result.status === 0 && result.stdout) {
const firstMatch = result.stdout.trim().split(/\r?\n/)[0];
if (firstMatch && existsSync(firstMatch)) {
@ -28,7 +31,10 @@ function findBashOnPath(): string | null {
// Unix: Use 'which' and trust its output (handles Termux and special filesystems)
try {
const result = spawnSync("which", ["bash"], { encoding: "utf-8", timeout: 5000 });
const result = spawnSync("which", ["bash"], {
encoding: "utf-8",
timeout: 5000,
});
if (result.status === 0 && result.stdout) {
const firstMatch = result.stdout.trim().split(/\r?\n/)[0];
if (firstMatch) {
@ -126,20 +132,35 @@ export function getShellConfig(): { shell: string; args: string[] } {
*/
/**
 * Normalize Windows-style `NUL` redirection targets to `/dev/null`.
 *
 * On win32, rewrites redirections such as `2>NUL` or `>> NUL` to target
 * `/dev/null` instead — presumably because SF runs commands through bash even
 * on Windows (see findBashOnPath), where `NUL` is not a device. On all other
 * platforms the command is returned unchanged.
 *
 * Diff-residue fix: the original block contained both the old single-line and
 * the new multi-line form of the same `return`; only one is kept.
 *
 * @param command Shell command text to sanitize.
 * @returns The command with `NUL` redirection targets replaced on win32.
 */
export function sanitizeCommand(command: string): string {
  if (process.platform !== "win32") return command;
  // Matches an optional fd number plus `>` or `>>`, spaces, then the NUL
  // device name followed by whitespace, a separator, `)`, or end-of-string.
  return command.replace(
    /(\d*>>?) *\bNUL\b(?=\s|;|\||&|\)|$)/gi,
    "$1 /dev/null",
  );
}
/**
 * Build the environment for agent-run shells.
 *
 * Prepends SF's bin directory to PATH (unless already present) and forces git
 * into fully non-interactive mode so commands such as `git rebase --continue`
 * either complete or fail visibly instead of hanging on an editor or
 * credential prompt.
 *
 * Diff-residue fix: the original block declared `pathKey` and `updatedPath`
 * twice (old + reformatted new lines); only the final form is kept.
 *
 * @returns A copy of `process.env` with PATH updated and git editor/prompt
 *   variables overridden.
 */
export function getShellEnv(): NodeJS.ProcessEnv {
  const binDir = getBinDir();
  // Windows may spell the variable "Path"/"path"; reuse the existing key so
  // we do not end up with two PATH-like entries.
  const pathKey =
    Object.keys(process.env).find((key) => key.toLowerCase() === "path") ??
    "PATH";
  const currentPath = process.env[pathKey] ?? "";
  const pathEntries = currentPath.split(delimiter).filter(Boolean);
  const hasBinDir = pathEntries.includes(binDir);
  const updatedPath = hasBinDir
    ? currentPath
    : [binDir, currentPath].filter(Boolean).join(delimiter);
  return {
    ...process.env,
    [pathKey]: updatedPath,
    // Agent-run shells must not open an editor or credential prompt. Commands
    // such as `git rebase --continue` should either complete or fail visibly.
    GIT_TERMINAL_PROMPT: "0",
    GIT_EDITOR: "true",
    GIT_SEQUENCE_EDITOR: "true",
    GIT_ASKPASS: "",
    VISUAL: "true",
    EDITOR: "true",
  };
}

View file

@ -1,28 +1,61 @@
// Ambient declarations for the code-intelligence module.
//
// Diff-residue fix: thirteen function declarations appeared twice (old
// one-line and new multi-line copies of identical signatures); each is kept
// exactly once. No unique declaration was removed.
//
// NOTE(review): return types are intentionally loose (`unknown` /
// `Record<string, unknown>`) — confirm against the implementation before
// tightening.
export const PROJECT_RAG_MCP_SERVER_NAME: string;
export function detectProjectRag(projectRoot: string, prefs: Record<string, unknown>, env?: NodeJS.ProcessEnv): unknown;
export function resolveProjectRagBinary(env?: NodeJS.ProcessEnv): string | null;

// --- Sift backend ----------------------------------------------------------
export function resolveSiftBinary(env?: NodeJS.ProcessEnv): string | null;
export function resolveSiftWarmupRuntimeDirs(projectRoot: string): {
  searchCache: string;
  tmpDir: string;
};
export function ensureSiftRuntimeDirs(projectRoot: string): {
  searchCache: string;
  tmpDir: string;
};
export function buildSiftEnv(
  projectRoot: string,
  env: NodeJS.ProcessEnv,
): NodeJS.ProcessEnv;
export function resolveSiftSearchScope(
  projectRoot: string,
  scope?: string,
): string;
export function detectSift(
  projectRoot: string,
  prefs: Record<string, unknown>,
  env?: NodeJS.ProcessEnv,
): unknown;
export function ensureSiftIndexWarmup(
  projectRoot: string,
  prefs: Record<string, unknown>,
  options?: Record<string, unknown>,
): Promise<unknown>;

// --- project-rag backend ---------------------------------------------------
export function resolveProjectRagBuildJobs(env?: NodeJS.ProcessEnv): number;
export function findProjectRagSourceDir(projectRoot: string, env?: NodeJS.ProcessEnv): string | null;
export function resolveProjectRagBinaryForProject(projectRoot: string, env?: NodeJS.ProcessEnv): string | null;
export function buildProjectRagMcpServerConfig(projectRoot?: string, env?: NodeJS.ProcessEnv): Record<string, unknown>;
export function buildProjectRagBinary(projectRoot: string, env?: NodeJS.ProcessEnv): boolean;
export function ensureProjectRagMcpConfig(projectRoot: string, env?: NodeJS.ProcessEnv): void;
export function formatProjectRagStatus(projectRoot: string, prefs: Record<string, unknown>, env?: NodeJS.ProcessEnv): string;

// --- Backend selection and status ------------------------------------------
export function resolveCodebaseIndexerBackendName(
  prefs: Record<string, unknown>,
): string;
export function resolveEffectiveCodebaseIndexerBackendName(
  projectRoot: string,
  prefs: Record<string, unknown>,
  env?: NodeJS.ProcessEnv,
): string;
export function getCodebaseIndexerBackend(
  prefsOrName: Record<string, unknown> | string,
): unknown;
export function detectCodebaseIndexer(
  projectRoot: string,
  prefs: Record<string, unknown>,
  env?: NodeJS.ProcessEnv,
): unknown;
export function formatCodebaseIndexerStatus(
  projectRoot: string,
  prefs: Record<string, unknown>,
  env?: NodeJS.ProcessEnv,
): string;
export function buildCodeIntelligenceContextBlock(
  projectRoot: string,
  prefs: Record<string, unknown>,
  env?: NodeJS.ProcessEnv,
): string;
export function formatSiftStatus(
  projectRoot: string,
  prefs: Record<string, unknown>,
  env?: NodeJS.ProcessEnv,
): string;
export const PROJECT_RAG_CODEBASE_INDEXER_BACKEND: Record<string, unknown>;
export const SIFT_CODEBASE_INDEXER_BACKEND: Record<string, unknown>;
export const NO_CODEBASE_INDEXER_BACKEND: Record<string, unknown>;
export const CODEBASE_INDEXER_BACKENDS: Record<string, unknown>;

File diff suppressed because it is too large Load diff

View file

@ -1,271 +1,357 @@
import { importExtensionModule, } from "@singularity-forge/pi-coding-agent";
import { importExtensionModule } from "@singularity-forge/pi-coding-agent";
import { workflowTemplateCommandDefinitions } from "./workflow-templates.js";
/**
 * First-level `/sf <subcommand>` completion entries.
 * `cmd` is the literal token inserted; `desc` is the completion description.
 *
 * Diff-residue fix: the original array contained the full 46-entry list
 * twice (identical old and new copies); one copy is kept.
 */
const TOP_LEVEL_SUBCOMMANDS = [
  { cmd: "help", desc: "Categorized command reference with descriptions" },
  { cmd: "next", desc: "Explicit step mode (same as /sf)" },
  {
    cmd: "autonomous",
    desc: "Autonomous mode — research, plan, execute, commit, repeat",
  },
  { cmd: "stop", desc: "Stop autonomous mode gracefully" },
  {
    cmd: "pause",
    desc: "Pause autonomous mode (preserves state, /sf autonomous to resume)",
  },
  { cmd: "status", desc: "Progress dashboard" },
  { cmd: "visualize", desc: "Open workflow visualizer" },
  { cmd: "queue", desc: "Queue and reorder future milestones" },
  { cmd: "quick", desc: "Execute a quick task without full planning overhead" },
  { cmd: "discuss", desc: "Discuss architecture and decisions" },
  { cmd: "capture", desc: "Fire-and-forget thought capture" },
  { cmd: "changelog", desc: "Show categorized release notes" },
  { cmd: "triage", desc: "Manually trigger triage of pending captures" },
  { cmd: "dispatch", desc: "Dispatch a specific phase directly" },
  { cmd: "history", desc: "View execution history" },
  { cmd: "undo", desc: "Revert last completed unit" },
  { cmd: "skip", desc: "Prevent a unit from auto-mode dispatch" },
  { cmd: "export", desc: "Export milestone or slice results" },
  { cmd: "cleanup", desc: "Remove merged branches or snapshots" },
  { cmd: "mode", desc: "Switch workflow mode (solo/team)" },
  { cmd: "prefs", desc: "Manage preferences" },
  { cmd: "config", desc: "Set API keys for external tools" },
  { cmd: "keys", desc: "API key manager" },
  { cmd: "hooks", desc: "Show configured hooks" },
  { cmd: "run-hook", desc: "Manually trigger a specific hook" },
  { cmd: "skill-health", desc: "Skill lifecycle dashboard" },
  { cmd: "doctor", desc: "Runtime health checks with auto-fix" },
  { cmd: "logs", desc: "Browse activity logs, debug logs, and metrics" },
  { cmd: "forensics", desc: "Examine execution logs" },
  { cmd: "init", desc: "Project init wizard" },
  { cmd: "setup", desc: "Global setup status and configuration" },
  { cmd: "migrate", desc: "Migrate a v1 .planning directory to .sf format" },
  { cmd: "remote", desc: "Control remote auto-mode" },
  { cmd: "steer", desc: "Hard-steer plan documents during execution" },
  { cmd: "inspect", desc: "Show SQLite DB diagnostics" },
  { cmd: "knowledge", desc: "Add persistent project knowledge" },
  {
    cmd: "new-milestone",
    desc: "Create a milestone from a specification document",
  },
  { cmd: "parallel", desc: "Parallel milestone orchestration" },
  { cmd: "park", desc: "Park a milestone" },
  { cmd: "unpark", desc: "Reactivate a parked milestone" },
  { cmd: "update", desc: "Update SF to the latest version" },
  { cmd: "start", desc: "Start a workflow template" },
  { cmd: "templates", desc: "List available workflow templates" },
  { cmd: "extensions", desc: "Manage extensions" },
  {
    cmd: "codebase",
    desc: "Generate, refresh, and inspect the codebase map cache",
  },
  {
    cmd: "scaffold",
    desc: "Inspect or refresh ADR-021 versioned scaffold docs",
  },
];
/**
 * Filter completion options whose `cmd` starts with `partial`, mapping each
 * match to a completion item. When `prefix` is non-empty the inserted value
 * is "<prefix> <cmd>" so completions compose with the subcommand already
 * typed by the user.
 *
 * Diff-residue fix: the body appeared twice (redeclaring `normalizedPrefix`);
 * only one copy is kept.
 *
 * @param partial Text the user has typed so far for this token.
 * @param options Array of { cmd, desc } candidates.
 * @param prefix Already-typed tokens to prepend to the inserted value.
 * @returns Array of { value, label, description } completion items.
 */
function filterStartsWith(partial, options, prefix = "") {
  const normalizedPrefix = prefix.length > 0 ? `${prefix} ` : "";
  return options
    .filter((option) => option.cmd.startsWith(partial))
    .map((option) => ({
      value: `${normalizedPrefix}${option.cmd}`,
      label: option.cmd,
      description: option.desc,
    }));
}
/**
 * Compute argument completions for `/sf <prefix>`.
 *
 * `prefix` is everything typed after `/sf`. The first token selects the
 * subcommand (completed from TOP_LEVEL_SUBCOMMANDS); known subcommands then
 * get a curated second-level completion list. Returns an array of
 * { value, label, description } items, or null when no completions apply.
 *
 * Diff-residue fix: the whole body appeared twice (old project-rag version
 * followed by the new sift version, redeclaring `parts`/`partial`); only the
 * current sift version is kept. Additionally, the `triage --source` branch
 * filtered on `parts[1]` (always the literal "--source", so it matched
 * nothing); it now filters on the third token, `parts[2]`.
 */
function getSfArgumentCompletions(prefix) {
  const parts = prefix.trim().split(/\s+/);
  if (parts.length <= 1) {
    return filterStartsWith(parts[0] ?? "", TOP_LEVEL_SUBCOMMANDS);
  }
  const partial = parts[1] ?? "";
  if ((parts[0] === "auto" || parts[0] === "autonomous") && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "--verbose", desc: "Show detailed execution output" },
        { cmd: "--debug", desc: "Enable debug logging" },
      ],
      parts[0],
    );
  }
  if (parts[0] === "next" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "--verbose", desc: "Show detailed step output" },
        { cmd: "--dry-run", desc: "Preview next step without executing" },
      ],
      "next",
    );
  }
  if (parts[0] === "mode" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "global", desc: "Edit global workflow mode" },
        { cmd: "project", desc: "Edit project-specific workflow mode" },
      ],
      "mode",
    );
  }
  if (parts[0] === "parallel" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "start", desc: "Start parallel milestone orchestration" },
        { cmd: "status", desc: "Show parallel worker statuses" },
        { cmd: "stop", desc: "Stop all parallel workers" },
        { cmd: "pause", desc: "Pause a specific worker" },
        { cmd: "resume", desc: "Resume a paused worker" },
        { cmd: "merge", desc: "Merge completed milestone branches" },
      ],
      "parallel",
    );
  }
  if (parts[0] === "setup" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "llm", desc: "Configure LLM provider settings" },
        { cmd: "search", desc: "Configure web search provider" },
        { cmd: "remote", desc: "Configure remote integrations" },
        { cmd: "keys", desc: "Manage API keys" },
        { cmd: "prefs", desc: "Configure global preferences" },
      ],
      "setup",
    );
  }
  if (parts[0] === "logs" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "debug", desc: "List or view debug log files" },
        { cmd: "tail", desc: "Show last N activity log summaries" },
        { cmd: "clear", desc: "Remove old activity and debug logs" },
      ],
      "logs",
    );
  }
  if (parts[0] === "keys" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "list", desc: "Show key status dashboard" },
        { cmd: "add", desc: "Add a key for a provider" },
        { cmd: "remove", desc: "Remove a key" },
        { cmd: "test", desc: "Validate key(s) with API call" },
        { cmd: "rotate", desc: "Replace an existing key" },
        { cmd: "doctor", desc: "Health check all keys" },
      ],
      "keys",
    );
  }
  if (parts[0] === "prefs" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "global", desc: "Edit global preferences file" },
        { cmd: "project", desc: "Edit project preferences file" },
        { cmd: "status", desc: "Show effective preferences" },
        { cmd: "wizard", desc: "Interactive preferences wizard" },
        { cmd: "setup", desc: "First-time preferences setup" },
        { cmd: "import-claude", desc: "Import settings from Claude Code" },
      ],
      "prefs",
    );
  }
  if (parts[0] === "remote" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "slack", desc: "Configure Slack integration" },
        { cmd: "discord", desc: "Configure Discord integration" },
        { cmd: "status", desc: "Show remote connection status" },
        { cmd: "disconnect", desc: "Disconnect remote integrations" },
      ],
      "remote",
    );
  }
  if (parts[0] === "history" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "--cost", desc: "Show cost breakdown per entry" },
        { cmd: "--phase", desc: "Filter by phase type" },
        { cmd: "--model", desc: "Filter by model used" },
        { cmd: "10", desc: "Show last 10 entries" },
        { cmd: "20", desc: "Show last 20 entries" },
        { cmd: "50", desc: "Show last 50 entries" },
      ],
      "history",
    );
  }
  if (parts[0] === "export" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "--json", desc: "Export as JSON" },
        { cmd: "--markdown", desc: "Export as Markdown" },
        { cmd: "--html", desc: "Export as HTML" },
        { cmd: "--html --all", desc: "Export all milestones as HTML" },
      ],
      "export",
    );
  }
  if (parts[0] === "cleanup" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "branches", desc: "Remove merged milestone branches" },
        { cmd: "snapshots", desc: "Remove old execution snapshots" },
      ],
      "cleanup",
    );
  }
  if (parts[0] === "knowledge" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "rule", desc: "Add a project rule" },
        { cmd: "pattern", desc: "Add a code pattern" },
        { cmd: "lesson", desc: "Record a lesson learned" },
      ],
      "knowledge",
    );
  }
  if (parts[0] === "start" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        ...workflowTemplateCommandDefinitions(),
        { cmd: "resume", desc: "Resume an in-progress workflow" },
        { cmd: "--list", desc: "List all available templates" },
        { cmd: "--dry-run", desc: "Preview workflow without executing" },
      ],
      "start",
    );
  }
  if (parts[0] === "templates" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [{ cmd: "info", desc: "Show detailed template info" }],
      "templates",
    );
  }
  if (parts[0] === "extensions" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "list", desc: "List all extensions and their status" },
        { cmd: "enable", desc: "Enable a disabled extension" },
        { cmd: "disable", desc: "Disable an extension" },
        { cmd: "info", desc: "Show extension details" },
      ],
      "extensions",
    );
  }
  if (parts[0] === "codebase" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "generate", desc: "Generate or regenerate CODEBASE.md" },
        { cmd: "update", desc: "Refresh the CODEBASE.md cache immediately" },
        {
          cmd: "stats",
          desc: "Show codebase-map coverage and generation time",
        },
        {
          cmd: "indexer",
          desc: "Inspect Sift code search backend",
        },
        { cmd: "help", desc: "Show usage and subcommands" },
      ],
      "codebase",
    );
  }
  if (parts[0] === "triage" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [{ cmd: "--source", desc: "Triage source (captures|todo)" }],
      "triage",
    );
  }
  if (parts[0] === "triage" && parts[1] === "--source" && parts.length <= 3) {
    // Complete the third token (the source value), not "--source" itself.
    return filterStartsWith(
      parts[2] ?? "",
      [
        { cmd: "captures", desc: "Triage pending captures (default)" },
        { cmd: "todo", desc: "Triage repo-root TODO.md" },
      ],
      "triage --source",
    );
  }
  if (parts[0] === "doctor" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "fix", desc: "Auto-fix detected issues" },
        { cmd: "heal", desc: "AI-driven deep healing" },
        { cmd: "audit", desc: "Run health audit without fixing" },
      ],
      "doctor",
    );
  }
  if (parts[0] === "scaffold" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        {
          cmd: "sync",
          desc: "Refresh ADR-021 scaffold docs (drift report + apply pending upgrades)",
        },
      ],
      "scaffold",
    );
  }
  if (parts[0] === "dispatch" && parts.length <= 2) {
    return filterStartsWith(
      partial,
      [
        { cmd: "research", desc: "Run research phase" },
        { cmd: "plan", desc: "Run planning phase" },
        { cmd: "execute", desc: "Run execution phase" },
        { cmd: "complete", desc: "Run completion phase" },
        { cmd: "reassess", desc: "Reassess current progress" },
        { cmd: "uat", desc: "Run user acceptance testing" },
        { cmd: "replan", desc: "Replan the current slice" },
      ],
      "dispatch",
    );
  }
  return null;
}
/**
 * Register the `/sf` command with lazy loading: the heavy command module is
 * imported only when the command actually runs, keeping extension startup
 * cheap.
 *
 * Diff-residue fix: `pi.registerCommand("sf", …)` was invoked twice (old and
 * reformatted new copies); the command is now registered exactly once.
 *
 * @param pi Extension host API providing `registerCommand`.
 */
export function registerLazySFCommand(pi) {
  pi.registerCommand("sf", {
    description: "SF — Singularity Forge",
    getArgumentCompletions: getSfArgumentCompletions,
    handler: async (args, ctx) => {
      const { handleSFCommand } = await importExtensionModule(
        import.meta.url,
        "./commands.js",
      );
      await handleSFCommand(args, ctx, pi);
    },
  });
}

View file

@ -2,176 +2,150 @@
* SF Command /sf codebase
*
* Generate and manage the codebase map (.sf/CODEBASE.md).
* Subcommands: generate, update, stats, indexer, rag, help
* Subcommands: generate, update, stats, indexer, help
*/
import { buildProjectRagBinary, ensureProjectRagMcpConfig, formatCodebaseIndexerStatus, } from "./code-intelligence.js";
import { generateCodebaseMap, getCodebaseMapStats, readCodebaseMap, updateCodebaseMap, writeCodebaseMap, } from "./codebase-generator.js";
import { formatCodebaseIndexerStatus } from "./code-intelligence.js";
import {
generateCodebaseMap,
getCodebaseMapStats,
readCodebaseMap,
updateCodebaseMap,
writeCodebaseMap,
} from "./codebase-generator.js";
import { loadEffectiveSFPreferences } from "./preferences.js";
// Help text for `/sf codebase`; shown by the help subcommand and when no map
// exists. Sift is the only supported indexer backend after this change.
//
// Diff-residue fix: `const USAGE` was declared twice (old project-rag version
// plus new sift-only version); only the current version is kept.
const USAGE =
  "Usage: /sf codebase [generate|update|stats|indexer]\n\n" +
  " generate [--max-files N] [--collapse-threshold N] — Generate or regenerate CODEBASE.md\n" +
  " update [--max-files N] [--collapse-threshold N] — Refresh the CODEBASE.md cache immediately\n" +
  " stats — Show file count, coverage, and generation time\n" +
  " indexer [status] — Inspect Sift codebase-indexer status\n" +
  " help — Show this help\n\n" +
  "With no subcommand, shows stats if a map exists or help if not.\n" +
  "SF also refreshes CODEBASE.md automatically before prompt injection and after completed units when tracked files change.\n\n" +
  "Configure defaults via preferences.md:\n" +
  " codebase:\n" +
  ' exclude_patterns: ["docs/", "fixtures/"]\n' +
  " max_files: 1000\n" +
  " collapse_threshold: 15\n" +
  " indexer_backend: sift # sift | none; omit for auto-detect";
/**
 * Handle the `/sf codebase` slash command.
 *
 * Subcommands: generate | update | stats | indexer | help. With no
 * subcommand, shows stats when a codebase map exists, otherwise usage.
 *
 * @param args Raw argument string following the subcommand name.
 * @param ctx  Command context; `ctx.ui.notify(message, level)` is used for
 *             all user feedback.
 * @param _pi  Unused; kept so the command-handler signature stays uniform.
 */
export async function handleCodebase(args, ctx, _pi) {
  const basePath = process.cwd();
  const parts = args.trim().split(/\s+/);
  const sub = parts[0] ?? "";
  switch (sub) {
    case "generate": {
      const options = resolveCodebaseOptions(args, ctx);
      if (options === false) return; // validation failed, message already shown
      // Reuse descriptions from an existing map so regeneration keeps
      // previously written file summaries.
      const existing = readCodebaseMap(basePath);
      const existingDescriptions = existing
        ? (await import("./codebase-generator.js")).parseCodebaseMap(existing)
        : undefined;
      const result = generateCodebaseMap(
        basePath,
        options,
        existingDescriptions,
      );
      if (result.fileCount === 0) {
        // Zero files usually means we are not inside a git repository.
        ctx.ui.notify(
          "Codebase map generated with 0 files.\n" +
            "Is this a git repository? Run 'git ls-files' to verify.",
          "warning",
        );
        return;
      }
      const outPath = writeCodebaseMap(basePath, result.content);
      ctx.ui.notify(
        `Codebase map generated: ${result.fileCount} files\n` +
          `Written to: ${outPath}` +
          (result.truncated
            ? `\n⚠ Truncated — increase --max-files to include all files`
            : ""),
        "success",
      );
      return;
    }
    case "update": {
      const existing = readCodebaseMap(basePath);
      if (!existing) {
        ctx.ui.notify(
          "No codebase map found. Run /sf codebase generate to create one.",
          "warning",
        );
        return;
      }
      const options = resolveCodebaseOptions(args, ctx);
      if (options === false) return;
      const result = updateCodebaseMap(basePath, options);
      writeCodebaseMap(basePath, result.content);
      ctx.ui.notify(
        `Codebase map updated: ${result.fileCount} files\n` +
          `  Added: ${result.added} | Removed: ${result.removed} | Unchanged: ${result.unchanged}` +
          (result.truncated
            ? `\n⚠ Truncated — increase --max-files to include all files`
            : ""),
        "success",
      );
      return;
    }
    case "stats": {
      showStats(basePath, ctx);
      return;
    }
    case "indexer": {
      // Only "status" is supported; default to it when no action is given.
      const action = (parts[1] ?? "status").toLowerCase();
      const prefs = loadEffectiveSFPreferences()?.preferences?.codebase;
      if (action === "status") {
        ctx.ui.notify(formatCodebaseIndexerStatus(basePath, prefs), "info");
        return;
      }
      ctx.ui.notify(
        `Unknown /sf codebase indexer action "${action}". Use status.`,
        "warning",
      );
      return;
    }
    case "help":
      ctx.ui.notify(USAGE, "info");
      return;
    case "": {
      // Safe default: show stats if map exists, help if not
      const existing = readCodebaseMap(basePath);
      if (existing) {
        showStats(basePath, ctx);
      } else {
        ctx.ui.notify(USAGE, "info");
      }
      return;
    }
    default:
      ctx.ui.notify(`Unknown subcommand "${sub}".\n\n${USAGE}`, "warning");
  }
}
/**
 * Print codebase-map statistics (file count, description coverage,
 * generation time) via `ctx.ui.notify`, or a hint when no map exists.
 *
 * @param basePath Repository root used to locate the cached map.
 * @param ctx      Command context providing `ui.notify`.
 */
function showStats(basePath, ctx) {
  const stats = getCodebaseMapStats(basePath);
  if (!stats.exists) {
    ctx.ui.notify(
      "No codebase map found. Run /sf codebase generate to create one.",
      "info",
    );
    return;
  }
  // Percentage of files that carry a description; guard against divide-by-zero.
  const coverage =
    stats.fileCount > 0
      ? Math.round((stats.describedCount / stats.fileCount) * 100)
      : 0;
  ctx.ui.notify(
    `Codebase Map Stats:\n` +
      `  Files: ${stats.fileCount}\n` +
      `  Described: ${stats.describedCount} (${coverage}%)\n` +
      `  Undescribed: ${stats.undescribedCount}\n` +
      `  Generated: ${stats.generatedAt ?? "unknown"}\n\n` +
      (stats.undescribedCount > 0
        ? `Tip: Auto-refresh keeps the cache current, but /sf codebase update forces an immediate refresh.`
        : `Coverage is complete.`),
    "info",
  );
}
/**
 * Resolve codebase map options by merging preferences with CLI flags.
 * CLI flags take precedence over preferences.md values.
 *
 * Returns false if validation failed (error already shown to user).
 */
function resolveCodebaseOptions(args, ctx) {
  // Load preferences defaults
  const prefs = loadEffectiveSFPreferences()?.preferences?.codebase;
  // Parse CLI flags
  const maxFilesStr = extractFlag(args, "--max-files");
  const collapseStr = extractFlag(args, "--collapse-threshold");
  // Validate --max-files
  let maxFiles;
  if (maxFilesStr) {
    maxFiles = parseInt(maxFilesStr, 10);
    if (Number.isNaN(maxFiles) || maxFiles < 1) {
      ctx.ui.notify(
        "--max-files must be a positive integer (e.g. --max-files 200).",
        "warning",
      );
      return false;
    }
  }
  // Validate --collapse-threshold
  let collapseThreshold;
  if (collapseStr) {
    collapseThreshold = parseInt(collapseStr, 10);
    if (Number.isNaN(collapseThreshold) || collapseThreshold < 1) {
      ctx.ui.notify(
        "--collapse-threshold must be a positive integer (e.g. --collapse-threshold 15).",
        "warning",
      );
      return false;
    }
  }
  return {
    // CLI flags override preferences
    maxFiles: maxFiles ?? prefs?.max_files,
    collapseThreshold: collapseThreshold ?? prefs?.collapse_threshold,
    excludePatterns: prefs?.exclude_patterns,
  };
}
/**
 * Extract the value that follows a CLI flag in a raw argument string.
 * Matches both `--flag value` and `--flag=value` forms.
 *
 * @param args Raw argument string to search.
 * @param flag Flag name including leading dashes (e.g. "--max-files").
 * @returns The flag's value, or undefined when the flag is absent.
 */
function extractFlag(args, flag) {
  // Escape regex metacharacters so the flag name is matched literally.
  const escaped = flag.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
  const regex = new RegExp(`${escaped}[=\\s]+(\\S+)`);
  const match = args.match(regex);
  return match?.[1];
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -171,18 +171,12 @@ Setting `prefer_skills: []` does **not** disable skill discovery — it just mea
- `skip_reassess`: boolean — force-disable roadmap reassessment even if `reassess_after_slice` is enabled. Default: `false`.
- `skip_slice_research`: boolean — skip per-slice research. Default: `false`.
- `codebase`: configures `.sf/CODEBASE.md` and the optional code-intelligence backend. Keys:
- `codebase`: configures fallback `.sf/CODEBASE.md` and the optional Sift code-intelligence backend. Keys:
- `exclude_patterns`: string[] — extra file or directory patterns to omit from CODEBASE.md.
- `max_files`: number — maximum files to include in CODEBASE.md. Default: `500`.
- `collapse_threshold`: number — files-per-directory threshold before collapsing a directory summary. Default: `20`.
- `indexer_backend`: `"projectRag"`, `"sift"`, or `"none"` — codebase-indexer backend used for prompt guidance and `/sf codebase indexer status`. Default: use Sift when it is on `PATH`; set `projectRag` explicitly to use the MCP RAG backend.
- `project_rag`: `"auto"`, `"off"`, or `"required"` — use Brainwires/project-rag MCP search when configured. Default: `"auto"`.
- `project_rag_server`: string — explicit MCP server name when the server cannot be detected from command or args.
- `project_rag_auto_index`: boolean — whether agents should prefer indexing before querying a configured Project RAG backend. Default: `true`.
- `/sf codebase indexer status` reports the selected backend status. For `sift`, install `rupurt/sift` on `PATH` or set `SIFT_PATH`.
- `/sf codebase rag status` reports whether the Rust backend is actually operational.
- `/sf codebase rag init` writes a `.mcp.json` entry when a `project-rag` binary is available.
- `/sf codebase rag build` builds vendored Brainwires/project-rag from `vendor/project-rag` (or `SF_PROJECT_RAG_SOURCE`) with `cargo build --release`, then writes the MCP config. The build defaults to `CARGO_BUILD_JOBS=2` so it does not saturate the workstation; override with `SF_PROJECT_RAG_BUILD_JOBS`.
- `indexer_backend`: `"sift"` or `"none"` — codebase-indexer backend used for prompt guidance and `/sf codebase indexer status`. Default: `"sift"`.
- `/sf codebase indexer status` reports Sift status. Install `rupurt/sift` on `PATH` or set `SIFT_PATH`.
- `remote_questions`: route interactive questions to Slack/Discord for headless auto-mode. Keys:
- `channel`: `"slack"` or `"discord"` — channel type.

View file

@ -2,13 +2,17 @@
* Shared git constants used across git-service and native-git-bridge.
*/
/**
 * Environment overlay suppressing interactive git prompts, editors, and git-svn noise.
 * Set LC_ALL=C for English output so stderr string checks work across locales.
 *
 * GIT_EDITOR/GIT_SEQUENCE_EDITOR/VISUAL/EDITOR are set to the `true` binary so
 * commands like `git rebase --continue` cannot hang waiting on an editor.
 */
export const GIT_NO_PROMPT_ENV = {
  ...process.env,
  GIT_TERMINAL_PROMPT: "0",
  GIT_EDITOR: "true",
  GIT_SEQUENCE_EDITOR: "true",
  GIT_ASKPASS: "",
  VISUAL: "true",
  EDITOR: "true",
  GIT_SVN_ID: "",
  LC_ALL: "C", // force English git output so stderr string checks work on all locales (#1997)
};

File diff suppressed because it is too large Load diff

View file

@ -76,7 +76,7 @@ Before anything else, form a diagnosis: What is the core challenge? What is brok
- **Measure coverage**: find untested critical paths
- **Scan for dead code, stubs, and commented-out features** — abandoned attempts are signals
- **Discover needed skills**: identify repo languages, frameworks, data stores, external services, build tools, and domain-specific competencies. Check installed skills first; record installed, missing, and potentially useful skills in `.sf/CODEBASE.md` and `.sf/PM-STRATEGY.md`.
- **Use code intelligence**: start with `.sf/CODEBASE.md`, in-process `grep`/`find`/`ls`, and `lsp` for broad orientation. Use `codebase_search` or `sift_search` only with a scoped path and only when the `PROJECT CODE INTELLIGENCE` block says Sift is healthy enough for this repo; if Sift is degraded, slow, empty, or timing out, keep using grep/find/ls and direct reads. Use Project RAG tools first for broad retrieval if Project RAG is configured.
- **Use code intelligence**: start with in-process `grep`/`find`/`ls` and `lsp` for broad orientation. Use scoped `codebase_search` or `sift_search` as the live code index when the `PROJECT CODE INTELLIGENCE` block says Sift is healthy enough for this repo. Use `.sf/CODEBASE.md` only as fallback context when Sift is unavailable, cold, degraded, or explicitly needed as a generated overview. If Sift is degraded, slow, empty, or timing out, keep using grep/find/ls, lsp, direct reads, and fallback CODEBASE context.
- Use in-process `grep`, `find`, `ls`, and `lsp` before shelling out. Fall back to shell `rg`, `find`, `ast-grep`, or `ls -la` only when the native/in-process tool surface is insufficient.
### Step 2: Check library and ecosystem facts
@ -311,7 +311,7 @@ After writing final context and roadmap, say exactly: "Milestone {{milestoneId}}
- **Preserve the specification's terminology** — don't paraphrase domain-specific language
- **Document assumptions** — every judgment call gets noted in CONTEXT.md under "Assumptions" with reasoning
- **Investigate thoroughly** — scout codebase, check library docs, web search. Same rigor as interactive mode.
- **Build project knowledge first** — update `.sf/CODEBASE.md` with stack signals, critical paths, verification commands, skill needs, file descriptions, and unresolved gaps before writing context.
- **Build project knowledge first** — use Sift/grep/lsp evidence to identify stack signals, critical paths, verification commands, skill needs, file descriptions, and unresolved gaps before writing context. Update `.sf/CODEBASE.md` only when you need a refreshed durable fallback snapshot.
- **Do focused research** — identify table stakes, domain standards, omissions, scope traps. Same rigor as interactive mode.
- **Use proper tools**`sf_plan_milestone` for roadmaps, `sf_decision_save` for decisions, `sf_milestone_generate_id` for IDs
- **Print artifacts in chat** — requirements table, roadmap preview, depth summary. The TUI scrollback is the user's audit trail.

View file

@ -34,7 +34,7 @@ After reflection is confirmed, decide the approach based on the actual scope —
Before asking your first question, do a mandatory investigation pass. This is not optional.
1. **Scout the codebase** — start with in-process `grep`, `find`, `ls`, `.sf/CODEBASE.md`, and `lsp` for broad orientation. Use `codebase_search` or `sift_search` only with a scoped path and only when the `PROJECT CODE INTELLIGENCE` block says Sift is healthy enough for this repo; if Sift is degraded, slow, empty, or timing out, keep using grep/find/ls and direct reads. Use `scout` for broad unfamiliar areas that need a separate explorer. Understand what already exists, what patterns are established, what constraints current code imposes.
1. **Scout the codebase** — start with in-process `grep`, `find`, `ls`, and `lsp` for broad orientation. Use scoped `codebase_search` or `sift_search` as the live code index when the `PROJECT CODE INTELLIGENCE` block says Sift is healthy enough for this repo. Use `.sf/CODEBASE.md` only as durable fallback context when Sift is unavailable, cold, degraded, or explicitly needed as a generated overview. If Sift is degraded, slow, empty, or timing out, keep using grep/find/ls, lsp, direct reads, and fallback CODEBASE context. Use `scout` for broad unfamiliar areas that need a separate explorer. Understand what already exists, what patterns are established, what constraints current code imposes.
2. **Check library docs — DeepWiki first.** Use `ask_question` / `read_wiki_structure` / `read_wiki_contents` (DeepWiki) as the default for any GitHub-hosted library or framework the user mentioned. Fall back to `resolve_library` / `get_library_docs` (Context7) for npm/pypi/crates packages DeepWiki doesn't have. **Context7 free tier is capped at 1000 req/month — spend those on cases DeepWiki can't cover.** Get current facts about capabilities, constraints, API shapes, version-specific behavior.
3. **Web search**`search-the-web` if the domain is unfamiliar, if you need current best practices, or if the user referenced external services/APIs you need facts about. Use `fetch_page` for full content when snippets aren't enough.

View file

@ -15,8 +15,7 @@ Apply `pm-planning` skill thinking throughout: use Working Backwards to anchor o
### Before your first question round
Do a lightweight targeted investigation so your questions are grounded in reality:
- Scout the codebase: start with in-process `grep`, `find`, `ls`, `.sf/CODEBASE.md`, and `lsp` for broad orientation. Use `codebase_search` or `sift_search` only with a scoped path and only when Sift is healthy for this repo; if Sift is degraded, slow, empty, or timing out, keep using grep/find/ls and direct reads. Use `scout` for broad unfamiliar areas that need a separate explorer.
- If the `PROJECT CODE INTELLIGENCE` block says Project RAG is configured, use its MCP search tools for broad concept, symbol, schema, and git-history lookup before manually reading files
- Scout the codebase: start with in-process `grep`, `find`, `ls`, and `lsp` for broad orientation. Use scoped `codebase_search` or `sift_search` as the live code index when Sift is healthy for this repo. Use `.sf/CODEBASE.md` only as durable fallback context when Sift is unavailable, cold, degraded, or explicitly needed as a generated overview. If Sift is degraded, slow, empty, or timing out, keep using grep/find/ls, lsp, direct reads, and fallback CODEBASE context. Use `scout` for broad unfamiliar areas that need a separate explorer.
- Check the roadmap context above (if present) to understand what surrounds this milestone
- **Library docs — DeepWiki first.** Use `ask_question` / `read_wiki_structure` / `read_wiki_contents` (DeepWiki) for any GitHub-hosted library. Fall back to `resolve_library` / `get_library_docs` (Context7) only when DeepWiki doesn't have it (Context7 is capped at 1000 req/month free tier).
- Identify the 3–5 biggest behavioural and architectural unknowns: things where the user's answer will materially change what gets built

View file

@ -11,7 +11,7 @@ Your goal is **not** to center the discussion on tech stack trivia, naming conve
### Before your first question round
Do a lightweight targeted investigation so your questions are grounded in reality:
- Scout the codebase: start with in-process `grep`, `find`, `ls`, `.sf/CODEBASE.md`, and `lsp` for broad orientation. Use `codebase_search` or `sift_search` only with a scoped path and only when Sift is healthy for this repo; if Sift is degraded, slow, empty, or timing out, keep using grep/find/ls and direct reads. Use `scout` for broad unfamiliar areas that need a separate explorer.
- Scout the codebase: start with in-process `grep`, `find`, `ls`, and `lsp` for broad orientation. Use scoped `codebase_search` or `sift_search` as the live code index when Sift is healthy for this repo. Use `.sf/CODEBASE.md` only as durable fallback context when Sift is unavailable, cold, degraded, or explicitly needed as a generated overview. If Sift is degraded, slow, empty, or timing out, keep using grep/find/ls, lsp, direct reads, and fallback CODEBASE context. Use `scout` for broad unfamiliar areas that need a separate explorer.
- Check the roadmap context above to understand what surrounds this slice — what comes before, what depends on it
- **Library docs — DeepWiki first.** Use `ask_question` / `read_wiki_structure` / `read_wiki_contents` (DeepWiki) for any GitHub-hosted library. Fall back to `resolve_library` / `get_library_docs` (Context7) only when DeepWiki doesn't have it (Context7 is capped at 1000 req/month free tier).
- Identify the 3–5 biggest behavioural unknowns: things where the user's answer will materially change what gets built

View file

@ -26,7 +26,7 @@ Never fabricate or simulate user input during this discussion. Never generate fa
- Check library docs **DeepWiki first** (`ask_question` / `read_wiki_structure` / `read_wiki_contents`) for any GitHub-hosted library or framework — AI-indexed, no free-tier cap. Fall back to Context7 (`resolve_library` / `get_library_docs`) for npm/pypi/crates packages DeepWiki doesn't cover. Context7 free tier is 1000 req/month — don't spend those on cases DeepWiki covers.
- Do web searches (`search-the-web`) to verify the landscape — what solutions exist, what's changed recently, what's the current best practice. Use `freshness` for recency-sensitive queries, `domain` to target specific sites. Use `fetch_page` to read the full content of promising URLs when snippets aren't enough. **Budget:** You have a limited number of web searches per turn (typically 3-5). Prefer DeepWiki → Context7 → web search for docs; use `search_and_read` for one-shot topic research. Do NOT repeat the same or similar queries. Distribute searches across turns rather than clustering them.
- Scout the codebase: start with in-process `grep`, `find`, `ls`, `.sf/CODEBASE.md`, and `lsp` for broad orientation. Use `codebase_search` or `sift_search` only with a scoped path and only when Sift is healthy for this repo; if Sift is degraded, slow, empty, or timing out, keep using grep/find/ls and direct reads. Use `scout` for broad unfamiliar areas that need a separate explorer. Understand what already exists, what patterns are established, what constraints current code imposes.
- Scout the codebase: start with in-process `grep`, `find`, `ls`, and `lsp` for broad orientation. Use scoped `codebase_search` or `sift_search` as the live code index when Sift is healthy for this repo. Use `.sf/CODEBASE.md` only as durable fallback context when Sift is unavailable, cold, degraded, or explicitly needed as a generated overview. If Sift is degraded, slow, empty, or timing out, keep using grep/find/ls, lsp, direct reads, and fallback CODEBASE context. Use `scout` for broad unfamiliar areas that need a separate explorer. Understand what already exists, what patterns are established, what constraints current code imposes.
Don't go deep — just enough that your next question reflects what's actually true rather than what you assume.

View file

@ -76,7 +76,7 @@ Titles live inside file content (headings, frontmatter), not in file or director
REQUIREMENTS.md (requirement contract - tracks active/validated/deferred/out-of-scope)
DECISIONS.md (append-only register of architectural and pattern decisions)
KNOWLEDGE.md (append-only register of project-specific rules, patterns, and lessons learned)
CODEBASE.md (generated codebase map cache — auto-refreshed when tracked files change)
CODEBASE.md (generated fallback codebase map cache — auto-refreshed when tracked files change)
OVERRIDES.md (user-issued overrides that supersede plan content via /sf steer)
QUEUE.md (append-only log of queued milestones via /sf queue)
STATE.md
@ -119,7 +119,7 @@ In all modes, slices commit sequentially on the active branch; there are no per-
- **REQUIREMENTS.md** tracks the requirement contract — requirements move between Active, Validated, Deferred, Blocked, and Out of Scope as slices prove or invalidate them. Update at slice completion when evidence supports a status change.
- **DECISIONS.md** is an append-only register of architectural and pattern decisions - read it during planning/research, append to it during execution when a meaningful decision is made
- **KNOWLEDGE.md** is an append-only register of project-specific rules, patterns, and lessons learned. Read it at the start of every unit. Append to it when you discover a recurring issue, a non-obvious pattern, or a rule that future agents should follow.
- **CODEBASE.md** is a generated structural cache of the tracked repository. SF auto-refreshes it when tracked files change and injects it into system context when available. Use `/sf codebase update` only when you need to force an immediate refresh.
- **CODEBASE.md** is a generated fallback snapshot of the tracked repository. SF may inject it when available, but healthy Sift is the preferred live code index. Use CODEBASE only when Sift is unavailable, cold, degraded, or when you need a durable overview. Use `/sf codebase update` only when you need to force an immediate refresh.
- **CONTEXT.md** files (milestone or slice level) capture the brief — scope, goals, constraints, and key decisions from discussion. When present, they are the authoritative source for what a milestone or slice is trying to achieve. Read them before planning or executing.
- **Milestones** are major project phases (M001, M002, ...)
- **Slices** are demoable vertical increments (S01, S02, ...) ordered by risk. After each slice completes, the roadmap is reassessed before the next slice begins.
@ -147,7 +147,7 @@ Templates showing the expected format for each artifact type are in:
- `/sf status` - progress dashboard overlay
- `/sf queue` - queue future milestones (safe while auto-mode is running)
- `/sf quick <task>` - quick task with SF guarantees (atomic commits, state tracking) but no milestone ceremony
- `/sf codebase [generate|update|stats|rag]` - manage `.sf/CODEBASE.md` and optional code search
- `/sf codebase [generate|update|stats|indexer]` - manage fallback `.sf/CODEBASE.md` and Sift code search
- `{{shortcutDashboard}}` - toggle dashboard overlay
- `{{shortcutShell}}` - show shell processes
@ -161,7 +161,7 @@ Templates showing the expected format for each artifact type are in:
**Code navigation:** Use `lsp` for definition, type_definition, implementation, references, incoming_calls, outgoing_calls, hover, signature, symbols, rename, code_actions, format, and diagnostics. Falls back gracefully if no server is available. Never `grep` for a symbol definition when `lsp` can resolve it semantically. Never shell out to prettier/rustfmt/gofmt when `lsp format` is available. After editing code, use `lsp diagnostics` to verify no type errors were introduced.
**Codebase exploration:** Start broad orientation with in-process `grep`, `find`, `ls`, `.sf/CODEBASE.md`, and `lsp`. Use `codebase_search` for conceptual, behavioral, or architectural discovery only after choosing a narrow scope and checking the `PROJECT CODE INTELLIGENCE` block; if Sift is degraded, slow, empty, or timing out, keep using grep/find/ls and direct reads. For Sift-specific features — explicit strategy selection or planner configuration — use `sift_search` with a scoped `path`. Strategy guide: `bm25` (fast lexical), `path-hybrid` (filename/path-heavy queries), `page-index-hybrid` (stronger recall + reranking), `vector` (semantic-only). Each repo uses its own Sift cache under `.sf/runtime/sift/`; do not rely on a shared/global Sift database. Use `lsp` for structural navigation (definitions, references). If the `PROJECT CODE INTELLIGENCE` block says Project RAG is configured, use its MCP tools for broad hybrid semantic + BM25 code retrieval before manual file-by-file reading. Never read files one-by-one to "explore" — search first, then read what's relevant.
**Codebase exploration:** Start broad orientation with in-process `grep`, `find`, `ls`, and `lsp`. When the `PROJECT CODE INTELLIGENCE` block says Sift is healthy, use scoped `codebase_search` or `sift_search` as the preferred live code index. Use `.sf/CODEBASE.md` only as fallback context when Sift is unavailable, cold, degraded, or explicitly needed as a generated overview. For Sift-specific features — explicit strategy selection or planner configuration — use `sift_search` with a scoped `path`. Strategy guide: `bm25` (fast lexical), `path-hybrid` (filename/path-heavy queries), `page-index-hybrid` (stronger recall + reranking), `vector` (semantic-only). Each repo uses its own Sift cache under `.sf/runtime/sift/`; do not rely on a shared/global Sift database. Use `lsp` for structural navigation (definitions, references). Never read files one-by-one to "explore" — search first, then read what's relevant.
**Swarm dispatch:** Let the system decide whether swarming fits before dispatching multiple execution subagents. Use a 2-3 worker same-model swarm only when the work splits into independent shards with explicit file/directory ownership, shard-local verification, low conflict risk, and clear wall-clock savings. Do not swarm shared-interface edits, lockfiles, migrations, single-failure debugging, or sequence-dependent work. The parent agent remains coordinator: assign ownership, synthesize results, inspect dirty files, resolve conflicts, and run final verification.