From 2dea73398dff141285e5bd1be7e654289504d06e Mon Sep 17 00:00:00 2001 From: Mikael Hugo Date: Sun, 10 May 2026 23:18:02 +0200 Subject: [PATCH] fix(learning): add save_knowledge to manifest, failure_mode to aggregator SELECT + index Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- packages/pi-agent-core/package.json | 25 + packages/pi-agent-core/src/db/errors.ts | 26 + .../pi-agent-core/src/db/gate-registry.ts | 221 + packages/pi-agent-core/src/db/index.ts | 5 + packages/pi-agent-core/src/db/sf-db.ts | 8937 +++++++++++++++++ .../pi-agent-core/src/db/task-frontmatter.ts | 481 + .../pi-agent-core/src/db/uok/trace-writer.ts | 76 + .../pi-agent-core/src/db/workflow-logger.ts | 94 + packages/pi-agent-core/src/index.ts | 1 + packages/pi-agent-core/tsconfig.json | 38 + .../extensions/bg-shell/bg-shell-lifecycle.js | 2 +- .../extensions/bg-shell/bg-shell-tool.js | 2 +- .../search-the-web/native-search.js | 55 +- .../extensions/sf/auto-runaway-guard.js | 2 +- .../extensions/sf/commands-session-report.js | 2 +- src/resources/extensions/sf/commands-ship.js | 2 +- src/resources/extensions/sf/export-html.js | 2 +- src/resources/extensions/sf/export.js | 2 +- .../extensions/sf/extension-manifest.json | 1 + src/resources/extensions/sf/forensics.js | 2 +- src/resources/extensions/sf/history.js | 2 +- .../sf/learning/outcome-aggregator.mjs | 1 + src/resources/extensions/sf/metrics.js | 2 +- .../extensions/sf/migrate/parsers.js | 2 +- .../extensions/sf/preferences-validation.js | 2 +- src/resources/extensions/sf/preferences.js | 2 +- src/resources/extensions/sf/reports.js | 2 +- .../extensions/sf/session-forensics.js | 2 +- src/resources/extensions/sf/sf-db.js | 1 + .../extensions/shared/format-utils.js | 20 - src/resources/extensions/shared/mod.js | 4 +- .../extensions/shared/path-display.js | 12 - src/tests/native-search.test.ts | 124 +- 33 files changed, 9967 insertions(+), 185 deletions(-) create mode 100644 packages/pi-agent-core/package.json create mode 
100644 packages/pi-agent-core/src/db/errors.ts create mode 100644 packages/pi-agent-core/src/db/gate-registry.ts create mode 100644 packages/pi-agent-core/src/db/index.ts create mode 100644 packages/pi-agent-core/src/db/sf-db.ts create mode 100644 packages/pi-agent-core/src/db/task-frontmatter.ts create mode 100644 packages/pi-agent-core/src/db/uok/trace-writer.ts create mode 100644 packages/pi-agent-core/src/db/workflow-logger.ts create mode 100644 packages/pi-agent-core/src/index.ts create mode 100644 packages/pi-agent-core/tsconfig.json delete mode 100644 src/resources/extensions/shared/format-utils.js delete mode 100644 src/resources/extensions/shared/path-display.js diff --git a/packages/pi-agent-core/package.json b/packages/pi-agent-core/package.json new file mode 100644 index 000000000..4995b66cb --- /dev/null +++ b/packages/pi-agent-core/package.json @@ -0,0 +1,25 @@ +{ + "name": "@singularity-forge/pi-agent-core", + "version": "2.75.3", + "description": "SF database abstraction layer and agent-core primitives (TypeScript)", + "type": "module", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "exports": { + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.js" + }, + "./db/sf-db": { + "types": "./dist/db/sf-db.d.ts", + "import": "./dist/db/sf-db.js" + } + }, + "scripts": { + "build": "tsc -p tsconfig.json" + }, + "dependencies": {}, + "engines": { + "node": ">=26.1.0" + } +} diff --git a/packages/pi-agent-core/src/db/errors.ts b/packages/pi-agent-core/src/db/errors.ts new file mode 100644 index 000000000..3eb2bdf61 --- /dev/null +++ b/packages/pi-agent-core/src/db/errors.ts @@ -0,0 +1,26 @@ +/** + * SF Error Types — Typed error hierarchy for diagnostics and crash recovery. + * + * All SF-specific errors extend SFError, which carries a stable `code` + * string suitable for programmatic matching. Error codes are defined as + * constants so callers can switch on them without string-matching. 
+ */ + +// ─── Error Codes ────────────────────────────────────────────────────────────── +export const SF_STALE_STATE = "SF_STALE_STATE"; +export const SF_LOCK_HELD = "SF_LOCK_HELD"; +export const SF_ARTIFACT_MISSING = "SF_ARTIFACT_MISSING"; +export const SF_GIT_ERROR = "SF_GIT_ERROR"; +export const SF_MERGE_CONFLICT = "SF_MERGE_CONFLICT"; +export const SF_PARSE_ERROR = "SF_PARSE_ERROR"; +export const SF_IO_ERROR = "SF_IO_ERROR"; + +// ─── Base Error ─────────────────────────────────────────────────────────────── +export class SFError extends Error { + code: string; + constructor(code: string, message: string, options?: ErrorOptions) { + super(message, options); + this.name = "SFError"; + this.code = code; + } +} diff --git a/packages/pi-agent-core/src/db/gate-registry.ts b/packages/pi-agent-core/src/db/gate-registry.ts new file mode 100644 index 000000000..5cefb8686 --- /dev/null +++ b/packages/pi-agent-core/src/db/gate-registry.ts @@ -0,0 +1,221 @@ +/** + * SF Gate Registry — single source of truth for quality-gate ownership. + * + * Each gate declares which workflow turn owns it, the scope at which it is + * persisted in the `quality_gates` table, and the question/guidance text used + * in the prompt that turn sends. 
+ */ +import { SF_PARSE_ERROR, SFError } from "./errors.js"; + +export type GateId = "Q3" | "Q4" | "Q5" | "Q6" | "Q7" | "Q8" | "MV01" | "MV02" | "MV03" | "MV04"; + +export interface GateDefinition { + id: GateId; + scope: "slice" | "task" | "milestone"; + ownerTurn: string; + question: string; + guidance: string; + promptSection: string; + minOmissionWords: number; +} + +export const GATE_REGISTRY: Record = { + Q3: { + id: "Q3", + scope: "slice", + ownerTurn: "gate-evaluate", + question: "How can this be exploited?", + guidance: [ + "Identify abuse scenarios: parameter tampering, replay attacks, privilege escalation.", + "Map data exposure risks: PII, tokens, secrets accessible through this slice.", + "Define input trust boundaries: untrusted user input reaching DB, API, or filesystem.", + "If none apply, return verdict 'omitted' with rationale explaining why.", + ].join("\n"), + promptSection: "Abuse Surface", + minOmissionWords: 20, + }, + Q4: { + id: "Q4", + scope: "slice", + ownerTurn: "gate-evaluate", + question: "What existing promises does this break?", + guidance: [ + "List which existing requirements (R001, R003, etc.) 
are touched by this slice.", + "Identify what must be re-tested after shipping.", + "Flag decisions that should be revisited given the new scope.", + "If no existing requirements are affected, return verdict 'omitted'.", + ].join("\n"), + promptSection: "Broken Promises", + minOmissionWords: 0, + }, + Q5: { + id: "Q5", + scope: "task", + ownerTurn: "execute-task", + question: "What breaks when dependencies fail?", + guidance: [ + "Enumerate the task's external dependencies (APIs, filesystem, network, subprocesses).", + "Describe the failure path for each: timeout, malformed response, connection loss.", + "Verify the implementation handles each failure or explicitly bubbles the error.", + "Return verdict 'omitted' only if the task has no external dependencies.", + ].join("\n"), + promptSection: "Failure Modes", + minOmissionWords: 15, + }, + Q6: { + id: "Q6", + scope: "task", + ownerTurn: "execute-task", + question: "What is the 10x load breakpoint?", + guidance: [ + "Identify the resource that saturates first at 10x the expected load.", + "Describe the protection applied (pool sizing, rate limiting, pagination, caching).", + "Return verdict 'omitted' if the task has no runtime load dimension.", + ].join("\n"), + promptSection: "Load Profile", + minOmissionWords: 10, + }, + Q7: { + id: "Q7", + scope: "task", + ownerTurn: "execute-task", + question: "What negative tests protect this task?", + guidance: [ + "List malformed inputs, error paths, and boundary conditions the tests cover.", + "Point to the specific test files or cases that assert each negative scenario.", + "Return verdict 'omitted' only if the task has no meaningful negative surface.", + ].join("\n"), + promptSection: "Negative Tests", + minOmissionWords: 15, + }, + Q8: { + id: "Q8", + scope: "slice", + ownerTurn: "complete-slice", + question: "How will ops know this slice is healthy or broken?", + guidance: [ + "Describe the health signal (metric, log line, dashboard) that proves the slice works.", + 
"Describe the failure signal that triggers an alert or paging.", + "Document the recovery procedure and any monitoring gaps.", + "Return verdict 'omitted' only for slices with no runtime behavior at all.", + ].join("\n"), + promptSection: "Operational Readiness", + minOmissionWords: 0, + }, + MV01: { + id: "MV01", + scope: "milestone", + ownerTurn: "validate-milestone", + question: "Is every success criterion in the milestone roadmap satisfied?", + guidance: [ + "Walk the success-criteria checklist from the milestone roadmap.", + "For each criterion, point to the slice / assessment / verification evidence that proves it.", + "Return verdict 'flag' if any criterion is unmet or unverifiable.", + ].join("\n"), + promptSection: "Success Criteria Checklist", + minOmissionWords: 0, + }, + MV02: { + id: "MV02", + scope: "milestone", + ownerTurn: "validate-milestone", + question: "Does every slice have a SUMMARY.md and a passing assessment?", + guidance: [ + "Confirm every slice listed in the roadmap has a SUMMARY.md.", + "Confirm each slice has an ASSESSMENT verdict of 'pass' (or justified 'omitted').", + "Flag missing artifacts and slices with outstanding follow-ups or known limitations.", + ].join("\n"), + promptSection: "Slice Delivery Audit", + minOmissionWords: 0, + }, + MV03: { + id: "MV03", + scope: "milestone", + ownerTurn: "validate-milestone", + question: "Do the slices integrate end-to-end?", + guidance: [ + "Trace at least one cross-slice flow proving the pieces compose.", + "Flag gaps where two slices were built in isolation with no integration evidence.", + ].join("\n"), + promptSection: "Cross-Slice Integration", + minOmissionWords: 0, + }, + MV04: { + id: "MV04", + scope: "milestone", + ownerTurn: "validate-milestone", + question: "Are all touched requirements covered and still coherent?", + guidance: [ + "For each requirement advanced, validated, surfaced, or invalidated across the milestone's slices, confirm the milestone-level evidence matches.", + 
"Flag requirements that slices claim to advance but no artifact proves.", + ].join("\n"), + promptSection: "Requirement Coverage", + minOmissionWords: 0, + }, +}; + +const ORDERED_GATES = Object.values(GATE_REGISTRY); + +export function getGatesForTurn(turn: string): GateDefinition[] { + return ORDERED_GATES.filter((g) => g.ownerTurn === turn); +} + +export function getGateIdsForTurn(turn: string): Set { + return new Set(getGatesForTurn(turn).map((g) => g.id)); +} + +export function getGateDefinition(id: string): GateDefinition | undefined { + return GATE_REGISTRY[id as GateId]; +} + +export function getOwnerTurn(id: string): string { + const def = GATE_REGISTRY[id as GateId]; + if (!def) { + throw new SFError(SF_PARSE_ERROR, `gate-registry: unknown gate id "${id}"`); + } + return def.ownerTurn; +} + +export interface PendingGateRow { + gate_id: string; + [key: string]: unknown; +} + +export function assertGateCoverage( + pending: PendingGateRow[], + turn: string, + options: { requireAll?: boolean } = {}, +): void { + const requireAll = options.requireAll ?? 
true; + const expected = getGateIdsForTurn(turn); + const pendingIds = new Set(pending.map((g) => g.gate_id)); + const unknown: string[] = []; + for (const id of pendingIds) { + const def = getGateDefinition(id); + if (!def) { + unknown.push(id); + continue; + } + if (def.ownerTurn !== turn) { + unknown.push(`${id} (owned by ${def.ownerTurn}, not ${turn})`); + } + } + if (unknown.length > 0) { + throw new SFError( + SF_PARSE_ERROR, + `assertGateCoverage: turn "${turn}" received pending gates it does not own: ${unknown.join(", ")}`, + ); + } + if (requireAll) { + const missing: string[] = []; + for (const id of expected) { + if (!pendingIds.has(id)) missing.push(id); + } + if (missing.length > 0) { + throw new SFError( + SF_PARSE_ERROR, + `assertGateCoverage: turn "${turn}" is missing required gates: ${missing.join(", ")}`, + ); + } + } +} diff --git a/packages/pi-agent-core/src/db/index.ts b/packages/pi-agent-core/src/db/index.ts new file mode 100644 index 000000000..8b2886a0f --- /dev/null +++ b/packages/pi-agent-core/src/db/index.ts @@ -0,0 +1,5 @@ +export * from "./sf-db.js"; +export * from "./errors.js"; +export * from "./gate-registry.js"; +export * from "./task-frontmatter.js"; +export * from "./workflow-logger.js"; diff --git a/packages/pi-agent-core/src/db/sf-db.ts b/packages/pi-agent-core/src/db/sf-db.ts new file mode 100644 index 000000000..0a7af817b --- /dev/null +++ b/packages/pi-agent-core/src/db/sf-db.ts @@ -0,0 +1,8937 @@ +// ─── Type declarations ───────────────────────────────────────────────────── + +/** Generic DB row returned by prepared statement queries. */ +export type DbRow = Record; + +/** Wrapper around node:sqlite prepared statements. */ +export interface DbStatement { + run(...params: unknown[]): { changes?: number; lastInsertRowid?: number | bigint }; + get(...params: unknown[]): DbRow | undefined; + all(...params: unknown[]): DbRow[]; +} + +/** Adapter wrapping a DatabaseSync instance with a statement cache. 
*/ +export interface DbAdapter { + exec(sql: string): void; + prepare(sql: string): DbStatement; + close(): void; +} + +// ─── Input interfaces ─────────────────────────────────────────────────────── + +export interface DecisionInput { + id: string; + when_context?: string; + scope?: string; + decision?: string; + choice?: string; + rationale?: string; + revisable?: string; + made_by?: string; + superseded_by?: string | null; + [key: string]: unknown; +} + +export interface RequirementInput { + id: string; + class?: string; + status?: string; + description?: string; + why?: string; + source?: string; + primary_owner?: string; + supporting_slices?: string; + validation?: string; + notes?: string; + full_content?: string; + [key: string]: unknown; +} + +export interface ArtifactInput { + path: string; + artifact_type?: string; + milestone_id?: string | null; + slice_id?: string | null; + task_id?: string | null; + full_content?: string; + [key: string]: unknown; +} + +export interface PlanningPayload { + vision?: string; + successCriteria?: unknown[]; + keyRisks?: unknown[]; + proofStrategy?: unknown[]; + verificationContract?: string; + verificationIntegration?: string; + verificationOperational?: string; + verificationUat?: string; + definitionOfDone?: unknown[]; + requirementCoverage?: string; + boundaryMapMarkdown?: string; + visionMeeting?: unknown; + productResearch?: unknown; + [key: string]: unknown; +} + +export interface SlicePlanningPayload { + goal?: string; + successCriteria?: string; + proofLevel?: string; + integrationClosure?: string; + observabilityImpact?: string; + adversarialReview?: { partner?: string; combatant?: string; architect?: string }; + planningMeeting?: unknown; + [key: string]: unknown; +} + +export interface MilestoneInput { + id: string; + title?: string; + status?: string; + vision?: string; + created_at?: string; + planning?: PlanningPayload; + depends_on?: string[]; + sequence?: number; + [key: string]: unknown; +} + +export 
interface SliceInput { + milestone_id: string; + id: string; + title?: string; + status?: string; + created_at?: string; + planning?: SlicePlanningPayload; + risk?: string; + depends?: string[]; + demo?: string; + sequence?: number; + isSketch?: boolean; + sketchScope?: string; + [key: string]: unknown; +} + +export interface TaskPlanningPayload { + description?: string; + estimate?: string; + files?: unknown[]; + verify?: string; + inputs?: unknown[]; + expectedOutput?: string; + observabilityImpact?: string; + specIntent?: string; + [key: string]: unknown; +} + +export interface TaskInput { + milestone_id: string; + slice_id: string; + id: string; + title?: string; + status?: string; + created_at?: string; + planning?: TaskPlanningPayload; + [key: string]: unknown; +} + +export interface VerificationEvidenceInput { + task_id: string; + slice_id: string; + milestone_id: string; + command: string; + exit_code?: number; + verdict: string; + duration_ms?: number; + created_at: string; + [key: string]: unknown; +} + +export interface QualityGateInput { + milestone_id: string; + slice_id: string; + gate_id: string; + scope?: string; + task_id?: string; + status?: string; + verdict?: string; + rationale?: string; + findings?: string; + evaluated_at?: string | null; + [key: string]: unknown; +} + +// SF Database Abstraction Layer +// Provides a SQLite database via node:sqlite (Node >= 26 built-in). +// +// Exposes a unified sync API for decisions and requirements storage. +// Schema is initialized on first open with WAL mode for file-backed DBs. +// +// ─── Single-writer invariant ───────────────────────────────────────────── +// This file is the ONLY place in the codebase that issues write SQL +// (INSERT / UPDATE / DELETE / REPLACE / BEGIN-COMMIT transactions) against +// the engine database at `.sf/sf.db`. All other modules must call the +// typed wrappers exported here. The structural test +// `tests/single-writer-invariant.test.ts` fails CI if a new bypass appears. 
+// +// `_getAdapter()` is retained for read-only SELECTs in query modules +// (context-store, memory-store queries, doctor checks, projections). +// Do NOT use it for writes — add a wrapper here instead. +// +// The separate `.sf/unit-claims.db` managed by `unit-ownership.ts` is an +// intentionally independent store for cross-worktree claim races and is +// excluded from this invariant. +import { + copyFileSync, + existsSync, + mkdirSync, + readdirSync, + readFileSync, + realpathSync, + statSync, + unlinkSync, + writeFileSync, +} from "node:fs"; +import { dirname, join } from "node:path"; +import { DatabaseSync } from "node:sqlite"; +import { SF_STALE_STATE, SFError } from "./errors.js"; +import { getGateIdsForTurn } from "./gate-registry.js"; +import { + normalizeSchedulerStatus, + normalizeTaskStatus, + taskFrontmatterFromRecord, + withTaskFrontmatter, +} from "./task-frontmatter.js"; +import { logError, logWarning } from "./workflow-logger.js"; +import { readTraceEvents } from "./uok/trace-writer.js"; + +let loadAttempted = false; +function loadProvider(): void { + if (loadAttempted) return; + loadAttempted = true; + // node:sqlite is built-in in Node >= 26 +} +function normalizeRow(row: unknown): Record | undefined { + if (row == null) return undefined; + if (Object.getPrototypeOf(row) === null) { + return { ...(row as Record) }; + } + return row as Record; +} +function normalizeRows(rows: unknown[]): Record[] { + return rows.map((r) => normalizeRow(r)).filter((r): r is Record => r != null); +} +const DB_QUERY_TIMEOUT_MS = 30_000; +const DB_BACKUP_MIN_INTERVAL_MS = 15 * 60 * 1000; +const DB_BACKUP_RETENTION = 24; +const DB_FULL_VACUUM_MIN_INTERVAL_MS = 6 * 60 * 60 * 1000; + +function createAdapter(rawDb: import('node:sqlite').DatabaseSync): DbAdapter { + const db: import('node:sqlite').DatabaseSync = rawDb; + const stmtCache = new Map(); + function wrapStmt(raw: import('node:sqlite').StatementSync): DbStatement { + return { + run(...params: unknown[]) { + 
return raw.run(...params); + }, + get(...params: unknown[]) { + return normalizeRow(raw.get(...params)); + }, + all(...params: unknown[]) { + return normalizeRows(raw.all(...params) as unknown[]); + }, + }; + } + return { + exec(sql) { + db.exec(sql); + }, + prepare(sql) { + let cached = stmtCache.get(sql); + if (cached) return cached; + cached = wrapStmt(db.prepare(sql)); + stmtCache.set(sql, cached); + return cached; + }, + close() { + stmtCache.clear(); + db.close(); + }, + }; +} + +/** + * Execute a database query with timeout protection. + * Falls back to empty result if query exceeds timeout. + * + * Purpose: Prevent hanging reads from blocking autonomous dispatch. + * + * Consumer: memory-repository.js, context-store.js, and any read query + * that needs a safety ceiling. + */ +export function withQueryTimeout( + operation: () => T, + fallbackValue: T, + timeoutMs = DB_QUERY_TIMEOUT_MS, +): T { + try { + return operation(); + } catch (err) { + const errMsg = (err as Error)?.message; +if (errMsg?.includes("timeout") || errMsg?.includes("busy")) { + logWarning( + "sf-db", + `Query timed out after ${timeoutMs}ms, returning fallback`, + ); + return fallbackValue; + } + throw err; + } +} +function openRawDb(path: string): import('node:sqlite').DatabaseSync { + loadProvider(); + return new DatabaseSync(path); +} +function sqliteStringLiteral(value: string | number): string { + return `'${String(value).replaceAll("'", "''")}'`; +} +function databaseBackupDir(path: string): string { + return join(dirname(path), "backups", "db"); +} +function latestDatabaseBackupMtime(dir: string): number { + if (!existsSync(dir)) return 0; + let latest = 0; + for (const entry of readdirSync(dir)) { + if (!entry.startsWith("sf.db.")) continue; + const file = join(dir, entry); + try { + const stat = statSync(file); + if (stat.isFile() && stat.mtimeMs > latest) latest = stat.mtimeMs; + } catch { + // Ignore files that disappear during pruning. 
+ } + } + return latest; +} +function pruneDatabaseBackups(dir: string): void { + if (!existsSync(dir)) return; + const backups = []; + for (const entry of readdirSync(dir)) { + if (!entry.startsWith("sf.db.")) continue; + const file = join(dir, entry); + try { + const stat = statSync(file); + if (stat.isFile()) backups.push({ file, mtimeMs: stat.mtimeMs }); + } catch { + // Ignore files that disappear during pruning. + } + } + backups.sort((a, b) => b.mtimeMs - a.mtimeMs); + for (const backup of backups.slice(DB_BACKUP_RETENTION)) { + try { + unlinkSync(backup.file); + } catch { + // Best-effort retention; never block DB open on pruning. + } + } +} +function databaseMaintenancePath(path: string): string { + return join(databaseBackupDir(path), "maintenance.json"); +} +function readDatabaseMaintenanceState(path: string): Record { + try { + return JSON.parse(readFileSync(databaseMaintenancePath(path), "utf-8")); + } catch { + return {}; + } +} +function writeDatabaseMaintenanceState(path: string, state: Record): void { + try { + writeFileSync( + databaseMaintenancePath(path), + JSON.stringify(state, null, 2) + "\n", + "utf-8", + ); + } catch { + // Best-effort maintenance metadata. + } +} +function createDatabaseSnapshot(rawDb: import('node:sqlite').DatabaseSync, path: string): void { + if (path === ":memory:" || process.env.SF_DB_BACKUP_DISABLE === "1") return; + const dir = databaseBackupDir(path); + try { + mkdirSync(dir, { recursive: true }); + const latest = latestDatabaseBackupMtime(dir); + if (latest > 0 && Date.now() - latest < DB_BACKUP_MIN_INTERVAL_MS) return; + const stamp = new Date().toISOString().replace(/[:.]/g, "-"); + const backupPath = join(dir, `sf.db.${stamp}`); + rawDb.exec(`VACUUM INTO ${sqliteStringLiteral(backupPath)}`); + pruneDatabaseBackups(dir); + } catch (err) { + logWarning( + "sf-db", + `database snapshot failed: ${err instanceof Error ? 
err.message : String(err)}`, + ); + } +} +function performDatabaseMaintenance(rawDb: import('node:sqlite').DatabaseSync, path: string): void { + if (path === ":memory:" || process.env.SF_DB_MAINTENANCE_DISABLE === "1") + return; + try { + const quickCheck = rawDb.prepare("PRAGMA quick_check").get(); + if (quickCheck?.quick_check !== "ok") { + logWarning("sf-db", "database quick_check failed; skipping maintenance"); + return; + } + rawDb.exec("PRAGMA wal_checkpoint(PASSIVE)"); + rawDb.exec("PRAGMA optimize"); + rawDb.exec("PRAGMA incremental_vacuum(128)"); + + const state = readDatabaseMaintenanceState(path); + const lastFullVacuumAt = + typeof state.lastFullVacuumAt === "string" + ? Date.parse(state.lastFullVacuumAt) + : 0; + if ( + !Number.isFinite(lastFullVacuumAt) || + Date.now() - lastFullVacuumAt >= DB_FULL_VACUUM_MIN_INTERVAL_MS + ) { + rawDb.exec("VACUUM"); + writeDatabaseMaintenanceState(path, { + ...state, + lastFullVacuumAt: new Date().toISOString(), + }); + } + } catch (err) { + logWarning( + "sf-db", + `database maintenance failed: ${err instanceof Error ? 
err.message : String(err)}`, + ); + } +} +const SCHEMA_VERSION = 58; +function indexExists(db: DbAdapter, name: string): boolean { + return !!db + .prepare( + "SELECT 1 as present FROM sqlite_master WHERE type = 'index' AND name = ?", + ) + .get(name); +} +function dedupeVerificationEvidenceRows(db: DbAdapter): void { + db.exec(` + DELETE FROM verification_evidence + WHERE rowid NOT IN ( + SELECT MIN(rowid) + FROM verification_evidence + GROUP BY task_id, slice_id, milestone_id, command, verdict + ) + `); +} +function ensureVerificationEvidenceDedupIndex(db: DbAdapter) { + if (indexExists(db, "idx_verification_evidence_dedup")) return; + dedupeVerificationEvidenceRows(db); + db.exec( + "CREATE UNIQUE INDEX IF NOT EXISTS idx_verification_evidence_dedup ON verification_evidence(task_id, slice_id, milestone_id, command, verdict)", + ); +} +function ensureRepoProfileTables(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS repo_profiles ( + profile_id TEXT PRIMARY KEY, + project_hash TEXT NOT NULL, + project_root TEXT NOT NULL DEFAULT '', + head TEXT DEFAULT NULL, + branch TEXT DEFAULT NULL, + remote_hash TEXT DEFAULT NULL, + dirty INTEGER NOT NULL DEFAULT 0, + profile_json TEXT NOT NULL DEFAULT '{}', + created_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS repo_file_observations ( + path TEXT PRIMARY KEY, + latest_profile_id TEXT NOT NULL, + git_status TEXT NOT NULL, + ownership TEXT NOT NULL, + language TEXT DEFAULT NULL, + size_bytes INTEGER NOT NULL DEFAULT 0, + content_hash TEXT DEFAULT NULL, + summary TEXT DEFAULT NULL, + first_seen_at TEXT NOT NULL, + last_seen_at TEXT NOT NULL, + adopted_at TEXT DEFAULT NULL, + adoption_unit_id TEXT DEFAULT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_repo_profiles_created ON repo_profiles(created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_repo_file_observations_status ON repo_file_observations(git_status, ownership)", + ); +} +function ensureBacklogTables(db: 
DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS backlog_items ( + id TEXT PRIMARY KEY, + title TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + note TEXT NOT NULL DEFAULT '', + source TEXT NOT NULL DEFAULT '', + triage_run_id TEXT DEFAULT NULL, + sequence INTEGER NOT NULL DEFAULT 0, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + promoted_at TEXT DEFAULT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_backlog_items_status_sequence ON backlog_items(status, sequence, id)", + ); +} +function ensureScheduleTables(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS schedule_entries ( + seq INTEGER PRIMARY KEY AUTOINCREMENT, + scope TEXT NOT NULL DEFAULT 'project', + id TEXT NOT NULL, + schema_version INTEGER NOT NULL DEFAULT 1, + kind TEXT NOT NULL DEFAULT 'reminder', + status TEXT NOT NULL DEFAULT 'pending', + due_at TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL DEFAULT '', + snoozed_at TEXT DEFAULT NULL, + payload_json TEXT NOT NULL DEFAULT '{}', + created_by TEXT NOT NULL DEFAULT 'user', + autonomous_dispatch INTEGER NOT NULL DEFAULT 0, + full_json TEXT NOT NULL DEFAULT '{}', + imported_from TEXT DEFAULT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_schedule_entries_scope_id_created ON schedule_entries(scope, id, created_at DESC, seq DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_schedule_entries_scope_due ON schedule_entries(scope, status, due_at)", + ); + ensureColumn( + db, + "schedule_entries", + "autonomous_dispatch", + "ALTER TABLE schedule_entries ADD COLUMN autonomous_dispatch INTEGER NOT NULL DEFAULT 0", + ); +} +function ensureSolverEvalTables(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS solver_eval_runs ( + run_id TEXT PRIMARY KEY, + suite_source TEXT NOT NULL DEFAULT '', + cases_count INTEGER NOT NULL DEFAULT 0, + summary_json TEXT NOT NULL DEFAULT '{}', + report_path TEXT NOT NULL DEFAULT '', + results_path TEXT NOT NULL DEFAULT '', + db_recorded INTEGER NOT NULL 
DEFAULT 1, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS solver_eval_case_results ( + run_id TEXT NOT NULL, + case_id TEXT NOT NULL, + title TEXT NOT NULL DEFAULT '', + mode TEXT NOT NULL, + passed INTEGER NOT NULL DEFAULT 0, + false_complete INTEGER NOT NULL DEFAULT 0, + duration_ms INTEGER DEFAULT NULL, + command_status INTEGER DEFAULT NULL, + solver_outcome TEXT DEFAULT NULL, + pdd_complete INTEGER DEFAULT NULL, + result_json TEXT NOT NULL DEFAULT '{}', + created_at TEXT NOT NULL, + PRIMARY KEY (run_id, case_id, mode), + FOREIGN KEY (run_id) REFERENCES solver_eval_runs(run_id) ON DELETE CASCADE + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_solver_eval_runs_created ON solver_eval_runs(created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_solver_eval_case_lookup ON solver_eval_case_results(run_id, case_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_solver_eval_case_false_complete ON solver_eval_case_results(false_complete, mode)", + ); +} +function ensureSessionTables(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS sessions ( + session_id TEXT PRIMARY KEY, + trace_id TEXT DEFAULT NULL, + mode TEXT NOT NULL DEFAULT 'interactive', + cwd TEXT NOT NULL DEFAULT '', + repo TEXT DEFAULT NULL, + branch TEXT DEFAULT NULL, + summary TEXT DEFAULT NULL, + summary_count INTEGER NOT NULL DEFAULT 0, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS turns ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + session_id TEXT NOT NULL REFERENCES sessions(session_id) ON DELETE CASCADE, + turn_index INTEGER NOT NULL, + user_message TEXT, + assistant_response TEXT, + ts TEXT NOT NULL, + UNIQUE(session_id, turn_index) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS session_file_touches ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + session_id TEXT NOT NULL REFERENCES sessions(session_id) ON DELETE CASCADE, + path TEXT NOT NULL, + tool_name 
TEXT DEFAULT NULL, + turn_id INTEGER DEFAULT NULL REFERENCES turns(id), + first_seen_at TEXT NOT NULL, + UNIQUE(session_id, path) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS session_refs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + session_id TEXT NOT NULL REFERENCES sessions(session_id) ON DELETE CASCADE, + ref_type TEXT NOT NULL, + ref_value TEXT NOT NULL, + turn_id INTEGER DEFAULT NULL REFERENCES turns(id), + created_at TEXT NOT NULL, + UNIQUE(session_id, ref_type, ref_value) + ) + `); + // FTS5 external-content table over turns for keyword recall. + // content_rowid links to turns.id; triggers below keep it in sync. + db.exec(` + CREATE VIRTUAL TABLE IF NOT EXISTS turns_fts USING fts5( + user_message, + assistant_response, + content='turns', + content_rowid='id' + ) + `); + db.exec(` + CREATE TRIGGER IF NOT EXISTS turns_fts_insert AFTER INSERT ON turns BEGIN + INSERT INTO turns_fts(rowid, user_message, assistant_response) + VALUES (new.id, new.user_message, new.assistant_response); + END + `); + db.exec(` + CREATE TRIGGER IF NOT EXISTS turns_fts_update AFTER UPDATE ON turns BEGIN + INSERT INTO turns_fts(turns_fts, rowid, user_message, assistant_response) + VALUES ('delete', old.id, old.user_message, old.assistant_response); + INSERT INTO turns_fts(rowid, user_message, assistant_response) + VALUES (new.id, new.user_message, new.assistant_response); + END + `); + db.exec(` + CREATE TRIGGER IF NOT EXISTS turns_fts_delete AFTER DELETE ON turns BEGIN + INSERT INTO turns_fts(turns_fts, rowid, user_message, assistant_response) + VALUES ('delete', old.id, old.user_message, old.assistant_response); + END + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_sessions_created ON sessions(created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_sessions_repo ON sessions(repo, created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_turns_session ON turns(session_id, turn_index)", + ); + db.exec("CREATE INDEX IF NOT EXISTS idx_turns_ts ON turns(ts 
DESC)"); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_session_file_touches_session ON session_file_touches(session_id, first_seen_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_session_file_touches_path ON session_file_touches(path, session_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_session_refs_session ON session_refs(session_id, created_at DESC)", + ); +} +function ensureSessionSnapshotTable(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS session_snapshots ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + -- Session that triggered this checkpoint. FK to sessions(session_id). + session_id TEXT NOT NULL, + -- Zero-based counter within the session (first snapshot = 0). + snapshot_index INTEGER NOT NULL DEFAULT 0, + -- Optional git stash ref so the snapshot can be restored exactly. + -- NULL when the working tree had no changes to stash. + git_stash_ref TEXT, + -- Free-text label for the snapshot (e.g. "before migration deploy"). + label TEXT, + ts TEXT NOT NULL, + UNIQUE(session_id, snapshot_index) + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_session_snapshots_session ON session_snapshots(session_id, snapshot_index)", + ); +} +function ensureHeadlessRunTables(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS headless_runs ( + run_id TEXT PRIMARY KEY, + command TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT '', + exit_code INTEGER NOT NULL DEFAULT 0, + timed_out INTEGER NOT NULL DEFAULT 0, + interrupted INTEGER NOT NULL DEFAULT 0, + restart_count INTEGER NOT NULL DEFAULT 0, + max_restarts INTEGER NOT NULL DEFAULT 0, + duration_ms INTEGER NOT NULL DEFAULT 0, + total_events INTEGER NOT NULL DEFAULT 0, + tool_calls INTEGER NOT NULL DEFAULT 0, + solver_eval_run_id TEXT DEFAULT NULL, + solver_eval_report_path TEXT DEFAULT NULL, + details_json TEXT NOT NULL DEFAULT '{}', + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_headless_runs_created ON 
headless_runs(created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_headless_runs_status ON headless_runs(status, created_at DESC)", + ); +} +function ensureUokMessageTables(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS uok_messages ( + id TEXT PRIMARY KEY, + from_agent TEXT NOT NULL, + to_agent TEXT NOT NULL, + body TEXT NOT NULL DEFAULT '', + metadata_json TEXT NOT NULL DEFAULT '{}', + sent_at TEXT NOT NULL DEFAULT '', + delivered_at TEXT DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS uok_message_reads ( + message_id TEXT NOT NULL, + agent_id TEXT NOT NULL, + read_at TEXT NOT NULL DEFAULT '', + PRIMARY KEY (message_id, agent_id), + FOREIGN KEY (message_id) REFERENCES uok_messages(id) ON DELETE CASCADE + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_messages_to ON uok_messages(to_agent, sent_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_messages_conversation ON uok_messages(from_agent, to_agent, sent_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_messages_sent ON uok_messages(sent_at DESC)", + ); +} +function ensureDeployTables(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS deploy_runs ( + id TEXT PRIMARY KEY, + milestone_id TEXT NOT NULL, + target TEXT NOT NULL, + command TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + exit_code INTEGER DEFAULT NULL, + output TEXT DEFAULT NULL, + deployed_url TEXT DEFAULT NULL, + created_at TEXT NOT NULL, + finished_at TEXT DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS smoke_results ( + id TEXT PRIMARY KEY, + deploy_run_id TEXT NOT NULL, + milestone_id TEXT NOT NULL, + url TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + verdict TEXT DEFAULT NULL, + checks_json TEXT NOT NULL DEFAULT '[]', + created_at TEXT NOT NULL, + finished_at TEXT DEFAULT NULL, + FOREIGN KEY (deploy_run_id) REFERENCES deploy_runs(id) ON DELETE CASCADE + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS release_records ( 
+ id TEXT PRIMARY KEY, + milestone_id TEXT NOT NULL, + version TEXT NOT NULL, + prev_version TEXT DEFAULT NULL, + changelog_entry TEXT DEFAULT NULL, + git_tag TEXT DEFAULT NULL, + published INTEGER NOT NULL DEFAULT 0, + created_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS rollback_runs ( + id TEXT PRIMARY KEY, + deploy_run_id TEXT NOT NULL, + milestone_id TEXT NOT NULL, + reason TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + output TEXT DEFAULT NULL, + created_at TEXT NOT NULL, + finished_at TEXT DEFAULT NULL, + FOREIGN KEY (deploy_run_id) REFERENCES deploy_runs(id) ON DELETE CASCADE + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_deploy_runs_milestone ON deploy_runs(milestone_id, created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_smoke_results_deploy ON smoke_results(deploy_run_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_release_records_milestone ON release_records(milestone_id, created_at DESC)", + ); +} +function ensureSleeptimeQueueTable(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS sleeptime_consolidation_queue ( + id TEXT PRIMARY KEY, + conversation_agent TEXT NOT NULL, + memory_agent TEXT NOT NULL, + content TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + created_at TEXT NOT NULL, + processed_at TEXT DEFAULT NULL, + result TEXT DEFAULT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_sleeptime_queue_status ON sleeptime_consolidation_queue(status, created_at ASC)", + ); +} +function ensureSelfFeedbackTables(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS self_feedback ( + id TEXT PRIMARY KEY, + ts TEXT NOT NULL, + kind TEXT NOT NULL, + severity TEXT NOT NULL, + blocking INTEGER NOT NULL DEFAULT 0, + repo_identity TEXT NOT NULL DEFAULT '', + sf_version TEXT NOT NULL DEFAULT '', + base_path TEXT NOT NULL DEFAULT '', + unit_type TEXT DEFAULT NULL, + milestone_id TEXT DEFAULT NULL, + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + summary 
TEXT NOT NULL DEFAULT '', + evidence TEXT NOT NULL DEFAULT '', + suggested_fix TEXT NOT NULL DEFAULT '', + full_json TEXT NOT NULL, + resolved_at TEXT DEFAULT NULL, + resolved_reason TEXT DEFAULT NULL, + resolved_by_sf_version TEXT DEFAULT NULL, + resolved_evidence_json TEXT DEFAULT NULL, + resolved_criteria_json TEXT DEFAULT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_self_feedback_open ON self_feedback(resolved_at, severity, ts)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_self_feedback_kind ON self_feedback(kind, ts)", + ); +} +function ensureRetrievalEvidenceTables(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS retrieval_evidence ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + backend TEXT NOT NULL, + source_kind TEXT NOT NULL DEFAULT 'code', + query TEXT NOT NULL DEFAULT '', + strategy TEXT NOT NULL DEFAULT '', + scope TEXT NOT NULL DEFAULT '', + project_root TEXT NOT NULL DEFAULT '', + git_head TEXT DEFAULT NULL, + git_branch TEXT DEFAULT NULL, + worktree_dirty INTEGER NOT NULL DEFAULT 0, + freshness TEXT NOT NULL DEFAULT 'unknown', + status TEXT NOT NULL DEFAULT 'ok', + hit_count INTEGER NOT NULL DEFAULT 0, + elapsed_ms INTEGER NOT NULL DEFAULT 0, + cache_path TEXT DEFAULT NULL, + error TEXT DEFAULT NULL, + result_json TEXT NOT NULL DEFAULT '{}', + recorded_at TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_backend_recorded ON retrieval_evidence(backend, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_scope_recorded ON retrieval_evidence(scope, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_status_recorded ON retrieval_evidence(status, recorded_at DESC)", + ); +} +function ensureTriageTables(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS triage_runs ( + id TEXT PRIMARY KEY, + source_file TEXT, + status TEXT NOT NULL DEFAULT 'complete', + result_summary_json TEXT, + created_at TEXT NOT NULL + ) + 
`); + db.exec(` + CREATE TABLE IF NOT EXISTS triage_evals ( + id TEXT PRIMARY KEY, + run_id TEXT NOT NULL REFERENCES triage_runs(id), + task_input TEXT NOT NULL, + expected_behavior TEXT, + evidence TEXT, + failure_mode TEXT, + status TEXT NOT NULL DEFAULT 'pending', + created_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS triage_items ( + id TEXT PRIMARY KEY, + run_id TEXT NOT NULL REFERENCES triage_runs(id), + kind TEXT NOT NULL, + content TEXT NOT NULL, + evidence TEXT, + status TEXT NOT NULL DEFAULT 'pending', + created_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS triage_skills ( + id TEXT PRIMARY KEY, + run_id TEXT NOT NULL REFERENCES triage_runs(id), + name TEXT, + description TEXT, + trigger TEXT, + raw_json TEXT, + status TEXT NOT NULL DEFAULT 'pending', + created_at TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_triage_evals_run ON triage_evals(run_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_triage_items_run_kind ON triage_items(run_id, kind)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_triage_skills_run ON triage_skills(run_id)", + ); +} +function ensureRuntimeCounterTable(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS runtime_counters ( + key TEXT PRIMARY KEY, + value INTEGER NOT NULL DEFAULT 0, + updated_at TEXT NOT NULL + ) + `); +} +function ensureValidationAttentionMarkersTable(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS validation_attention_markers ( + milestone_id TEXT PRIMARY KEY, + created_at TEXT NOT NULL, + source TEXT, + remediation_round INTEGER, + revalidation_round INTEGER, + revalidation_requested_at TEXT + ) + `); +} +function ensureSpecSchemaTables(db: DbAdapter) { + // Tier 1.3: Spec/Runtime/Evidence schema separation + // Creates 9 normalized tables for milestone, slice, task entities + // Each entity type has: _specs (immutable intent), (runtime state), _evidence (audit trail) + + // ── Milestone Spec Table (immutable record 
of intent) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS milestone_specs ( + id TEXT NOT NULL, + vision TEXT NOT NULL DEFAULT '', + success_criteria TEXT DEFAULT '', + key_risks TEXT DEFAULT '', + proof_strategy TEXT DEFAULT '', + verification_contract TEXT DEFAULT '', + verification_integration TEXT DEFAULT '', + verification_operational TEXT DEFAULT '', + verification_uat TEXT DEFAULT '', + definition_of_done TEXT DEFAULT '', + requirement_coverage TEXT DEFAULT '', + boundary_map_markdown TEXT DEFAULT '', + vision_meeting_json TEXT DEFAULT '', + product_research_json TEXT DEFAULT '', + spec_version INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY (id) REFERENCES milestones(id) + ) + `); + + // ── Slice Spec Table (immutable record of intent) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS slice_specs ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + goal TEXT NOT NULL DEFAULT '', + success_criteria TEXT DEFAULT '', + proof_level TEXT DEFAULT '', + integration_closure TEXT DEFAULT '', + observability_impact TEXT DEFAULT '', + adversarial_partner TEXT DEFAULT '', + adversarial_combatant TEXT DEFAULT '', + adversarial_architect TEXT DEFAULT '', + planning_meeting_json TEXT DEFAULT '', + spec_version INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + PRIMARY KEY (milestone_id, slice_id), + FOREIGN KEY (milestone_id) REFERENCES milestones(id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + + // ── Task Spec Table (immutable record of intent) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS task_specs ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + task_id TEXT NOT NULL, + verify TEXT NOT NULL DEFAULT '', + inputs TEXT DEFAULT '', + expected_output TEXT DEFAULT '', + risk TEXT NOT NULL DEFAULT 'low', + mutation_scope TEXT NOT NULL DEFAULT 'isolated', + verification_type TEXT NOT NULL DEFAULT 'self-check', + plan_approval TEXT NOT NULL DEFAULT 
'not-required', + estimated_effort INTEGER DEFAULT NULL, + dependencies TEXT NOT NULL DEFAULT '[]', + blocks_parallel INTEGER NOT NULL DEFAULT 0, + requires_user_input INTEGER NOT NULL DEFAULT 0, + auto_retry INTEGER NOT NULL DEFAULT 1, + max_retries INTEGER NOT NULL DEFAULT 2, + spec_version INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + PRIMARY KEY (milestone_id, slice_id, task_id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id), + FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) + ) + `); + + // ── Milestone Evidence Table (append-only audit trail) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS milestone_evidence ( + milestone_id TEXT NOT NULL, + evidence_type TEXT NOT NULL, + content TEXT NOT NULL, + recorded_at TEXT NOT NULL, + phase_name TEXT DEFAULT '', + recorded_by TEXT DEFAULT '', + evidence_id TEXT NOT NULL DEFAULT (lower(hex(randomblob(16)))), + PRIMARY KEY (milestone_id, evidence_id), + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + + // ── Slice Evidence Table (append-only audit trail) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS slice_evidence ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + evidence_type TEXT NOT NULL, + content TEXT NOT NULL, + recorded_at TEXT NOT NULL, + phase_name TEXT DEFAULT '', + recorded_by TEXT DEFAULT '', + evidence_id TEXT NOT NULL DEFAULT (lower(hex(randomblob(16)))), + PRIMARY KEY (milestone_id, slice_id, evidence_id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + + // ── Task Evidence Table (append-only audit trail) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS task_evidence ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + task_id TEXT NOT NULL, + evidence_type TEXT NOT NULL, + content TEXT NOT NULL, + recorded_at TEXT NOT NULL, + phase_name TEXT DEFAULT '', + recorded_by TEXT DEFAULT '', + evidence_id TEXT NOT NULL DEFAULT 
(lower(hex(randomblob(16)))), + PRIMARY KEY (milestone_id, slice_id, task_id, evidence_id), + FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) + ) + `); + + // Indices for efficient querying of evidence trails + db.exec(` + CREATE INDEX IF NOT EXISTS idx_milestone_evidence_type + ON milestone_evidence(milestone_id, evidence_type, recorded_at DESC) + `); + db.exec(` + CREATE INDEX IF NOT EXISTS idx_slice_evidence_type + ON slice_evidence(milestone_id, slice_id, evidence_type, recorded_at DESC) + `); + db.exec(` + CREATE INDEX IF NOT EXISTS idx_task_evidence_type + ON task_evidence(milestone_id, slice_id, task_id, evidence_type, recorded_at DESC) + `); +} +function initSchema(db: DbAdapter, fileBacked: boolean): void { + if (fileBacked) db.exec("PRAGMA journal_mode=WAL"); + if (fileBacked) db.exec("PRAGMA busy_timeout = 5000"); + if (fileBacked) db.exec("PRAGMA synchronous = NORMAL"); + // Disable SQLite's automatic WAL checkpoint (default: every 1000 pages). + // Auto-checkpoint fires at unpredictable times — if the process is killed + // mid-checkpoint (e.g., OOM), the main DB is partially written with an + // empty WAL and cannot be recovered. Explicit checkpoints are issued at + // safe loop boundaries instead (post-unit finalize, close). 
+ if (fileBacked) db.exec("PRAGMA wal_autocheckpoint=0"); + if (fileBacked) db.exec("PRAGMA auto_vacuum = INCREMENTAL"); + if (fileBacked) db.exec("PRAGMA cache_size = -8000"); // 8 MB page cache + if (fileBacked && process.platform !== "darwin") + db.exec("PRAGMA mmap_size = 67108864"); // 64 MB mmap + db.exec("PRAGMA temp_store = MEMORY"); + db.exec("PRAGMA foreign_keys = ON"); + db.exec("BEGIN"); + try { + db.exec(` + CREATE TABLE IF NOT EXISTS schema_version ( + version INTEGER NOT NULL, + applied_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS decisions ( + seq INTEGER PRIMARY KEY AUTOINCREMENT, + id TEXT NOT NULL UNIQUE, + when_context TEXT NOT NULL DEFAULT '', + scope TEXT NOT NULL DEFAULT '', + decision TEXT NOT NULL DEFAULT '', + choice TEXT NOT NULL DEFAULT '', + rationale TEXT NOT NULL DEFAULT '', + revisable TEXT NOT NULL DEFAULT '', + made_by TEXT NOT NULL DEFAULT 'agent', + superseded_by TEXT DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS requirements ( + id TEXT PRIMARY KEY, + class TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT '', + description TEXT NOT NULL DEFAULT '', + why TEXT NOT NULL DEFAULT '', + source TEXT NOT NULL DEFAULT '', + primary_owner TEXT NOT NULL DEFAULT '', + supporting_slices TEXT NOT NULL DEFAULT '', + validation TEXT NOT NULL DEFAULT '', + notes TEXT NOT NULL DEFAULT '', + full_content TEXT NOT NULL DEFAULT '', + superseded_by TEXT DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS artifacts ( + path TEXT PRIMARY KEY, + artifact_type TEXT NOT NULL DEFAULT '', + milestone_id TEXT DEFAULT NULL, + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + full_content TEXT NOT NULL DEFAULT '', + imported_at TEXT NOT NULL DEFAULT '' + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS memories ( + seq INTEGER PRIMARY KEY AUTOINCREMENT, + id TEXT NOT NULL UNIQUE, + category TEXT NOT NULL, + content TEXT NOT NULL, + confidence REAL NOT NULL DEFAULT 0.8, + source_unit_type 
TEXT, + source_unit_id TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + superseded_by TEXT DEFAULT NULL, + hit_count INTEGER NOT NULL DEFAULT 0, + tags TEXT NOT NULL DEFAULT '[]' + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS memory_processed_units ( + unit_key TEXT PRIMARY KEY, + activity_file TEXT, + processed_at TEXT NOT NULL + ) + `); + // memory_embeddings, memory_relations, memory_sources used to be referenced + // by helper functions and queries (memory-embeddings.ts, memory-relations.ts, + // memory-ingest.ts) without a corresponding CREATE TABLE — any actual write + // would have failed with "no such table". Creating them as IF NOT EXISTS so + // existing DBs that somehow have them survive, and fresh DBs work. + db.exec(` + CREATE TABLE IF NOT EXISTS memory_embeddings ( + memory_id TEXT PRIMARY KEY, + model TEXT NOT NULL, + dim INTEGER NOT NULL, + vector BLOB NOT NULL, + updated_at TEXT NOT NULL, + FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS memory_relations ( + from_id TEXT NOT NULL, + to_id TEXT NOT NULL, + rel TEXT NOT NULL, + confidence REAL NOT NULL DEFAULT 0.8, + created_at TEXT NOT NULL, + PRIMARY KEY (from_id, to_id, rel), + FOREIGN KEY (from_id) REFERENCES memories(id) ON DELETE CASCADE, + FOREIGN KEY (to_id) REFERENCES memories(id) ON DELETE CASCADE + ) + `); + // PK covers from_id as leading column already; reverse lookups + // (memory-relations.ts queries WHERE to_id = ?) need their own index + // to avoid a full table scan as the relation count grows. 
+ db.exec( + "CREATE INDEX IF NOT EXISTS idx_memory_relations_to ON memory_relations(to_id)", + ); + db.exec(` + CREATE TABLE IF NOT EXISTS memory_sources ( + id TEXT PRIMARY KEY, + kind TEXT NOT NULL, + uri TEXT, + title TEXT, + content TEXT NOT NULL, + content_hash TEXT NOT NULL, + imported_at TEXT NOT NULL, + scope TEXT NOT NULL DEFAULT 'project', + tags TEXT NOT NULL DEFAULT '[]' + ) + `); + // content_hash is queried on every insert for deduplication; without an + // index the lookup becomes a full table scan as ingestion volume grows. + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memory_sources_content_hash ON memory_sources(content_hash)", + ); + // Category GROUP BY queries (e.g. /memory stats) need a covering + // index that filters active memories and groups by category. + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memories_category ON memories(superseded_by, category)", + ); + db.exec(` + CREATE TABLE IF NOT EXISTS judgments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + unit_id TEXT NOT NULL, + decision TEXT NOT NULL DEFAULT '', + alternatives_json TEXT NOT NULL DEFAULT '[]', + reasoning TEXT NOT NULL DEFAULT '', + confidence TEXT NOT NULL DEFAULT 'medium', + ts TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_judgments_unit_id ON judgments(unit_id, ts DESC)", + ); + db.exec(` + CREATE TABLE IF NOT EXISTS milestones ( + id TEXT PRIMARY KEY, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'active', + depends_on TEXT NOT NULL DEFAULT '[]', + created_at TEXT NOT NULL DEFAULT '', + completed_at TEXT DEFAULT NULL, + vision TEXT NOT NULL DEFAULT '', + success_criteria TEXT NOT NULL DEFAULT '[]', + key_risks TEXT NOT NULL DEFAULT '[]', + proof_strategy TEXT NOT NULL DEFAULT '[]', + verification_contract TEXT NOT NULL DEFAULT '', + verification_integration TEXT NOT NULL DEFAULT '', + verification_operational TEXT NOT NULL DEFAULT '', + verification_uat TEXT NOT NULL DEFAULT '', + definition_of_done TEXT NOT NULL DEFAULT '[]', + 
requirement_coverage TEXT NOT NULL DEFAULT '', + boundary_map_markdown TEXT NOT NULL DEFAULT '', + vision_meeting_json TEXT NOT NULL DEFAULT '', + product_research_json TEXT NOT NULL DEFAULT '', + sequence INTEGER DEFAULT 0 + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS slices ( + milestone_id TEXT NOT NULL, + id TEXT NOT NULL, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + risk TEXT NOT NULL DEFAULT 'medium', + depends TEXT NOT NULL DEFAULT '[]', + demo TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL DEFAULT '', + completed_at TEXT DEFAULT NULL, + full_summary_md TEXT NOT NULL DEFAULT '', + full_uat_md TEXT NOT NULL DEFAULT '', + goal TEXT NOT NULL DEFAULT '', + success_criteria TEXT NOT NULL DEFAULT '', + proof_level TEXT NOT NULL DEFAULT '', + integration_closure TEXT NOT NULL DEFAULT '', + observability_impact TEXT NOT NULL DEFAULT '', + adversarial_partner TEXT NOT NULL DEFAULT '', + adversarial_combatant TEXT NOT NULL DEFAULT '', + adversarial_architect TEXT NOT NULL DEFAULT '', + planning_meeting_json TEXT NOT NULL DEFAULT '', + sequence INTEGER DEFAULT 0, -- Ordering hint: tools may set this to control execution order + replan_triggered_at TEXT DEFAULT NULL, + is_sketch INTEGER NOT NULL DEFAULT 0, -- SF ADR-011: 1 = slice is a sketch awaiting refine-slice + sketch_scope TEXT NOT NULL DEFAULT '', -- SF ADR-011: 2-3 sentence scope hint from plan-milestone + PRIMARY KEY (milestone_id, id), + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS tasks ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + id TEXT NOT NULL, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + one_liner TEXT NOT NULL DEFAULT '', + narrative TEXT NOT NULL DEFAULT '', + verification_result TEXT NOT NULL DEFAULT '', + duration TEXT NOT NULL DEFAULT '', + completed_at TEXT DEFAULT NULL, + blocker_discovered INTEGER DEFAULT 0, + deviations TEXT NOT NULL DEFAULT '', + 
known_issues TEXT NOT NULL DEFAULT '', + key_files TEXT NOT NULL DEFAULT '[]', + key_decisions TEXT NOT NULL DEFAULT '[]', + full_summary_md TEXT NOT NULL DEFAULT '', + description TEXT NOT NULL DEFAULT '', + estimate TEXT NOT NULL DEFAULT '', + files TEXT NOT NULL DEFAULT '[]', + verify TEXT NOT NULL DEFAULT '', + inputs TEXT NOT NULL DEFAULT '[]', + expected_output TEXT NOT NULL DEFAULT '[]', + observability_impact TEXT NOT NULL DEFAULT '', + full_plan_md TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL DEFAULT '', + verification_status TEXT NOT NULL DEFAULT '', + risk TEXT NOT NULL DEFAULT 'low', + mutation_scope TEXT NOT NULL DEFAULT 'isolated', + verification_type TEXT NOT NULL DEFAULT 'self-check', + plan_approval TEXT NOT NULL DEFAULT 'not-required', + task_status TEXT NOT NULL DEFAULT 'todo', + estimated_effort INTEGER DEFAULT NULL, + dependencies TEXT NOT NULL DEFAULT '[]', + blocks_parallel INTEGER NOT NULL DEFAULT 0, + requires_user_input INTEGER NOT NULL DEFAULT 0, + auto_retry INTEGER NOT NULL DEFAULT 1, + max_retries INTEGER NOT NULL DEFAULT 2, + sequence INTEGER DEFAULT 0, -- Ordering hint: tools may set this to control execution order + escalation_pending INTEGER NOT NULL DEFAULT 0, -- ADR-011 P2 (SF): pause-on-escalation flag + escalation_awaiting_review INTEGER NOT NULL DEFAULT 0, -- ADR-011 P2 (SF): continueWithDefault=true marker (no pause) + escalation_override_applied INTEGER NOT NULL DEFAULT 0, -- SF ADR-011 P2: 1 once carry-forward injected into a downstream prompt + escalation_artifact_path TEXT DEFAULT NULL, -- ADR-011 P2 (SF): path to T##-ESCALATION.json + PRIMARY KEY (milestone_id, slice_id, id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + ensureTaskSchedulerTable(db); + if (columnExists(db, "tasks", "escalation_pending")) { + db.exec(` + CREATE INDEX IF NOT EXISTS idx_tasks_escalation_pending ON tasks(milestone_id, slice_id, escalation_pending) + `); + } + db.exec(` + CREATE TABLE IF NOT 
EXISTS verification_evidence ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id TEXT NOT NULL DEFAULT '', + slice_id TEXT NOT NULL DEFAULT '', + milestone_id TEXT NOT NULL DEFAULT '', + command TEXT NOT NULL DEFAULT '', + exit_code INTEGER DEFAULT 0, + verdict TEXT NOT NULL DEFAULT '', + duration_ms INTEGER DEFAULT 0, + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS replan_history ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + milestone_id TEXT NOT NULL DEFAULT '', + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + summary TEXT NOT NULL DEFAULT '', + previous_artifact_path TEXT DEFAULT NULL, + replacement_artifact_path TEXT DEFAULT NULL, + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS assessments ( + path TEXT PRIMARY KEY, + milestone_id TEXT NOT NULL DEFAULT '', + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + status TEXT NOT NULL DEFAULT '', + scope TEXT NOT NULL DEFAULT '', + full_content TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS quality_gates ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + gate_id TEXT NOT NULL, + scope TEXT NOT NULL DEFAULT 'slice', + task_id TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + verdict TEXT NOT NULL DEFAULT '', + rationale TEXT NOT NULL DEFAULT '', + findings TEXT NOT NULL DEFAULT '', + evaluated_at TEXT DEFAULT NULL, + PRIMARY KEY (milestone_id, slice_id, gate_id, task_id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + // Slice dependency junction table (v14) + db.exec(` + CREATE TABLE IF NOT EXISTS slice_dependencies ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + 
depends_on_slice_id TEXT NOT NULL, + PRIMARY KEY (milestone_id, slice_id, depends_on_slice_id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id), + FOREIGN KEY (milestone_id, depends_on_slice_id) REFERENCES slices(milestone_id, id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS gate_circuit_breakers ( + gate_id TEXT PRIMARY KEY, + state TEXT NOT NULL DEFAULT 'closed', + failure_streak INTEGER NOT NULL DEFAULT 0, + last_failure_at TEXT DEFAULT NULL, + opened_at TEXT DEFAULT NULL, + half_open_attempts INTEGER NOT NULL DEFAULT 0, + updated_at TEXT NOT NULL DEFAULT '' + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS audit_turn_index ( + trace_id TEXT NOT NULL, + turn_id TEXT NOT NULL, + first_ts TEXT NOT NULL, + last_ts TEXT NOT NULL, + event_count INTEGER NOT NULL DEFAULT 0, + PRIMARY KEY (trace_id, turn_id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS llm_task_outcomes ( + model_id TEXT NOT NULL, + provider TEXT NOT NULL, + unit_type TEXT NOT NULL, + unit_id TEXT NOT NULL, + succeeded INTEGER NOT NULL DEFAULT 0, + retries INTEGER NOT NULL DEFAULT 0, + escalated INTEGER NOT NULL DEFAULT 0, + verification_passed INTEGER DEFAULT NULL, + blocker_discovered INTEGER NOT NULL DEFAULT 0, + duration_ms INTEGER DEFAULT NULL, + tokens_total INTEGER DEFAULT NULL, + cost_usd REAL DEFAULT NULL, + recorded_at INTEGER NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS uok_runs ( + run_id TEXT PRIMARY KEY, + session_id TEXT DEFAULT NULL, + path TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'started', + started_at TEXT NOT NULL, + ended_at TEXT DEFAULT NULL, + error TEXT DEFAULT NULL, + flags_json TEXT NOT NULL DEFAULT '{}', + updated_at TEXT NOT NULL + ) + `); + ensureSelfFeedbackTables(db); + ensureSolverEvalTables(db); + ensureRetrievalEvidenceTables(db); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memories_active ON memories(superseded_by)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_replan_history_milestone ON 
replan_history(milestone_id, created_at)", + ); + // v13 indexes — hot-path dispatch queries + db.exec( + "CREATE INDEX IF NOT EXISTS idx_tasks_active ON tasks(milestone_id, slice_id, status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_slices_active ON slices(milestone_id, status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_milestones_status ON milestones(status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_quality_gates_pending ON quality_gates(milestone_id, slice_id, status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_verification_evidence_task ON verification_evidence(milestone_id, slice_id, task_id)", + ); + ensureVerificationEvidenceDedupIndex(db); + // v14 index — slice dependency lookups + db.exec( + "CREATE INDEX IF NOT EXISTS idx_slice_deps_target ON slice_dependencies(milestone_id, depends_on_slice_id)", + ); + db.exec( + "CREATE UNIQUE INDEX IF NOT EXISTS idx_llm_task_outcomes_identity ON llm_task_outcomes(unit_type, unit_id, recorded_at)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_model_unit ON llm_task_outcomes(model_id, unit_type, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_unit ON llm_task_outcomes(unit_type, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_provider ON llm_task_outcomes(provider, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_runs_status_started ON uok_runs(status, started_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_runs_session ON uok_runs(session_id, started_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_self_feedback_open ON self_feedback(resolved_at, severity, ts)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_self_feedback_kind ON self_feedback(kind, ts)", + ); + ensureRepoProfileTables(db); + ensureBacklogTables(db); + ensureScheduleTables(db); + ensureSolverEvalTables(db); + ensureHeadlessRunTables(db); + 
ensureSessionTables(db); + ensureSessionSnapshotTable(db); + ensureUokMessageTables(db); + ensureDeployTables(db); + ensureSleeptimeQueueTable(db); + ensureSpecSchemaTables(db); + ensureTaskFrontmatterColumns(db); + ensureRetrievalEvidenceTables(db); + ensureTriageTables(db); + ensureRuntimeCounterTable(db); + ensureValidationAttentionMarkersTable(db); + db.exec( + `CREATE VIEW IF NOT EXISTS active_decisions AS SELECT * FROM decisions WHERE superseded_by IS NULL`, + ); + db.exec( + `CREATE VIEW IF NOT EXISTS active_requirements AS SELECT * FROM requirements WHERE superseded_by IS NULL`, + ); + db.exec( + `CREATE VIEW IF NOT EXISTS active_memories AS SELECT * FROM memories WHERE superseded_by IS NULL`, + ); + db.exec( + `CREATE VIEW IF NOT EXISTS active_tasks AS SELECT * FROM tasks WHERE status NOT IN ('done','complete','completed','cancelled')`, + ); + db.exec(` + CREATE VIEW IF NOT EXISTS v_task_full AS + SELECT t.*, ts.spec_version, ts.verify AS spec_verify, + ts.inputs AS spec_inputs, ts.expected_output AS spec_expected_output + FROM tasks t + LEFT JOIN task_specs ts + ON t.milestone_id = ts.milestone_id + AND t.slice_id = ts.slice_id + AND t.id = ts.task_id + `); + const existing = db + .prepare("SELECT count(*) as cnt FROM schema_version") + .get(); + if (existing && existing["cnt"] === 0) { + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": SCHEMA_VERSION, + ":applied_at": new Date().toISOString(), + }); + } + db.exec("COMMIT"); + } catch (err) { + db.exec("ROLLBACK"); + throw err; + } + migrateSchema(db); +} +function columnExists(db: DbAdapter, table: string, column: string): boolean { + const rows = db.prepare(`PRAGMA table_info(${table})`).all(); + return rows.some((row) => row["name"] === column); +} +function tableExists(db: DbAdapter, table: string): boolean { + const row = db + .prepare( + `SELECT name FROM sqlite_master WHERE type='table' AND name=?`, + ) + .get(table); + return 
row != null; +} +function ensureColumn(db: DbAdapter, table: string, column: string, ddl: string): void { + if (!columnExists(db, table, column)) db.exec(ddl); +} +function hasPlanningPayload(planning: Record = {}): boolean { + return ( + Boolean(planning.vision) || + ((planning.successCriteria as unknown[])?.length ?? 0) > 0 || + ((planning.keyRisks as unknown[])?.length ?? 0) > 0 || + ((planning.proofStrategy as unknown[])?.length ?? 0) > 0 || + Boolean(planning.verificationContract) || + Boolean(planning.verificationIntegration) || + Boolean(planning.verificationOperational) || + Boolean(planning.verificationUat) || + ((planning.definitionOfDone as unknown[])?.length ?? 0) > 0 || + Boolean(planning.requirementCoverage) || + Boolean(planning.boundaryMapMarkdown) || + Boolean(planning.visionMeeting) || + Boolean(planning.productResearch) + ); +} +function parseJsonOrFallback(raw: unknown, fallback: unknown): unknown { + if (typeof raw !== "string" || raw.trim().length === 0) return fallback; + try { + return JSON.parse(raw); + } catch { + return fallback; + } +} +function isEmptyMilestoneSpec(row: Record): boolean { + if (!row) return true; + return ( + (row["vision"] ?? "") === "" && + (parseJsonOrFallback(row["success_criteria"], []) as unknown[]).length === 0 && + (parseJsonOrFallback(row["key_risks"], []) as unknown[]).length === 0 && + (parseJsonOrFallback(row["proof_strategy"], []) as unknown[]).length === 0 && + (row["verification_contract"] ?? "") === "" && + (row["verification_integration"] ?? "") === "" && + (row["verification_operational"] ?? "") === "" && + (row["verification_uat"] ?? "") === "" && + (parseJsonOrFallback(row["definition_of_done"], []) as unknown[]).length === 0 && + (row["requirement_coverage"] ?? "") === "" && + (row["boundary_map_markdown"] ?? "") === "" && + (row["vision_meeting_json"] ?? "") === "" && + (row["product_research_json"] ?? 
"") === "" + ); +} +function ensureTaskCreatedAtColumn(db: DbAdapter) { + ensureColumn( + db, + "tasks", + "created_at", + `ALTER TABLE tasks ADD COLUMN created_at TEXT NOT NULL DEFAULT ''`, + ); +} +function ensureTaskFrontmatterColumns(db: DbAdapter) { + ensureColumn( + db, + "tasks", + "risk", + `ALTER TABLE tasks ADD COLUMN risk TEXT NOT NULL DEFAULT 'low'`, + ); + ensureColumn( + db, + "tasks", + "mutation_scope", + `ALTER TABLE tasks ADD COLUMN mutation_scope TEXT NOT NULL DEFAULT 'isolated'`, + ); + ensureColumn( + db, + "tasks", + "verification_type", + `ALTER TABLE tasks ADD COLUMN verification_type TEXT NOT NULL DEFAULT 'self-check'`, + ); + ensureColumn( + db, + "tasks", + "plan_approval", + `ALTER TABLE tasks ADD COLUMN plan_approval TEXT NOT NULL DEFAULT 'not-required'`, + ); + ensureColumn( + db, + "tasks", + "task_status", + `ALTER TABLE tasks ADD COLUMN task_status TEXT NOT NULL DEFAULT 'todo'`, + ); + ensureColumn( + db, + "tasks", + "estimated_effort", + `ALTER TABLE tasks ADD COLUMN estimated_effort INTEGER DEFAULT NULL`, + ); + ensureColumn( + db, + "tasks", + "dependencies", + `ALTER TABLE tasks ADD COLUMN dependencies TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "tasks", + "blocks_parallel", + `ALTER TABLE tasks ADD COLUMN blocks_parallel INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + "tasks", + "requires_user_input", + `ALTER TABLE tasks ADD COLUMN requires_user_input INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + "tasks", + "auto_retry", + `ALTER TABLE tasks ADD COLUMN auto_retry INTEGER NOT NULL DEFAULT 1`, + ); + ensureColumn( + db, + "tasks", + "max_retries", + `ALTER TABLE tasks ADD COLUMN max_retries INTEGER NOT NULL DEFAULT 2`, + ); + for (const table of ["task_specs"]) { + ensureColumn( + db, + table, + "risk", + `ALTER TABLE ${table} ADD COLUMN risk TEXT NOT NULL DEFAULT 'low'`, + ); + ensureColumn( + db, + table, + "mutation_scope", + `ALTER TABLE ${table} ADD COLUMN mutation_scope TEXT NOT NULL 
DEFAULT 'isolated'`, + ); + ensureColumn( + db, + table, + "verification_type", + `ALTER TABLE ${table} ADD COLUMN verification_type TEXT NOT NULL DEFAULT 'self-check'`, + ); + ensureColumn( + db, + table, + "plan_approval", + `ALTER TABLE ${table} ADD COLUMN plan_approval TEXT NOT NULL DEFAULT 'not-required'`, + ); + ensureColumn( + db, + table, + "estimated_effort", + `ALTER TABLE ${table} ADD COLUMN estimated_effort INTEGER DEFAULT NULL`, + ); + ensureColumn( + db, + table, + "dependencies", + `ALTER TABLE ${table} ADD COLUMN dependencies TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + table, + "blocks_parallel", + `ALTER TABLE ${table} ADD COLUMN blocks_parallel INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + table, + "requires_user_input", + `ALTER TABLE ${table} ADD COLUMN requires_user_input INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + table, + "auto_retry", + `ALTER TABLE ${table} ADD COLUMN auto_retry INTEGER NOT NULL DEFAULT 1`, + ); + ensureColumn( + db, + table, + "max_retries", + `ALTER TABLE ${table} ADD COLUMN max_retries INTEGER NOT NULL DEFAULT 2`, + ); + } +} +function ensureTaskSchedulerTable(db: DbAdapter) { + db.exec(` + CREATE TABLE IF NOT EXISTS task_scheduler ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + task_id TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'queued', + due_at TEXT DEFAULT NULL, + claimed_by TEXT DEFAULT NULL, + dispatched_at TEXT DEFAULT NULL, + consumed_at TEXT DEFAULT NULL, + expires_at TEXT DEFAULT NULL, + updated_at TEXT NOT NULL DEFAULT '', + PRIMARY KEY (milestone_id, slice_id, task_id), + FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) + ) + `); + db.exec(` + CREATE INDEX IF NOT EXISTS idx_task_scheduler_status + ON task_scheduler(status, due_at) + `); +} +function migrateCostUsdToMicroUsd(db: DbAdapter): void { + // Tier 2.7: Migrate cost_usd REAL to cost_micro_usd INTEGER + // Converts floating-point USD values to integer 
micro-USD (multiply by 1,000,000) + // Benefits: eliminates float drift on accumulated costs, easier reasoning about totals + // Purpose: Enable accurate cost tracking at scale without rounding errors + // Consumer: gate_runs cost tracking, cost analytics, budget checks + + // Guard: gate_runs may not exist in minimal legacy DBs (it will be dropped in v58) + if (!tableExists(db, "gate_runs")) return; + + // Add cost_micro_usd column if it doesn't exist + if (!columnExists(db, "gate_runs", "cost_micro_usd")) { + db.exec( + `ALTER TABLE gate_runs ADD COLUMN cost_micro_usd INTEGER DEFAULT NULL`, + ); + } + + // Migrate data: convert cost_usd to cost_micro_usd + // NULL values stay NULL; non-NULL values are multiplied by 1,000,000 + if (columnExists(db, "gate_runs", "cost_usd")) { + db.prepare(` + UPDATE gate_runs + SET cost_micro_usd = CAST(ROUND(cost_usd * 1000000) AS INTEGER) + WHERE cost_usd IS NOT NULL + AND cost_micro_usd IS NULL + `).run(); + } + + // Drop old cost_usd column (SQLite ALTER TABLE DROP is only available in 3.35.0+) + // For safety, we keep the old column as deprecated but unused + // Future: drop after confirming all queries use cost_micro_usd +} +function populateSpecTablesFromExisting(db: DbAdapter): void { + // Tier 1.3 Phase 2: Migrate existing spec data to new spec tables + // This populates milestone_specs, slice_specs, task_specs from existing columns + // Evidence tables are left empty; they populate as tools create new evidence. 
+ + const now = new Date().toISOString(); + + // Migrate milestone specs + db.prepare(` + INSERT OR IGNORE INTO milestone_specs ( + id, vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json, + spec_version, created_at + ) + SELECT + id, vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, '', + 1, COALESCE(created_at, ?) + FROM milestones + WHERE id NOT IN (SELECT id FROM milestone_specs) + `).run(now); + + // Migrate slice specs + db.prepare(` + INSERT OR IGNORE INTO slice_specs ( + milestone_id, slice_id, goal, success_criteria, proof_level, + integration_closure, observability_impact, + adversarial_partner, adversarial_combatant, adversarial_architect, + planning_meeting_json, spec_version, created_at + ) + SELECT + milestone_id, id, goal, success_criteria, proof_level, + integration_closure, observability_impact, + adversarial_partner, adversarial_combatant, adversarial_architect, + planning_meeting_json, 1, COALESCE(created_at, ?) + FROM slices + WHERE (milestone_id, id) NOT IN (SELECT milestone_id, slice_id FROM slice_specs) + `).run(now); + + // Migrate task specs + db.prepare(` + INSERT OR IGNORE INTO task_specs ( + milestone_id, slice_id, task_id, verify, inputs, expected_output, + spec_version, created_at + ) + SELECT + milestone_id, slice_id, id, verify, inputs, expected_output, + 1, COALESCE(created_at, ?) 
+ FROM tasks + WHERE (milestone_id, slice_id, id) NOT IN (SELECT milestone_id, slice_id, task_id FROM task_specs) + `).run(now); +} +function migrateSchema(db: DbAdapter): void { + const row = withQueryTimeout( + () => db.prepare("SELECT MAX(version) as v FROM schema_version").get(), + null, + ); + const currentVersion: number = row ? (row["v"] as number) : 0; + if (currentVersion >= SCHEMA_VERSION) return; + // Backup database before migration so a mid-migration crash doesn't + // leave a partially-migrated DB with no recovery path. + // WAL-safe: checkpoint first to flush WAL into the main DB file, then copy. + if (currentPath && currentPath !== ":memory:" && existsSync(currentPath)) { + try { + const backupPath = `${currentPath}.backup-v${currentVersion}`; + if (!existsSync(backupPath)) { + // Flush WAL to main DB file before copying — without this, the backup + // may be missing committed data that only exists in the -wal file. + try { + db.exec("PRAGMA wal_checkpoint(TRUNCATE)"); + } catch { + /* checkpoint is best-effort */ + } + copyFileSync(currentPath, backupPath); + } + } catch (backupErr) { + // Log but proceed — blocking migration leaves the DB stuck at an old + // schema version permanently on read-only or full filesystems. + logWarning( + "db", + `Pre-migration backup failed: ${backupErr instanceof Error ? 
backupErr.message : String(backupErr)}`, + ); + } + } + db.exec("BEGIN"); + try { + if (currentVersion < 2) { + db.exec(` + CREATE TABLE IF NOT EXISTS artifacts ( + path TEXT PRIMARY KEY, + artifact_type TEXT NOT NULL DEFAULT '', + milestone_id TEXT DEFAULT NULL, + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + full_content TEXT NOT NULL DEFAULT '', + imported_at TEXT NOT NULL DEFAULT '' + ) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 2, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 3) { + db.exec(` + CREATE TABLE IF NOT EXISTS memories ( + seq INTEGER PRIMARY KEY AUTOINCREMENT, + id TEXT NOT NULL UNIQUE, + category TEXT NOT NULL, + content TEXT NOT NULL, + confidence REAL NOT NULL DEFAULT 0.8, + source_unit_type TEXT, + source_unit_id TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + superseded_by TEXT DEFAULT NULL, + hit_count INTEGER NOT NULL DEFAULT 0 + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS memory_processed_units ( + unit_key TEXT PRIMARY KEY, + activity_file TEXT, + processed_at TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memories_active ON memories(superseded_by)", + ); + db.exec("DROP VIEW IF EXISTS active_memories"); + db.exec( + "CREATE VIEW active_memories AS SELECT * FROM memories WHERE superseded_by IS NULL", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 3, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 4) { + ensureColumn( + db, + "decisions", + "made_by", + `ALTER TABLE decisions ADD COLUMN made_by TEXT NOT NULL DEFAULT 'agent'`, + ); + db.exec("DROP VIEW IF EXISTS active_decisions"); + db.exec( + "CREATE VIEW active_decisions AS SELECT * FROM decisions WHERE superseded_by IS NULL", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES 
(:version, :applied_at)", + ).run({ + ":version": 4, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 5) { + db.exec(` + CREATE TABLE IF NOT EXISTS milestones ( + id TEXT PRIMARY KEY, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'active', + created_at TEXT NOT NULL, + completed_at TEXT DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS slices ( + milestone_id TEXT NOT NULL, + id TEXT NOT NULL, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + risk TEXT NOT NULL DEFAULT 'medium', + created_at TEXT NOT NULL DEFAULT '', + completed_at TEXT DEFAULT NULL, + PRIMARY KEY (milestone_id, id), + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS tasks ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + id TEXT NOT NULL, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + one_liner TEXT NOT NULL DEFAULT '', + narrative TEXT NOT NULL DEFAULT '', + verification_result TEXT NOT NULL DEFAULT '', + duration TEXT NOT NULL DEFAULT '', + completed_at TEXT DEFAULT NULL, + blocker_discovered INTEGER DEFAULT 0, + deviations TEXT NOT NULL DEFAULT '', + known_issues TEXT NOT NULL DEFAULT '', + key_files TEXT NOT NULL DEFAULT '[]', + key_decisions TEXT NOT NULL DEFAULT '[]', + full_summary_md TEXT NOT NULL DEFAULT '', + PRIMARY KEY (milestone_id, slice_id, id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS verification_evidence ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id TEXT NOT NULL DEFAULT '', + slice_id TEXT NOT NULL DEFAULT '', + milestone_id TEXT NOT NULL DEFAULT '', + command TEXT NOT NULL DEFAULT '', + exit_code INTEGER DEFAULT 0, + verdict TEXT NOT NULL DEFAULT '', + duration_ms INTEGER DEFAULT 0, + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, 
id) + ) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 5, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 6) { + ensureColumn( + db, + "slices", + "full_summary_md", + `ALTER TABLE slices ADD COLUMN full_summary_md TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "full_uat_md", + `ALTER TABLE slices ADD COLUMN full_uat_md TEXT NOT NULL DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 6, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 7) { + ensureColumn( + db, + "slices", + "depends", + `ALTER TABLE slices ADD COLUMN depends TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "slices", + "demo", + `ALTER TABLE slices ADD COLUMN demo TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "depends_on", + `ALTER TABLE milestones ADD COLUMN depends_on TEXT NOT NULL DEFAULT '[]'`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 7, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 8) { + ensureColumn( + db, + "milestones", + "vision", + `ALTER TABLE milestones ADD COLUMN vision TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "success_criteria", + `ALTER TABLE milestones ADD COLUMN success_criteria TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "milestones", + "key_risks", + `ALTER TABLE milestones ADD COLUMN key_risks TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "milestones", + "proof_strategy", + `ALTER TABLE milestones ADD COLUMN proof_strategy TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "milestones", + "verification_contract", + `ALTER TABLE milestones ADD COLUMN verification_contract TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + 
db, + "milestones", + "verification_integration", + `ALTER TABLE milestones ADD COLUMN verification_integration TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "verification_operational", + `ALTER TABLE milestones ADD COLUMN verification_operational TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "verification_uat", + `ALTER TABLE milestones ADD COLUMN verification_uat TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "definition_of_done", + `ALTER TABLE milestones ADD COLUMN definition_of_done TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "milestones", + "requirement_coverage", + `ALTER TABLE milestones ADD COLUMN requirement_coverage TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "boundary_map_markdown", + `ALTER TABLE milestones ADD COLUMN boundary_map_markdown TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "goal", + `ALTER TABLE slices ADD COLUMN goal TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "success_criteria", + `ALTER TABLE slices ADD COLUMN success_criteria TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "proof_level", + `ALTER TABLE slices ADD COLUMN proof_level TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "integration_closure", + `ALTER TABLE slices ADD COLUMN integration_closure TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "observability_impact", + `ALTER TABLE slices ADD COLUMN observability_impact TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "uat_verdict", + `ALTER TABLE slices ADD COLUMN uat_verdict TEXT DEFAULT NULL`, + ); + ensureColumn( + db, + "tasks", + "description", + `ALTER TABLE tasks ADD COLUMN description TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "tasks", + "estimate", + `ALTER TABLE tasks ADD COLUMN estimate TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "tasks", + "files", + 
`ALTER TABLE tasks ADD COLUMN files TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "tasks", + "verify", + `ALTER TABLE tasks ADD COLUMN verify TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "tasks", + "inputs", + `ALTER TABLE tasks ADD COLUMN inputs TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "tasks", + "expected_output", + `ALTER TABLE tasks ADD COLUMN expected_output TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "tasks", + "observability_impact", + `ALTER TABLE tasks ADD COLUMN observability_impact TEXT NOT NULL DEFAULT ''`, + ); + db.exec(` + CREATE TABLE IF NOT EXISTS replan_history ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + milestone_id TEXT NOT NULL DEFAULT '', + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + summary TEXT NOT NULL DEFAULT '', + previous_artifact_path TEXT DEFAULT NULL, + replacement_artifact_path TEXT DEFAULT NULL, + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS assessments ( + path TEXT PRIMARY KEY, + milestone_id TEXT NOT NULL DEFAULT '', + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + status TEXT NOT NULL DEFAULT '', + scope TEXT NOT NULL DEFAULT '', + full_content TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_replan_history_milestone ON replan_history(milestone_id, created_at)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 8, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 9) { + ensureColumn( + db, + "slices", + "sequence", + `ALTER TABLE slices ADD COLUMN sequence INTEGER DEFAULT 0`, + ); + ensureColumn( + db, + "tasks", + "sequence", + `ALTER TABLE tasks ADD COLUMN sequence INTEGER DEFAULT 0`, + ); + db.prepare( + "INSERT INTO 
schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 9, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 10) { + ensureColumn( + db, + "slices", + "replan_triggered_at", + `ALTER TABLE slices ADD COLUMN replan_triggered_at TEXT DEFAULT NULL`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 10, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 11) { + ensureColumn( + db, + "tasks", + "full_plan_md", + `ALTER TABLE tasks ADD COLUMN full_plan_md TEXT NOT NULL DEFAULT ''`, + ); + // Add unique constraint to replan_history for idempotency: + // one replan record per blocker task per slice per milestone. + db.exec(` + CREATE UNIQUE INDEX IF NOT EXISTS idx_replan_history_unique + ON replan_history(milestone_id, slice_id, task_id) + WHERE slice_id IS NOT NULL AND task_id IS NOT NULL + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 11, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 12) { + db.exec(` + CREATE TABLE IF NOT EXISTS quality_gates ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + gate_id TEXT NOT NULL, + scope TEXT NOT NULL DEFAULT 'slice', + task_id TEXT DEFAULT NULL, + status TEXT NOT NULL DEFAULT 'pending', + verdict TEXT NOT NULL DEFAULT '', + rationale TEXT NOT NULL DEFAULT '', + findings TEXT NOT NULL DEFAULT '', + evaluated_at TEXT DEFAULT NULL, + PRIMARY KEY (milestone_id, slice_id, gate_id, COALESCE(task_id, '')), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 12, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 13) { + // Hot-path indexes for auto-loop dispatch queries + db.exec( + 
"CREATE INDEX IF NOT EXISTS idx_tasks_active ON tasks(milestone_id, slice_id, status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_slices_active ON slices(milestone_id, status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_milestones_status ON milestones(status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_quality_gates_pending ON quality_gates(milestone_id, slice_id, status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_verification_evidence_task ON verification_evidence(milestone_id, slice_id, task_id)", + ); + ensureVerificationEvidenceDedupIndex(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 13, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 14) { + db.exec(` + CREATE TABLE IF NOT EXISTS slice_dependencies ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + depends_on_slice_id TEXT NOT NULL, + PRIMARY KEY (milestone_id, slice_id, depends_on_slice_id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id), + FOREIGN KEY (milestone_id, depends_on_slice_id) REFERENCES slices(milestone_id, id) + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_slice_deps_target ON slice_dependencies(milestone_id, depends_on_slice_id)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 14, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 15) { + db.exec(` + CREATE TABLE IF NOT EXISTS gate_runs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + trace_id TEXT NOT NULL, + turn_id TEXT NOT NULL, + gate_id TEXT NOT NULL, + gate_type TEXT NOT NULL DEFAULT '', + unit_type TEXT DEFAULT NULL, + unit_id TEXT DEFAULT NULL, + milestone_id TEXT DEFAULT NULL, + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + outcome TEXT NOT NULL DEFAULT 'pass', + failure_class TEXT NOT NULL DEFAULT 'none', + rationale TEXT NOT NULL DEFAULT 
'', + findings TEXT NOT NULL DEFAULT '', + attempt INTEGER NOT NULL DEFAULT 1, + max_attempts INTEGER NOT NULL DEFAULT 1, + retryable INTEGER NOT NULL DEFAULT 0, + evaluated_at TEXT NOT NULL DEFAULT '', + duration_ms INTEGER DEFAULT NULL, + cost_micro_usd INTEGER DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS turn_git_transactions ( + trace_id TEXT NOT NULL, + turn_id TEXT NOT NULL, + unit_type TEXT DEFAULT NULL, + unit_id TEXT DEFAULT NULL, + stage TEXT NOT NULL DEFAULT 'turn-start', + action TEXT NOT NULL DEFAULT 'status-only', + push INTEGER NOT NULL DEFAULT 0, + status TEXT NOT NULL DEFAULT 'ok', + error TEXT DEFAULT NULL, + metadata_json TEXT NOT NULL DEFAULT '{}', + updated_at TEXT NOT NULL DEFAULT '', + PRIMARY KEY (trace_id, turn_id, stage) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS audit_events ( + event_id TEXT PRIMARY KEY, + trace_id TEXT NOT NULL, + turn_id TEXT DEFAULT NULL, + caused_by TEXT DEFAULT NULL, + category TEXT NOT NULL, + type TEXT NOT NULL, + ts TEXT NOT NULL, + payload_json TEXT NOT NULL DEFAULT '{}' + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS audit_turn_index ( + trace_id TEXT NOT NULL, + turn_id TEXT NOT NULL, + first_ts TEXT NOT NULL, + last_ts TEXT NOT NULL, + event_count INTEGER NOT NULL DEFAULT 0, + PRIMARY KEY (trace_id, turn_id) + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_gate_runs_turn ON gate_runs(trace_id, turn_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_gate_runs_lookup ON gate_runs(milestone_id, slice_id, task_id, gate_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_turn_git_tx_turn ON turn_git_transactions(trace_id, turn_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_audit_events_trace ON audit_events(trace_id, ts)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_audit_events_turn ON audit_events(trace_id, turn_id, ts)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 15, + 
":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 16) { + db.exec(` + CREATE TABLE IF NOT EXISTS llm_task_outcomes ( + model_id TEXT NOT NULL, + provider TEXT NOT NULL, + unit_type TEXT NOT NULL, + unit_id TEXT NOT NULL, + succeeded INTEGER NOT NULL DEFAULT 0, + retries INTEGER NOT NULL DEFAULT 0, + escalated INTEGER NOT NULL DEFAULT 0, + verification_passed INTEGER DEFAULT NULL, + blocker_discovered INTEGER NOT NULL DEFAULT 0, + duration_ms INTEGER DEFAULT NULL, + tokens_total INTEGER DEFAULT NULL, + cost_usd REAL DEFAULT NULL, + recorded_at INTEGER NOT NULL + ) + `); + db.exec( + "CREATE UNIQUE INDEX IF NOT EXISTS idx_llm_task_outcomes_identity ON llm_task_outcomes(unit_type, unit_id, recorded_at)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_model_unit ON llm_task_outcomes(model_id, unit_type, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_unit ON llm_task_outcomes(unit_type, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_provider ON llm_task_outcomes(provider, recorded_at DESC)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 16, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 17) { + ensureColumn( + db, + "tasks", + "verification_status", + `ALTER TABLE tasks ADD COLUMN verification_status TEXT NOT NULL DEFAULT ''`, + ); + // Backfill verification_status from existing verification_evidence rows so the + // prior-task guard works on databases upgraded mid-project (not just new ones). 
+ db.exec(` + UPDATE tasks + SET verification_status = CASE + WHEN (SELECT COUNT(*) FROM verification_evidence ve + WHERE ve.milestone_id = tasks.milestone_id + AND ve.slice_id = tasks.slice_id + AND ve.task_id = tasks.id) = 0 + THEN '' + WHEN (SELECT COUNT(*) FROM verification_evidence ve + WHERE ve.milestone_id = tasks.milestone_id + AND ve.slice_id = tasks.slice_id + AND ve.task_id = tasks.id + AND ve.exit_code != 0) = 0 + THEN 'all_pass' + WHEN (SELECT COUNT(*) FROM verification_evidence ve + WHERE ve.milestone_id = tasks.milestone_id + AND ve.slice_id = tasks.slice_id + AND ve.task_id = tasks.id + AND ve.exit_code = 0) > 0 + THEN 'partial' + ELSE 'all_fail' + END + WHERE tasks.status IN ('complete', 'done') + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 17, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 18) { + ensureColumn( + db, + "slices", + "adversarial_partner", + `ALTER TABLE slices ADD COLUMN adversarial_partner TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "adversarial_combatant", + `ALTER TABLE slices ADD COLUMN adversarial_combatant TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "adversarial_architect", + `ALTER TABLE slices ADD COLUMN adversarial_architect TEXT NOT NULL DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 18, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 19) { + ensureColumn( + db, + "slices", + "planning_meeting_json", + `ALTER TABLE slices ADD COLUMN planning_meeting_json TEXT NOT NULL DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 19, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 20) { + ensureColumn( + db, + "milestones", + 
"vision_meeting_json", + `ALTER TABLE milestones ADD COLUMN vision_meeting_json TEXT NOT NULL DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 20, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 21) { + ensureRepoProfileTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 21, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 22) { + // SF ADR-011: progressive planning. is_sketch=1 means the slice is a 2-3 + // sentence sketch awaiting refine-slice expansion; refine fills in the + // real plan and clears the flag. sketch_scope holds the milestone + // planner's stored scope hint that refine treats as a hard boundary. + ensureColumn( + db, + "slices", + "is_sketch", + `ALTER TABLE slices ADD COLUMN is_sketch INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + "slices", + "sketch_scope", + `ALTER TABLE slices ADD COLUMN sketch_scope TEXT NOT NULL DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 22, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 23) { + // ADR-011 Phase 2 (SF ADR): mid-execution escalation. escalation_pending=1 + // marks a task that paused for a user decision; escalation_artifact_path + // points to the T##-ESCALATION.json file containing options + recommendation. + // State derivation will emit phase='escalating-task' when any task in the + // active slice has escalation_pending=1; dispatch returns 'stop' so the + // loop never bypasses a pending decision. 
+ ensureColumn( + db, + "tasks", + "escalation_pending", + `ALTER TABLE tasks ADD COLUMN escalation_pending INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + "tasks", + "escalation_artifact_path", + `ALTER TABLE tasks ADD COLUMN escalation_artifact_path TEXT DEFAULT NULL`, + ); + try { + db.exec( + "CREATE INDEX IF NOT EXISTS idx_tasks_escalation_pending ON tasks(milestone_id, slice_id, escalation_pending)", + ); + } catch { + /* index creation is opportunistic — fall through if backend lacks it */ + } + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 23, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 24) { + // ADR-011 P2 (SF ADR): the third escalation flag for the + // continueWithDefault=true case — an artifact is recorded for human + // review later, but the loop is NOT paused. Mutually exclusive with + // escalation_pending (the writer flips one or the other). + ensureColumn( + db, + "tasks", + "escalation_awaiting_review", + `ALTER TABLE tasks ADD COLUMN escalation_awaiting_review INTEGER NOT NULL DEFAULT 0`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 24, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 25) { + // SF ADR-011 P2 carry-forward: when an escalation is resolved, the user's + // choice should be visible to the next execute-task agent in the same + // slice. escalation_override_applied=0 marks "resolved but not yet + // injected into a downstream prompt"; the prompt builder calls + // claimEscalationOverride which atomically flips it to 1 (idempotent + // race-safe claim). Per-task granularity so multi-task slices can + // carry multiple resolved escalations forward independently. 
+ ensureColumn( + db, + "tasks", + "escalation_override_applied", + `ALTER TABLE tasks ADD COLUMN escalation_override_applied INTEGER NOT NULL DEFAULT 0`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 25, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 26) { + db.exec(` + CREATE TABLE IF NOT EXISTS uok_runs ( + run_id TEXT PRIMARY KEY, + session_id TEXT DEFAULT NULL, + path TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'started', + started_at TEXT NOT NULL, + ended_at TEXT DEFAULT NULL, + error TEXT DEFAULT NULL, + flags_json TEXT NOT NULL DEFAULT '{}', + updated_at TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_runs_status_started ON uok_runs(status, started_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_runs_session ON uok_runs(session_id, started_at DESC)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 26, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 27) { + ensureSolverEvalTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 27, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 28) { + // UOK observability: gate execution latency + // Guard: gate_runs table may not exist in minimal legacy DBs (it will be dropped in v58) + if (tableExists(db, "gate_runs")) { + ensureColumn( + db, + "gate_runs", + "duration_ms", + "ALTER TABLE gate_runs ADD COLUMN duration_ms INTEGER DEFAULT NULL", + ); + } + // UOK circuit breaker state + db.exec(` + CREATE TABLE IF NOT EXISTS gate_circuit_breakers ( + gate_id TEXT PRIMARY KEY, + state TEXT NOT NULL DEFAULT 'closed', + failure_streak INTEGER NOT NULL DEFAULT 0, + last_failure_at TEXT DEFAULT NULL, + opened_at TEXT DEFAULT NULL, + half_open_attempts 
INTEGER NOT NULL DEFAULT 0, + updated_at TEXT NOT NULL DEFAULT '' + ) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 28, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 29) { + ensureHeadlessRunTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 29, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 30) { + ensureSelfFeedbackTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 30, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 31) { + ensureUokMessageTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 31, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 32) { + ensureTaskCreatedAtColumn(db); + ensureSpecSchemaTables(db); + // Populate spec tables from existing spec columns in runtime tables + populateSpecTablesFromExisting(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 32, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 33) { + ensureColumn( + db, + "milestones", + "sequence", + `ALTER TABLE milestones ADD COLUMN sequence INTEGER DEFAULT 0`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 33, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 34) { + ensureTaskCreatedAtColumn(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 34, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 35) { + ensureBacklogTables(db); + 
db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 35, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 36) { + migrateCostUsdToMicroUsd(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 36, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 37) { + ensureScheduleTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 37, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 38) { + try { + db.exec( + "ALTER TABLE memories ADD COLUMN tags TEXT NOT NULL DEFAULT '[]'", + ); + } catch { + // Column may already exist on fresh DBs + } + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 38, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 39) { + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memory_sources_content_hash ON memory_sources(content_hash)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memories_category ON memories(superseded_by, category)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 39, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 40) { + db.exec(` + CREATE TABLE IF NOT EXISTS judgments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + unit_id TEXT NOT NULL, + decision TEXT NOT NULL DEFAULT '', + alternatives_json TEXT NOT NULL DEFAULT '[]', + reasoning TEXT NOT NULL DEFAULT '', + confidence TEXT NOT NULL DEFAULT 'medium', + ts TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_judgments_unit_id ON judgments(unit_id, ts DESC)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ 
+ ":version": 40, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 41) { + ensureRetrievalEvidenceTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 41, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 42) { + ensureColumn( + db, + "milestones", + "product_research_json", + `ALTER TABLE milestones ADD COLUMN product_research_json TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestone_specs", + "product_research_json", + `ALTER TABLE milestone_specs ADD COLUMN product_research_json TEXT DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 42, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 43) { + db.exec(` + CREATE TABLE IF NOT EXISTS session_mode_state ( + id INTEGER PRIMARY KEY CHECK (id = 1), + work_mode TEXT NOT NULL DEFAULT 'chat', + run_control TEXT NOT NULL DEFAULT 'manual', + permission_profile TEXT NOT NULL DEFAULT 'restricted', + model_mode TEXT NOT NULL DEFAULT 'smart', + surface TEXT NOT NULL DEFAULT 'tui', + updated_at TEXT NOT NULL DEFAULT '' + ) + `); + db.exec(` + INSERT OR IGNORE INTO session_mode_state (id, work_mode, run_control, permission_profile, model_mode, surface, updated_at) + VALUES (1, 'chat', 'manual', 'restricted', 'smart', 'tui', datetime('now')) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 43, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 44) { + ensureSpecSchemaTables(db); + ensureTaskFrontmatterColumns(db); + db.exec(` + UPDATE tasks + SET task_status = CASE status + WHEN 'complete' THEN 'done' + WHEN 'completed' THEN 'done' + WHEN 'done' THEN 'done' + WHEN 'running' THEN 'running' + WHEN 'in_progress' THEN 'running' + WHEN 'blocked' THEN 'blocked' + WHEN 
'failed' THEN 'failed' + WHEN 'cancelled' THEN 'cancelled' + ELSE COALESCE(NULLIF(task_status, ''), 'todo') + END + `); + db.exec(` + UPDATE task_specs + SET risk = COALESCE((SELECT tasks.risk FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), risk), + mutation_scope = COALESCE((SELECT tasks.mutation_scope FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), mutation_scope), + verification_type = COALESCE((SELECT tasks.verification_type FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), verification_type), + plan_approval = COALESCE((SELECT tasks.plan_approval FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), plan_approval), + estimated_effort = COALESCE((SELECT tasks.estimated_effort FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), estimated_effort), + dependencies = COALESCE((SELECT tasks.dependencies FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), dependencies), + blocks_parallel = COALESCE((SELECT tasks.blocks_parallel FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), blocks_parallel), + requires_user_input = COALESCE((SELECT tasks.requires_user_input FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), requires_user_input), + auto_retry = COALESCE((SELECT tasks.auto_retry FROM tasks + WHERE tasks.milestone_id = 
task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), auto_retry), + max_retries = COALESCE((SELECT tasks.max_retries FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), max_retries) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 44, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 45) { + ensureTaskSchedulerTable(db); + db.exec(` + INSERT OR IGNORE INTO task_scheduler ( + milestone_id, slice_id, task_id, status, updated_at + ) + SELECT milestone_id, slice_id, id, 'queued', datetime('now') + FROM tasks + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 45, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 46) { + // validation_runs: mirrors droid's validation-contract.md + validation-state.json + // pattern. Each run stores the contract spec inline and its execution state. 
+ db.exec(` + CREATE TABLE IF NOT EXISTS validation_runs ( + run_id TEXT PRIMARY KEY, + milestone_id TEXT NOT NULL, + slice_id TEXT, + task_id TEXT, + contract TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + verdict TEXT NOT NULL DEFAULT '', + rationale TEXT NOT NULL DEFAULT '', + findings TEXT NOT NULL DEFAULT '', + started_at TEXT, + completed_at TEXT, + created_at TEXT NOT NULL, + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE INDEX IF NOT EXISTS idx_validation_runs_scope + ON validation_runs(milestone_id, slice_id, task_id) + `); + db.exec(` + CREATE VIEW IF NOT EXISTS latest_validation_state AS + SELECT vr.* + FROM validation_runs vr + WHERE vr.rowid = ( + SELECT MAX(v2.rowid) + FROM validation_runs v2 + WHERE v2.milestone_id = vr.milestone_id + AND v2.slice_id IS vr.slice_id + AND v2.task_id IS vr.task_id + ) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 46, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 47) { + // Drop unused superseded_by column from validation_runs. + // The column was never written or queried — dead schema from v46. + const cols = db + .prepare("PRAGMA table_info(validation_runs)") + .all() + .map((c) => c.name); + if (cols.includes("superseded_by")) { + db.exec("ALTER TABLE validation_runs DROP COLUMN superseded_by"); + } + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 47, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 48) { + // Session layer: create tables, backfill from existing headless_runs and + // audit_turn_index so historical data is queryable from day one. + // Message text will be NULL for backfilled turns — it was never stored. + ensureSessionTables(db); + // Backfill: one session per headless run. 
+ db.exec(` + INSERT OR IGNORE INTO sessions (session_id, trace_id, mode, cwd, created_at, updated_at) + SELECT run_id, NULL, 'headless', '', created_at, updated_at + FROM headless_runs + `); + // Backfill: one session per distinct trace_id in audit_turn_index. + // Reconstruct created_at/updated_at from the min/max timestamps. + db.exec(` + INSERT OR IGNORE INTO sessions (session_id, trace_id, mode, cwd, created_at, updated_at) + SELECT trace_id, trace_id, 'interactive', + '', MIN(first_ts), MAX(last_ts) + FROM audit_turn_index + GROUP BY trace_id + `); + // Backfill: one turn row per (trace_id, turn_id) in audit_turn_index. + // turn_index derived from row order within trace; message text is NULL. + db.exec(` + INSERT OR IGNORE INTO turns (session_id, turn_index, user_message, assistant_response, ts) + SELECT + trace_id, + ROW_NUMBER() OVER (PARTITION BY trace_id ORDER BY first_ts) - 1, + NULL, NULL, + first_ts + FROM audit_turn_index + `); + // Rebuild FTS index from any turns that have text. + // None from backfill yet, but required so the FTS table is consistent. + db.exec(`INSERT INTO turns_fts(turns_fts) VALUES ('rebuild')`); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 48, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 49) { + // Add session_snapshots table — checkpoints before irreversible ops. + // Safe to call on fresh DBs too (CREATE TABLE IF NOT EXISTS). + ensureSessionSnapshotTable(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 49, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 50) { + // Add sleeptime_consolidation_queue — decouples memory consolidation + // from the conversation turn so the daemon can drain it asynchronously. 
+ ensureSleeptimeQueueTable(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 50, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 51) { + // Add deploy/smoke/release/rollback tables — closes the vision→production loop. + // deploy_runs tracks each deployment attempt; smoke_results tracks live verification; + // release_records tracks version bumps and publishes; rollback_runs tracks reversions. + ensureDeployTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 51, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 52) { + // Add triage_runs/evals/items/skills, runtime_counters, and + // validation_attention_markers tables — migrate JSONL structured state to DB. + ensureTriageTables(db); + ensureRuntimeCounterTable(db); + ensureValidationAttentionMarkersTable(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 52, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 53) { + // Add routing_history and routing_feedback tables — migrate file-based + // routing history to DB-first storage. 
+ db.exec(` + CREATE TABLE IF NOT EXISTS routing_history ( + pattern TEXT NOT NULL, + tier TEXT NOT NULL, + success_count INTEGER NOT NULL DEFAULT 0, + fail_count INTEGER NOT NULL DEFAULT 0, + updated_at TEXT NOT NULL, + PRIMARY KEY (pattern, tier) + ); + CREATE TABLE IF NOT EXISTS routing_feedback ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + pattern TEXT NOT NULL, + tier TEXT NOT NULL, + feedback TEXT NOT NULL, + recorded_at TEXT NOT NULL + ); + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 53, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 54) { + // Migrate metrics ledger from .sf/runtime/metrics.json to DB-first + // unit_metrics and project_metrics_meta tables. + db.exec(` + CREATE TABLE IF NOT EXISTS unit_metrics ( + type TEXT NOT NULL, + id TEXT NOT NULL, + started_at INTEGER NOT NULL, + finished_at INTEGER NOT NULL, + model TEXT NOT NULL, + auto_session_key TEXT, + tokens_input INTEGER NOT NULL DEFAULT 0, + tokens_output INTEGER NOT NULL DEFAULT 0, + tokens_cache_read INTEGER NOT NULL DEFAULT 0, + tokens_cache_write INTEGER NOT NULL DEFAULT 0, + tokens_total INTEGER NOT NULL DEFAULT 0, + cost REAL NOT NULL DEFAULT 0, + tool_calls INTEGER NOT NULL DEFAULT 0, + assistant_messages INTEGER NOT NULL DEFAULT 0, + user_messages INTEGER NOT NULL DEFAULT 0, + api_requests INTEGER NOT NULL DEFAULT 0, + tier TEXT, + model_downgraded INTEGER, + context_window_tokens INTEGER, + truncation_sections INTEGER, + continue_here_fired INTEGER, + prompt_char_count INTEGER, + baseline_char_count INTEGER, + cache_hit_rate INTEGER, + skills TEXT, + PRIMARY KEY (type, id, started_at) + ); + CREATE TABLE IF NOT EXISTS project_metrics_meta ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ); + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 54, + ":applied_at": new Date().toISOString(), + }); + } + 
if (currentVersion < 55) { + // Schema v55: composite index for audit_events + task access-pattern views + // Guard: audit_events may not exist in minimal legacy DBs (it will be dropped in v58) + if (tableExists(db, "audit_events")) { + db.exec( + `CREATE INDEX IF NOT EXISTS idx_audit_events_category ON audit_events(category, type, ts DESC)`, + ); + } + db.exec( + `CREATE VIEW IF NOT EXISTS active_tasks AS SELECT * FROM tasks WHERE status NOT IN ('done','complete','completed','cancelled')`, + ); + db.exec(` + CREATE VIEW IF NOT EXISTS v_task_full AS + SELECT t.*, ts.spec_version, ts.verify AS spec_verify, + ts.inputs AS spec_inputs, ts.expected_output AS spec_expected_output + FROM tasks t + LEFT JOIN task_specs ts + ON t.milestone_id = ts.milestone_id + AND t.slice_id = ts.slice_id + AND t.id = ts.task_id + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 55, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 56) { + // Schema v56: move metrics table to dedicated metrics.db — drop from main DB + // to eliminate WAL pressure from high-frequency telemetry writes. + db.exec(`DROP TABLE IF EXISTS metrics`); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 56, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 57) { + // Schema v57: add archived_at to sessions for soft-delete / archive support. 
+ db.exec(`ALTER TABLE sessions ADD COLUMN archived_at TEXT DEFAULT NULL`); + db.exec( + `CREATE INDEX IF NOT EXISTS idx_sessions_archived ON sessions(archived_at) WHERE archived_at IS NOT NULL`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 57, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 58) { + // Schema v58: move trace data to JSONL files — drop gate_runs, turn_git_transactions, audit_events + db.exec("DROP TABLE IF EXISTS gate_runs"); + db.exec("DROP TABLE IF EXISTS turn_git_transactions"); + db.exec("DROP TABLE IF EXISTS audit_events"); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 58, + ":applied_at": new Date().toISOString(), + }); + } + db.exec("COMMIT"); + } catch (err) { + db.exec("ROLLBACK"); + throw err; + } +} +let currentDb: DbAdapter | null = null; +let currentPath: string | null = null; +let currentPid = 0; +let _exitHandlerRegistered = false; +let _dbOpenAttempted = false; +/** + * Get the name of the SQLite provider currently loaded (or null if unavailable). + */ +export function getDbProvider(): string { + loadProvider(); + return "node:sqlite"; +} +/** + * Check if the database is currently open and available for queries. + */ +export function isDbAvailable(): boolean { + return currentDb !== null; +} +/** + * Returns true if openDatabase() has been called at least once this session. + * Used to distinguish "DB not yet initialized" from "DB genuinely unavailable" + * so that early callers (e.g. before_agent_start context injection) don't + * trigger a false degraded-mode warning. + */ +export function wasDbOpenAttempted(): boolean { + return _dbOpenAttempted; +} +/** + * Get the current database adapter, or null if the database is not open. 
+ */ +export function getDatabase(): DbAdapter | null { + return currentDb; +} +/** + * Open the database at the specified path. Returns true if successful. + */ +export function openDatabase(path: string): void { + _dbOpenAttempted = true; + if (currentDb && currentPath !== path) closeDatabase(); + if (currentDb && currentPath === path) return true; + const rawDb = openRawDb(path); + if (!rawDb) return false; + const adapter = createAdapter(rawDb); + const fileBacked = path !== ":memory:"; + try { + initSchema(adapter, fileBacked); + createDatabaseSnapshot(rawDb, path); + performDatabaseMaintenance(rawDb, path); + } catch (err) { + // Corrupt freelist: DDL fails with "malformed" but VACUUM can rebuild. + // Attempt VACUUM recovery before giving up (see #2519). + if ( + fileBacked && + err instanceof Error && + err.message?.includes("malformed") + ) { + try { + adapter.exec("VACUUM"); + initSchema(adapter, fileBacked); + process.stderr.write("sf-db: recovered corrupt database via VACUUM\n"); + } catch (retryErr) { + try { + adapter.close(); + } catch (e) { + logWarning("db", `close after VACUUM failed: ${(e as Error)?.message}`); + } + throw retryErr; + } + } else { + try { + adapter.close(); + } catch (e) { + logWarning("db", `close after VACUUM failed: ${(e as Error)?.message}`); + } + throw err; + } + } + currentDb = adapter; + currentPath = path; + currentPid = process.pid; + if (!_exitHandlerRegistered) { + _exitHandlerRegistered = true; + process.on("exit", () => { + try { + closeDatabase(); + } catch (e) { + logWarning("db", `exit handler close failed: ${(e as Error)?.message}`); + } + }); + } + return true; +} +/** + * Flush the WAL to the main DB file using a PASSIVE checkpoint. + * + * Purpose: safely persist all committed transactions to the main DB file at + * controlled loop boundaries (post-unit finalize). 
 With wal_autocheckpoint=0,
 * this is the only way WAL pages are flushed — keeping the checkpoint window
 * predictable and crash-safe (no mid-operation checkpoint that an OOM kill
 * could interrupt).
 *
 * PASSIVE is used (not TRUNCATE) so concurrent readers are not blocked. The
 * WAL is truncated on close via closeDatabase().
 *
 * Consumer: runFinalize() in auto/phases.js after each successful unit.
 */
export function checkpointWal(): void {
  if (!currentDb) return;
  try {
    currentDb.exec("PRAGMA wal_checkpoint(PASSIVE)");
  } catch (e) {
    // Best-effort: a failed checkpoint is logged, never thrown.
    logWarning(
      "db",
      `WAL checkpoint failed: ${e instanceof Error ? e.message : String(e)}`,
    );
  }
}

/**
 * Close the database connection.
 *
 * Best-effort teardown: TRUNCATE-checkpoint the WAL, reclaim free pages via
 * incremental vacuum, then close. Each step logs a warning on failure rather
 * than throwing, and module state is reset so a later reopen starts clean.
 */
export function closeDatabase(): void {
  if (currentDb) {
    try {
      currentDb.exec("PRAGMA wal_checkpoint(TRUNCATE)");
    } catch (e) {
      logWarning("db", `WAL checkpoint failed: ${(e as Error)?.message}`);
    }
    try {
      // Incremental vacuum to reclaim space without blocking
      currentDb.exec("PRAGMA incremental_vacuum(64)");
    } catch (e) {
      logWarning("db", `incremental vacuum failed: ${(e as Error)?.message}`);
    }
    try {
      currentDb.close();
    } catch (e) {
      logWarning("db", `database close failed: ${(e as Error)?.message}`);
    }
    currentDb = null;
    currentPath = null;
    currentPid = 0;
    _dbOpenAttempted = false;
  }
}
/**
 * Vacuum the database to reclaim disk space and optimize.
 * Run a full VACUUM — call sparingly (e.g. after milestone completion).
 */
export function vacuumDatabase(): void {
  if (!currentDb) return;
  try {
    currentDb.exec("VACUUM");
  } catch (e) {
    logWarning("db", `VACUUM failed: ${(e as Error)?.message}`);
  }
}
// Nesting depth for the re-entrant transaction helpers below; SQLite does
// not support nested BEGIN/COMMIT, so inner calls just run their callback.
let _txDepth = 0;
/**
 * Execute a callback within a database transaction (BEGIN...COMMIT or ROLLBACK).
+ */ +export function transaction(fn: () => T): T { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + // Re-entrant: if already inside a transaction, just run fn() without + // starting a new one. SQLite does not support nested BEGIN/COMMIT. + if (_txDepth > 0) { + _txDepth++; + try { + return fn(); + } finally { + _txDepth--; + } + } + _txDepth++; + currentDb.exec("BEGIN"); + try { + const result = fn(); + currentDb.exec("COMMIT"); + return result; + } catch (err) { + currentDb.exec("ROLLBACK"); + throw err; + } finally { + _txDepth--; + } +} +/** + * Wrap a block of reads in a DEFERRED transaction so that all SELECTs observe + * a consistent snapshot of the DB even if a concurrent writer commits between + * them. Use this for multi-query read flows (e.g. tool executors that query + * milestone + slices + counts and want one snapshot). Re-entrant — if already + * inside a transaction, runs fn() without starting a nested one. + */ +/** + * Execute a callback within a read-only database transaction. + */ +export function readTransaction(fn: () => T): T { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + if (_txDepth > 0) { + _txDepth++; + try { + return fn(); + } finally { + _txDepth--; + } + } + _txDepth++; + currentDb.exec("BEGIN DEFERRED"); + try { + const result = fn(); + currentDb.exec("COMMIT"); + return result; + } catch (err) { + try { + currentDb.exec("ROLLBACK"); + } catch (rollbackErr) { + // A failed ROLLBACK after a failed read is a split-brain signal — + // the transaction is in an indeterminate state. Surface it via the + // logger instead of swallowing it. 
+ logError("db", "snapshotState ROLLBACK failed", { + error: rollbackErr.message, + }); + } + throw err; + } finally { + _txDepth--; + } +} +export function insertDecision(d: DecisionInput): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO decisions (id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by) + VALUES (:id, :when_context, :scope, :decision, :choice, :rationale, :revisable, :made_by, :superseded_by)`) + .run({ + ":id": d.id, + ":when_context": d.when_context, + ":scope": d.scope, + ":decision": d.decision, + ":choice": d.choice, + ":rationale": d.rationale, + ":revisable": d.revisable, + ":made_by": d.made_by ?? "agent", + ":superseded_by": d.superseded_by, + }); +} +export function getDecisionById(id: string): DbRow | undefined { + if (!currentDb) return undefined; + const row = currentDb.prepare("SELECT * FROM decisions WHERE id = ?").get(id); + if (!row) return undefined; + return { + seq: row["seq"], + id: row["id"], + when_context: row["when_context"], + scope: row["scope"], + decision: row["decision"], + choice: row["choice"], + rationale: row["rationale"], + revisable: row["revisable"], + made_by: row["made_by"] ?? "agent", + superseded_by: row["superseded_by"] ?? null, + }; +} +export function getActiveDecisions(): DbRow[] { + if (!currentDb) return []; + const rows = currentDb.prepare("SELECT * FROM active_decisions").all(); + return rows.map((row) => ({ + seq: row["seq"], + id: row["id"], + when_context: row["when_context"], + scope: row["scope"], + decision: row["decision"], + choice: row["choice"], + rationale: row["rationale"], + revisable: row["revisable"], + made_by: row["made_by"] ?? 
    "agent",
    superseded_by: null,
  }));
}
/**
 * Insert a requirement row. Throws SFError(SF_STALE_STATE) when no DB is open.
 */
export function insertRequirement(r: RequirementInput): void {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare(`INSERT INTO requirements (id, class, status, description, why, source, primary_owner, supporting_slices, validation, notes, full_content, superseded_by)
      VALUES (:id, :class, :status, :description, :why, :source, :primary_owner, :supporting_slices, :validation, :notes, :full_content, :superseded_by)`)
    .run({
      ":id": r.id,
      ":class": r.class,
      ":status": r.status,
      ":description": r.description,
      ":why": r.why,
      ":source": r.source,
      ":primary_owner": r.primary_owner,
      ":supporting_slices": r.supporting_slices,
      ":validation": r.validation,
      ":notes": r.notes,
      ":full_content": r.full_content,
      ":superseded_by": r.superseded_by,
    });
}
/**
 * Fetch one requirement by id; returns undefined when missing or DB closed.
 */
export function getRequirementById(id: string): DbRow | undefined {
  if (!currentDb) return undefined;
  const row = currentDb
    .prepare("SELECT * FROM requirements WHERE id = ?")
    .get(id);
  if (!row) return undefined;
  return {
    id: row["id"],
    class: row["class"],
    status: row["status"],
    description: row["description"],
    why: row["why"],
    source: row["source"],
    primary_owner: row["primary_owner"],
    supporting_slices: row["supporting_slices"],
    validation: row["validation"],
    notes: row["notes"],
    full_content: row["full_content"],
    superseded_by: row["superseded_by"] ??
null, + }; +} +export function getActiveRequirements(): DbRow[] { + if (!currentDb) return []; + const rows = currentDb.prepare("SELECT * FROM active_requirements").all(); + return rows.map((row) => ({ + id: row["id"], + class: row["class"], + status: row["status"], + description: row["description"], + why: row["why"], + source: row["source"], + primary_owner: row["primary_owner"], + supporting_slices: row["supporting_slices"], + validation: row["validation"], + notes: row["notes"], + full_content: row["full_content"], + superseded_by: null, + })); +} +export function getDbOwnerPid(): number { + return currentPid; +} +export function getDbPath(): string | null { + return currentPath; +} + +/** + * Load persisted session mode state from DB. + * + * Purpose: restore mode state across session restarts. + * + * Consumer: AutoSession initialization. + */ +export function loadSessionModeState(): Record | null { + if (!currentDb) return null; + try { + const row = currentDb + .prepare("SELECT * FROM session_mode_state WHERE id = 1") + .get(); + if (!row) return null; + return { + workMode: row["work_mode"] ?? "chat", + runControl: row["run_control"] ?? "manual", + permissionProfile: row["permission_profile"] ?? "restricted", + modelMode: row["model_mode"] ?? "smart", + surface: row["surface"] ?? "tui", + updatedAt: row["updated_at"] ?? null, + }; + } catch { + return null; + } +} + +/** + * Persist the current session mode into the project database. + * + * Purpose: keep work mode, run control, permission profile, and model mode + * stable across reload/resume without letting command handlers write SQL. + * + * Consumer: AutoSession.setMode() after validated mode transitions. 
+ */ +export function saveSessionModeState(mode: Record): boolean { + if (!currentDb) return; + currentDb + .prepare(` + INSERT INTO session_mode_state (id, work_mode, run_control, permission_profile, model_mode, surface, updated_at) + VALUES (1, :workMode, :runControl, :permissionProfile, :modelMode, :surface, :updatedAt) + ON CONFLICT(id) DO UPDATE SET + work_mode = excluded.work_mode, + run_control = excluded.run_control, + permission_profile = excluded.permission_profile, + model_mode = excluded.model_mode, + surface = excluded.surface, + updated_at = excluded.updated_at + `) + .run({ + ":workMode": mode.workMode, + ":runControl": mode.runControl, + ":permissionProfile": mode.permissionProfile, + ":modelMode": mode.modelMode, + ":surface": mode.surface ?? "tui", + ":updatedAt": mode.updatedAt ?? new Date().toISOString(), + }); +} + +export function _getAdapter(): DbAdapter | null { + return currentDb; +} +export function _resetProvider(): void { + loadAttempted = false; +} +export function upsertDecision(d: DecisionInput): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + // Use ON CONFLICT DO UPDATE instead of INSERT OR REPLACE to preserve the + // seq column. INSERT OR REPLACE deletes then reinserts, resetting seq and + // corrupting decision ordering in DECISIONS.md after reconcile replay. 
+ currentDb + .prepare(`INSERT INTO decisions (id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by) + VALUES (:id, :when_context, :scope, :decision, :choice, :rationale, :revisable, :made_by, :superseded_by) + ON CONFLICT(id) DO UPDATE SET + when_context = excluded.when_context, + scope = excluded.scope, + decision = excluded.decision, + choice = excluded.choice, + rationale = excluded.rationale, + revisable = excluded.revisable, + made_by = excluded.made_by, + superseded_by = excluded.superseded_by`) + .run({ + ":id": d.id, + ":when_context": d.when_context, + ":scope": d.scope, + ":decision": d.decision, + ":choice": d.choice, + ":rationale": d.rationale, + ":revisable": d.revisable, + ":made_by": d.made_by ?? "agent", + ":superseded_by": d.superseded_by ?? null, + }); +} +export function upsertRequirement(r: RequirementInput): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR REPLACE INTO requirements (id, class, status, description, why, source, primary_owner, supporting_slices, validation, notes, full_content, superseded_by) + VALUES (:id, :class, :status, :description, :why, :source, :primary_owner, :supporting_slices, :validation, :notes, :full_content, :superseded_by)`) + .run({ + ":id": r.id, + ":class": r.class, + ":status": r.status, + ":description": r.description, + ":why": r.why, + ":source": r.source, + ":primary_owner": r.primary_owner, + ":supporting_slices": r.supporting_slices, + ":validation": r.validation, + ":notes": r.notes, + ":full_content": r.full_content, + ":superseded_by": r.superseded_by ?? 
null, + }); +} +export function clearArtifacts(): void { + if (!currentDb) return; + try { + currentDb.exec("DELETE FROM artifacts"); + } catch (e) { + logWarning("db", `clearArtifacts failed: ${(e as Error)?.message}`); + } +} +export function insertArtifact(a: ArtifactInput): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR REPLACE INTO artifacts (path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at) + VALUES (:path, :artifact_type, :milestone_id, :slice_id, :task_id, :full_content, :imported_at)`) + .run({ + ":path": a.path, + ":artifact_type": a.artifact_type, + ":milestone_id": a.milestone_id, + ":slice_id": a.slice_id, + ":task_id": a.task_id, + ":full_content": a.full_content, + ":imported_at": new Date().toISOString(), + }); +} +export function insertMilestone(m: MilestoneInput): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO milestones ( + id, title, status, depends_on, created_at, + vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json, sequence + ) VALUES ( + :id, :title, :status, :depends_on, :created_at, + :vision, :success_criteria, :key_risks, :proof_strategy, + :verification_contract, :verification_integration, :verification_operational, :verification_uat, + :definition_of_done, :requirement_coverage, :boundary_map_markdown, :vision_meeting_json, :product_research_json, :sequence + )`) + .run({ + ":id": m.id, + ":title": m.title ?? "", + // Default to "queued" — never auto-create milestones as "active" (#3380). + // Callers that need "active" must pass it explicitly. + ":status": m.status ?? "queued", + ":depends_on": JSON.stringify(m.depends_on ?? 
[]), + ":created_at": new Date().toISOString(), + ":vision": m.planning?.vision ?? "", + ":success_criteria": JSON.stringify(m.planning?.successCriteria ?? []), + ":key_risks": JSON.stringify(m.planning?.keyRisks ?? []), + ":proof_strategy": JSON.stringify(m.planning?.proofStrategy ?? []), + ":verification_contract": m.planning?.verificationContract ?? "", + ":verification_integration": m.planning?.verificationIntegration ?? "", + ":verification_operational": m.planning?.verificationOperational ?? "", + ":verification_uat": m.planning?.verificationUat ?? "", + ":definition_of_done": JSON.stringify(m.planning?.definitionOfDone ?? []), + ":requirement_coverage": m.planning?.requirementCoverage ?? "", + ":boundary_map_markdown": m.planning?.boundaryMapMarkdown ?? "", + ":vision_meeting_json": m.planning?.visionMeeting + ? JSON.stringify(m.planning.visionMeeting) + : "", + ":product_research_json": m.planning?.productResearch + ? JSON.stringify(m.planning.productResearch) + : "", + ":sequence": m.sequence ?? 0, + }); + if (hasPlanningPayload(m.planning as Record)) { + insertMilestoneSpecIfAbsent(m.id, (m.planning ?? {}) as Record); + } +} +function insertMilestoneSpecIfAbsent(milestoneId: string, planning: Record = {}): void { + if (!hasPlanningPayload(planning)) return; + const existing = (currentDb as DbAdapter) + .prepare("SELECT * FROM milestone_specs WHERE id = ?") + .get(milestoneId); + if (existing && !isEmptyMilestoneSpec(existing)) return; + const params = { + ":id": milestoneId, + ":vision": planning.vision ?? "", + ":success_criteria": JSON.stringify(planning.successCriteria ?? []), + ":key_risks": JSON.stringify(planning.keyRisks ?? []), + ":proof_strategy": JSON.stringify(planning.proofStrategy ?? []), + ":verification_contract": planning.verificationContract ?? "", + ":verification_integration": planning.verificationIntegration ?? "", + ":verification_operational": planning.verificationOperational ?? "", + ":verification_uat": planning.verificationUat ?? 
"", + ":definition_of_done": JSON.stringify(planning.definitionOfDone ?? []), + ":requirement_coverage": planning.requirementCoverage ?? "", + ":boundary_map_markdown": planning.boundaryMapMarkdown ?? "", + ":vision_meeting_json": planning.visionMeeting + ? JSON.stringify(planning.visionMeeting) + : "", + ":product_research_json": planning.productResearch + ? JSON.stringify(planning.productResearch) + : "", + ":created_at": new Date().toISOString(), + }; + if (existing) { + currentDb + .prepare(`UPDATE milestone_specs SET + vision = :vision, + success_criteria = :success_criteria, + key_risks = :key_risks, + proof_strategy = :proof_strategy, + verification_contract = :verification_contract, + verification_integration = :verification_integration, + verification_operational = :verification_operational, + verification_uat = :verification_uat, + definition_of_done = :definition_of_done, + requirement_coverage = :requirement_coverage, + boundary_map_markdown = :boundary_map_markdown, + vision_meeting_json = :vision_meeting_json, + product_research_json = :product_research_json + WHERE id = :id`) + .run(params); + return; + } + currentDb + .prepare(`INSERT OR IGNORE INTO milestone_specs ( + id, vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json, + spec_version, created_at + ) VALUES ( + :id, :vision, :success_criteria, :key_risks, :proof_strategy, + :verification_contract, :verification_integration, :verification_operational, :verification_uat, + :definition_of_done, :requirement_coverage, :boundary_map_markdown, :vision_meeting_json, :product_research_json, + 1, :created_at + )`) + .run(params); +} +export function upsertMilestonePlanning(milestoneId: string, planning: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + 
insertMilestoneSpecIfAbsent(milestoneId, planning); + currentDb + .prepare(`UPDATE milestones SET + title = COALESCE(NULLIF(:title, ''), title), + status = COALESCE(NULLIF(:status, ''), status), + vision = COALESCE(:vision, vision), + success_criteria = COALESCE(:success_criteria, success_criteria), + key_risks = COALESCE(:key_risks, key_risks), + proof_strategy = COALESCE(:proof_strategy, proof_strategy), + verification_contract = COALESCE(:verification_contract, verification_contract), + verification_integration = COALESCE(:verification_integration, verification_integration), + verification_operational = COALESCE(:verification_operational, verification_operational), + verification_uat = COALESCE(:verification_uat, verification_uat), + definition_of_done = COALESCE(:definition_of_done, definition_of_done), + requirement_coverage = COALESCE(:requirement_coverage, requirement_coverage), + boundary_map_markdown = COALESCE(:boundary_map_markdown, boundary_map_markdown), + vision_meeting_json = COALESCE(:vision_meeting_json, vision_meeting_json), + product_research_json = COALESCE(:product_research_json, product_research_json) + WHERE id = :id`) + .run({ + ":id": milestoneId, + ":title": planning.title ?? "", + ":status": planning.status ?? "", + ":vision": planning.vision ?? null, + ":success_criteria": planning.successCriteria + ? JSON.stringify(planning.successCriteria) + : null, + ":key_risks": planning.keyRisks + ? JSON.stringify(planning.keyRisks) + : null, + ":proof_strategy": planning.proofStrategy + ? JSON.stringify(planning.proofStrategy) + : null, + ":verification_contract": planning.verificationContract ?? null, + ":verification_integration": planning.verificationIntegration ?? null, + ":verification_operational": planning.verificationOperational ?? null, + ":verification_uat": planning.verificationUat ?? null, + ":definition_of_done": planning.definitionOfDone + ? 
JSON.stringify(planning.definitionOfDone) + : null, + ":requirement_coverage": planning.requirementCoverage ?? null, + ":boundary_map_markdown": planning.boundaryMapMarkdown ?? null, + ":vision_meeting_json": planning.visionMeeting + ? JSON.stringify(planning.visionMeeting) + : null, + ":product_research_json": planning.productResearch + ? JSON.stringify(planning.productResearch) + : null, + }); +} +export function insertSlice(s: SliceInput): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO slices ( + milestone_id, id, title, status, risk, depends, demo, created_at, + goal, success_criteria, proof_level, integration_closure, observability_impact, + adversarial_partner, adversarial_combatant, adversarial_architect, planning_meeting_json, sequence, + is_sketch, sketch_scope + ) VALUES ( + :milestone_id, :id, :title, :status, :risk, :depends, :demo, :created_at, + :goal, :success_criteria, :proof_level, :integration_closure, :observability_impact, + :adversarial_partner, :adversarial_combatant, :adversarial_architect, :planning_meeting_json, :sequence, + :is_sketch, :sketch_scope + ) + ON CONFLICT (milestone_id, id) DO UPDATE SET + title = CASE WHEN :raw_title IS NOT NULL THEN excluded.title ELSE slices.title END, + status = CASE WHEN slices.status IN ('complete', 'done') THEN slices.status ELSE excluded.status END, + risk = CASE WHEN :raw_risk IS NOT NULL THEN excluded.risk ELSE slices.risk END, + depends = excluded.depends, + demo = CASE WHEN :raw_demo IS NOT NULL THEN excluded.demo ELSE slices.demo END, + goal = CASE WHEN :raw_goal IS NOT NULL THEN excluded.goal ELSE slices.goal END, + success_criteria = CASE WHEN :raw_success_criteria IS NOT NULL THEN excluded.success_criteria ELSE slices.success_criteria END, + proof_level = CASE WHEN :raw_proof_level IS NOT NULL THEN excluded.proof_level ELSE slices.proof_level END, + integration_closure = CASE WHEN :raw_integration_closure IS NOT NULL 
THEN excluded.integration_closure ELSE slices.integration_closure END, + observability_impact = CASE WHEN :raw_observability_impact IS NOT NULL THEN excluded.observability_impact ELSE slices.observability_impact END, + adversarial_partner = CASE WHEN :raw_adversarial_partner IS NOT NULL THEN excluded.adversarial_partner ELSE slices.adversarial_partner END, + adversarial_combatant = CASE WHEN :raw_adversarial_combatant IS NOT NULL THEN excluded.adversarial_combatant ELSE slices.adversarial_combatant END, + adversarial_architect = CASE WHEN :raw_adversarial_architect IS NOT NULL THEN excluded.adversarial_architect ELSE slices.adversarial_architect END, + planning_meeting_json = CASE WHEN :raw_planning_meeting_json IS NOT NULL THEN excluded.planning_meeting_json ELSE slices.planning_meeting_json END, + sequence = CASE WHEN :raw_sequence IS NOT NULL THEN excluded.sequence ELSE slices.sequence END, + is_sketch = CASE WHEN :raw_is_sketch IS NOT NULL THEN excluded.is_sketch ELSE slices.is_sketch END, + sketch_scope = CASE WHEN :raw_sketch_scope IS NOT NULL THEN excluded.sketch_scope ELSE slices.sketch_scope END`) + .run({ + ":milestone_id": s.milestoneId, + ":id": s.id, + ":title": s.title ?? "", + ":status": s.status ?? "pending", + ":risk": s.risk ?? "medium", + ":depends": JSON.stringify(s.depends ?? []), + ":demo": s.demo ?? "", + ":created_at": new Date().toISOString(), + ":goal": s.planning?.goal ?? "", + ":success_criteria": s.planning?.successCriteria ?? "", + ":proof_level": s.planning?.proofLevel ?? "", + ":integration_closure": s.planning?.integrationClosure ?? "", + ":observability_impact": s.planning?.observabilityImpact ?? "", + ":adversarial_partner": s.planning?.adversarialReview?.partner ?? "", + ":adversarial_combatant": s.planning?.adversarialReview?.combatant ?? "", + ":adversarial_architect": s.planning?.adversarialReview?.architect ?? "", + ":planning_meeting_json": s.planning?.planningMeeting + ? 
JSON.stringify(s.planning.planningMeeting) + : "", + ":sequence": s.sequence ?? 0, + ":is_sketch": s.isSketch === true ? 1 : 0, + ":sketch_scope": s.sketchScope ?? "", + // Raw sentinel params: NULL when caller omitted the field, used in ON CONFLICT guards + ":raw_title": s.title ?? null, + ":raw_risk": s.risk ?? null, + ":raw_demo": s.demo ?? null, + ":raw_goal": s.planning?.goal ?? null, + ":raw_success_criteria": s.planning?.successCriteria ?? null, + ":raw_proof_level": s.planning?.proofLevel ?? null, + ":raw_integration_closure": s.planning?.integrationClosure ?? null, + ":raw_observability_impact": s.planning?.observabilityImpact ?? null, + ":raw_adversarial_partner": + s.planning?.adversarialReview?.partner ?? null, + ":raw_adversarial_combatant": + s.planning?.adversarialReview?.combatant ?? null, + ":raw_adversarial_architect": + s.planning?.adversarialReview?.architect ?? null, + ":raw_planning_meeting_json": s.planning?.planningMeeting + ? JSON.stringify(s.planning.planningMeeting) + : null, + ":raw_sequence": s.sequence ?? null, + ":raw_is_sketch": s.isSketch === undefined ? null : s.isSketch ? 1 : 0, + ":raw_sketch_scope": s.sketchScope === undefined ? null : s.sketchScope, + }); + insertSliceSpecIfAbsent(s.milestoneId, s.id, s.planning ?? 
{}); +} +function insertSliceSpecIfAbsent(milestoneId: string, sliceId: string, planning: Record = {}): void { + currentDb + .prepare(`INSERT OR IGNORE INTO slice_specs ( + milestone_id, slice_id, goal, success_criteria, proof_level, + integration_closure, observability_impact, + adversarial_partner, adversarial_combatant, adversarial_architect, + planning_meeting_json, spec_version, created_at + ) VALUES ( + :milestone_id, :slice_id, :goal, :success_criteria, :proof_level, + :integration_closure, :observability_impact, + :adversarial_partner, :adversarial_combatant, :adversarial_architect, + :planning_meeting_json, 1, :created_at + )`) + .run({ + ":milestone_id": milestoneId, + ":slice_id": sliceId, + ":goal": planning.goal ?? "", + ":success_criteria": planning.successCriteria ?? "", + ":proof_level": planning.proofLevel ?? "", + ":integration_closure": planning.integrationClosure ?? "", + ":observability_impact": planning.observabilityImpact ?? "", + ":adversarial_partner": planning.adversarialReview?.partner ?? "", + ":adversarial_combatant": planning.adversarialReview?.combatant ?? "", + ":adversarial_architect": planning.adversarialReview?.architect ?? "", + ":planning_meeting_json": planning.planningMeeting + ? JSON.stringify(planning.planningMeeting) + : "", + ":created_at": new Date().toISOString(), + }); +} +/** + * SF ADR-011: clear the is_sketch flag after refine-slice fills in the full plan. + * Idempotent — safe to call on already-refined slices. + */ +export function clearSliceSketch(milestoneId: string, sliceId: string): void { + setSliceSketchFlag(milestoneId, sliceId, false); +} +/** + * SF ADR-011: generalized sketch-flag setter — flip true or false. + * Idempotent. Use this instead of clearSliceSketch when you also need to + * mark a slice as a sketch (e.g., a re-plan flow that wants to revert to + * sketch-then-refine). 
+ */ +export function setSliceSketchFlag(milestoneId: string, sliceId: string, isSketch: boolean): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `UPDATE slices SET is_sketch = :is_sketch WHERE milestone_id = :mid AND id = :sid`, + ) + .run({ + ":is_sketch": isSketch ? 1 : 0, + ":mid": milestoneId, + ":sid": sliceId, + }); +} +/** + * SF ADR-011 auto-heal: reconcile stale is_sketch=1 rows whose PLAN file already + * exists on disk. The caller passes a predicate that uses the canonical path + * resolver so path logic stays in one place. Safe to call repeatedly — only + * flips rows that meet the predicate. + */ +export function autoHealSketchFlags(milestoneId: string, hasPlanFile: (sliceId: string) => boolean): void { + if (!currentDb) return; + const rows = currentDb + .prepare( + `SELECT id FROM slices WHERE milestone_id = :mid AND is_sketch = 1`, + ) + .all({ ":mid": milestoneId }); + for (const row of rows) { + if (hasPlanFile(row['id'] as string)) { + setSliceSketchFlag(milestoneId, row['id'] as string, false); + } + } +} +/** + * SF ADR-011 P2: list tasks across a milestone that have an + * escalation artifact path. By default returns only ACTIVE escalations + * (pending OR awaiting_review); pass includeResolved=true to also return + * resolved-but-still-recorded entries (audit trail). + * + * Used by `/escalate list` to enumerate cross-slice escalations. + */ +export function listEscalationArtifacts(milestoneId: string, includeResolved = false): DbRow[] { + if (!currentDb) return []; + const filter = includeResolved + ? 
"escalation_artifact_path IS NOT NULL" + : "(escalation_pending = 1 OR escalation_awaiting_review = 1) AND escalation_artifact_path IS NOT NULL"; + const rows = currentDb + .prepare( + `SELECT * FROM tasks WHERE milestone_id = :mid AND ${filter} ORDER BY slice_id, sequence, id`, + ) + .all({ ":mid": milestoneId }); + return rows.map(rowToTask); +} +export function upsertSlicePlanning(milestoneId: string, sliceId: string, planning: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + insertSliceSpecIfAbsent(milestoneId, sliceId, planning); + currentDb + .prepare(`UPDATE slices SET + goal = COALESCE(:goal, goal), + success_criteria = COALESCE(:success_criteria, success_criteria), + proof_level = COALESCE(:proof_level, proof_level), + integration_closure = COALESCE(:integration_closure, integration_closure), + observability_impact = COALESCE(:observability_impact, observability_impact), + adversarial_partner = COALESCE(:adversarial_partner, adversarial_partner), + adversarial_combatant = COALESCE(:adversarial_combatant, adversarial_combatant), + adversarial_architect = COALESCE(:adversarial_architect, adversarial_architect), + planning_meeting_json = COALESCE(:planning_meeting_json, planning_meeting_json) + WHERE milestone_id = :milestone_id AND id = :id`) + .run({ + ":milestone_id": milestoneId, + ":id": sliceId, + ":goal": planning.goal ?? null, + ":success_criteria": planning.successCriteria ?? null, + ":proof_level": planning.proofLevel ?? null, + ":integration_closure": planning.integrationClosure ?? null, + ":observability_impact": planning.observabilityImpact ?? null, + ":adversarial_partner": planning.adversarialReview?.partner ?? null, + ":adversarial_combatant": planning.adversarialReview?.combatant ?? null, + ":adversarial_architect": planning.adversarialReview?.architect ?? null, + ":planning_meeting_json": planning.planningMeeting + ? 
JSON.stringify(planning.planningMeeting) + : null, + }); +} +export function insertTask(t: TaskInput): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO tasks ( + milestone_id, slice_id, id, title, status, task_status, one_liner, narrative, + verification_result, verification_status, duration, completed_at, blocker_discovered, + deviations, known_issues, key_files, key_decisions, full_summary_md, + description, estimate, files, verify, inputs, expected_output, observability_impact, sequence + ) VALUES ( + :milestone_id, :slice_id, :id, :title, :status, :task_status, :one_liner, :narrative, + :verification_result, :verification_status, :duration, :completed_at, :blocker_discovered, + :deviations, :known_issues, :key_files, :key_decisions, :full_summary_md, + :description, :estimate, :files, :verify, :inputs, :expected_output, :observability_impact, :sequence + ) + ON CONFLICT(milestone_id, slice_id, id) DO UPDATE SET + title = CASE WHEN NULLIF(:title, '') IS NOT NULL THEN :title ELSE tasks.title END, + status = :status, + task_status = :task_status, + one_liner = :one_liner, + narrative = :narrative, + verification_result = :verification_result, + verification_status = :verification_status, + duration = :duration, + completed_at = :completed_at, + blocker_discovered = :blocker_discovered, + deviations = :deviations, + known_issues = :known_issues, + key_files = :key_files, + key_decisions = :key_decisions, + full_summary_md = :full_summary_md, + description = CASE WHEN NULLIF(:description, '') IS NOT NULL THEN :description ELSE tasks.description END, + estimate = CASE WHEN NULLIF(:estimate, '') IS NOT NULL THEN :estimate ELSE tasks.estimate END, + files = CASE WHEN NULLIF(:files, '[]') IS NOT NULL THEN :files ELSE tasks.files END, + verify = CASE WHEN NULLIF(:verify, '') IS NOT NULL THEN :verify ELSE tasks.verify END, + inputs = CASE WHEN NULLIF(:inputs, '[]') IS NOT NULL THEN :inputs ELSE 
tasks.inputs END, + expected_output = CASE WHEN NULLIF(:expected_output, '[]') IS NOT NULL THEN :expected_output ELSE tasks.expected_output END, + observability_impact = CASE WHEN NULLIF(:observability_impact, '') IS NOT NULL THEN :observability_impact ELSE tasks.observability_impact END, + sequence = :sequence`) + .run({ + ":milestone_id": t.milestoneId, + ":slice_id": t.sliceId, + ":id": t.id, + ":title": t.title ?? "", + ":status": t.status ?? "pending", + ":task_status": normalizeTaskStatus(t.taskStatus ?? t.status) ?? "todo", + ":one_liner": t.oneLiner ?? "", + ":narrative": t.narrative ?? "", + ":verification_result": t.verificationResult ?? "", + ":verification_status": t.verificationStatus ?? "", + ":duration": t.duration ?? "", + ":completed_at": + t.status === "done" || t.status === "complete" + ? new Date().toISOString() + : null, + ":blocker_discovered": t.blockerDiscovered ? 1 : 0, + ":deviations": t.deviations ?? "", + ":known_issues": t.knownIssues ?? "", + ":key_files": JSON.stringify(t.keyFiles ?? []), + ":key_decisions": JSON.stringify(t.keyDecisions ?? []), + ":full_summary_md": t.fullSummaryMd ?? "", + ":description": t.planning?.description ?? "", + ":estimate": t.planning?.estimate ?? "", + ":files": JSON.stringify(t.planning?.files ?? []), + ":verify": t.planning?.verify ?? "", + ":inputs": JSON.stringify(t.planning?.inputs ?? []), + ":expected_output": JSON.stringify(t.planning?.expectedOutput ?? []), + ":observability_impact": t.planning?.observabilityImpact ?? "", + ":sequence": t.sequence ?? 0, + }); + if (hasTaskSpecIntent(t.planning)) { + insertTaskSpecIfAbsent(t.milestoneId, t.sliceId, t.id, t.planning ?? 
{}); + } + insertTaskSchedulerIfAbsent(t.milestoneId, t.sliceId, t.id); +} +function hasTaskSpecIntent(planning: Record = {}): boolean { + if (!planning || typeof planning !== "object") return false; + if (typeof planning.verify === "string" && planning.verify.trim()) + return true; + if (Array.isArray(planning.inputs) && planning.inputs.length > 0) return true; + if ( + Array.isArray(planning.expectedOutput) && + planning.expectedOutput.length > 0 + ) { + return true; + } + for (const key of [ + "risk", + "mutationScope", + "mutation_scope", + "verification", + "verificationType", + "verification_type", + "planApproval", + "plan_approval", + "estimatedEffort", + "estimated_effort", + "dependencies", + "blocksParallel", + "blocks_parallel", + "requiresUserInput", + "requires_user_input", + "autoRetry", + "auto_retry", + "maxRetries", + "max_retries", + ]) { + if (planning[key] !== undefined) return true; + } + return false; +} +function insertTaskSpecIfAbsent(milestoneId: string, sliceId: string, taskId: string, planning: Record = {}): void { + if (!hasTaskSpecIntent(planning)) return; + const { normalized: frontmatter, errors } = taskFrontmatterFromRecord(planning); + if (errors?.length) logWarning("sf-db:insertTaskSpec", `frontmatter validation errors for ${milestoneId}/${sliceId}/${taskId}: ${errors.join(", ")}`); + currentDb + .prepare(`INSERT OR IGNORE INTO task_specs ( + milestone_id, slice_id, task_id, verify, inputs, expected_output, + risk, mutation_scope, verification_type, plan_approval, estimated_effort, + dependencies, blocks_parallel, requires_user_input, auto_retry, max_retries, + spec_version, created_at + ) VALUES ( + :milestone_id, :slice_id, :task_id, :verify, :inputs, :expected_output, + :risk, :mutation_scope, :verification_type, :plan_approval, :estimated_effort, + :dependencies, :blocks_parallel, :requires_user_input, :auto_retry, :max_retries, + 1, :created_at + )`) + .run({ + ":milestone_id": milestoneId, + ":slice_id": sliceId, + 
":task_id": taskId, + ":verify": planning.verify ?? "", + ":inputs": JSON.stringify(planning.inputs ?? []), + ":expected_output": JSON.stringify(planning.expectedOutput ?? []), + ":risk": frontmatter.risk, + ":mutation_scope": frontmatter.mutationScope, + ":verification_type": frontmatter.verification, + ":plan_approval": frontmatter.planApproval, + ":estimated_effort": frontmatter.estimatedEffort, + ":dependencies": JSON.stringify(frontmatter.dependencies), + ":blocks_parallel": frontmatter.blocksParallel ? 1 : 0, + ":requires_user_input": frontmatter.requiresUserInput ? 1 : 0, + ":auto_retry": frontmatter.autoRetry ? 1 : 0, + ":max_retries": frontmatter.maxRetries, + ":created_at": new Date().toISOString(), + }); +} +function insertTaskSchedulerIfAbsent(milestoneId: string, sliceId: string, taskId: string): void { + upsertTaskSchedulerStatus(milestoneId, sliceId, taskId, "queued", { + onlyIfAbsent: true, + }); +} +/** + * Upsert a task scheduler row without changing the task lifecycle row. + * + * Purpose: keep due/claimed/dispatched/consumed scheduling separate from + * task_status so automation level and timing do not overwrite work progress. + * + * Consumer: task scheduling/dispatch surfaces and task planning row creation. + */ +export function upsertTaskSchedulerStatus( + milestoneId: string, + sliceId: string, + taskId: string, + status = "queued", + { onlyIfAbsent = false }: { onlyIfAbsent?: boolean } = {}, +): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const schedulerStatus = normalizeSchedulerStatus(status) ?? "queued"; + const sql = onlyIfAbsent + ? 
`INSERT OR IGNORE INTO task_scheduler ( + milestone_id, slice_id, task_id, status, updated_at + ) VALUES ( + :milestone_id, :slice_id, :task_id, :status, :updated_at + )` + : `INSERT INTO task_scheduler ( + milestone_id, slice_id, task_id, status, updated_at + ) VALUES ( + :milestone_id, :slice_id, :task_id, :status, :updated_at + ) + ON CONFLICT(milestone_id, slice_id, task_id) DO UPDATE SET + status = excluded.status, + updated_at = excluded.updated_at`; + currentDb.prepare(sql).run({ + ":milestone_id": milestoneId, + ":slice_id": sliceId, + ":task_id": taskId, + ":status": schedulerStatus, + ":updated_at": new Date().toISOString(), + }); +} +export function updateTaskStatus( + milestoneId: string, + sliceId: string, + taskId: string, + status: string, + completedAt: string | null = null, +): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const taskStatus = normalizeTaskStatus(status) ?? "todo"; + currentDb + .prepare(`UPDATE tasks SET + status = :status, + completed_at = :completed_at, + task_status = :task_status + WHERE milestone_id = :milestone_id AND slice_id = :slice_id AND id = :id`) + .run({ + ":status": status, + ":completed_at": completedAt ?? null, + ":task_status": taskStatus, + ":milestone_id": milestoneId, + ":slice_id": sliceId, + ":id": taskId, + }); +} +/** SF ADR-011 P2: set pause-on-escalation state on a task. The two flags are + * mutually exclusive — pending=1 forces awaiting_review=0. 
*/ +export function setTaskEscalationPending( + milestoneId, + sliceId, + taskId, + artifactPath, +) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE tasks + SET escalation_pending = 1, + escalation_awaiting_review = 0, + escalation_artifact_path = :path + WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`) + .run({ + ":path": artifactPath, + ":mid": milestoneId, + ":sid": sliceId, + ":tid": taskId, + }); +} +/** SF ADR-011 P2: continueWithDefault=true marker — artifact exists but no pause. + * Mutually exclusive with escalation_pending. */ +export function setTaskEscalationAwaitingReview( + milestoneId, + sliceId, + taskId, + artifactPath, +) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE tasks + SET escalation_awaiting_review = 1, + escalation_pending = 0, + escalation_artifact_path = :path + WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`) + .run({ + ":path": artifactPath, + ":mid": milestoneId, + ":sid": sliceId, + ":tid": taskId, + }); +} +/** SF ADR-011 P2: clear both escalation flags (called when an escalation is + * resolved or its artifact is removed). Leaves escalation_artifact_path so + * the resolution audit trail survives. */ +export function clearTaskEscalationFlags(milestoneId: string, sliceId: string, taskId: string): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE tasks + SET escalation_pending = 0, + escalation_awaiting_review = 0 + WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); +} +/** SF ADR-011 P2 carry-forward: find a task in this slice that has a resolved + * escalation override that has NOT yet been injected into a downstream + * prompt. Returns the first match by sequence (lowest first), or null when + * no carry-forward is pending. 
+ * + * The match criterion: escalation_artifact_path IS NOT NULL AND + * escalation_pending=0 AND escalation_awaiting_review=0 AND + * escalation_override_applied=0. The artifact's respondedAt is checked by + * the caller (claimOverrideForInjection in escalation.ts) — keeping artifact + * schema knowledge out of the DB layer. */ +export function findUnappliedEscalationOverride(milestoneId: string, sliceId: string): DbRow | undefined { + if (!currentDb) return null; + const row = currentDb + .prepare(`SELECT id, escalation_artifact_path + FROM tasks + WHERE milestone_id = :mid + AND slice_id = :sid + AND escalation_artifact_path IS NOT NULL + AND escalation_pending = 0 + AND escalation_awaiting_review = 0 + AND escalation_override_applied = 0 + ORDER BY sequence ASC, id ASC + LIMIT 1`) + .get({ ":mid": milestoneId, ":sid": sliceId }); + if (!row || !row.escalation_artifact_path) return null; + return { taskId: row.id, artifactPath: row.escalation_artifact_path }; +} +/** SF ADR-011 P2 carry-forward: atomically claim the override for injection. + * Returns true when this caller successfully flipped 0→1 (race winner) or + * false when another caller claimed it first (race loser). Use this to + * guarantee the override is injected exactly once. */ +export function claimEscalationOverride(milestoneId: string, sliceId: string, taskId: string): void { + if (!currentDb) return; + const result = currentDb + .prepare(`UPDATE tasks + SET escalation_override_applied = 1 + WHERE milestone_id = :mid + AND slice_id = :sid + AND id = :tid + AND escalation_override_applied = 0`) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); + return (result?.changes ?? 
0) > 0; +} +export function setTaskBlockerDiscovered( + milestoneId, + sliceId, + taskId, + discovered, +) { + if (!currentDb) return; + currentDb + .prepare( + `UPDATE tasks SET blocker_discovered = :discovered WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`, + ) + .run({ + ":discovered": discovered ? 1 : 0, + ":mid": milestoneId, + ":sid": sliceId, + ":tid": taskId, + }); +} +export function upsertTaskPlanning(milestoneId: string, sliceId: string, taskId: string, planning: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + insertTaskSpecIfAbsent(milestoneId, sliceId, taskId, planning); + const { normalized: frontmatter, errors: fmErrors } = taskFrontmatterFromRecord(planning); + if (fmErrors?.length) logWarning("sf-db:upsertTaskPlanning", `frontmatter validation errors for ${milestoneId}/${sliceId}/${taskId}: ${fmErrors.join(", ")}`); + const hasTaskStatus = + planning.taskStatus !== undefined || + planning.task_status !== undefined || + planning.status !== undefined; + currentDb + .prepare(`UPDATE tasks SET + title = COALESCE(:title, title), + description = COALESCE(:description, description), + estimate = COALESCE(:estimate, estimate), + files = COALESCE(:files, files), + verify = COALESCE(:verify, verify), + inputs = COALESCE(:inputs, inputs), + expected_output = COALESCE(:expected_output, expected_output), + observability_impact = COALESCE(:observability_impact, observability_impact), + full_plan_md = COALESCE(:full_plan_md, full_plan_md), + risk = :risk, + mutation_scope = :mutation_scope, + verification_type = :verification_type, + plan_approval = :plan_approval, + task_status = CASE WHEN :has_task_status = 1 THEN :task_status ELSE task_status END, + estimated_effort = :estimated_effort, + dependencies = :dependencies, + blocks_parallel = :blocks_parallel, + requires_user_input = :requires_user_input, + auto_retry = :auto_retry, + max_retries = :max_retries + WHERE milestone_id = :milestone_id AND 
slice_id = :slice_id AND id = :id`) + .run({ + ":milestone_id": milestoneId, + ":slice_id": sliceId, + ":id": taskId, + ":title": planning.title ?? null, + ":description": planning.description ?? null, + ":estimate": planning.estimate ?? null, + ":files": planning.files ? JSON.stringify(planning.files) : null, + ":verify": planning.verify ?? null, + ":inputs": planning.inputs ? JSON.stringify(planning.inputs) : null, + ":expected_output": planning.expectedOutput + ? JSON.stringify(planning.expectedOutput) + : null, + ":observability_impact": planning.observabilityImpact ?? null, + ":full_plan_md": planning.fullPlanMd ?? null, + ":risk": frontmatter.risk, + ":mutation_scope": frontmatter.mutationScope, + ":verification_type": frontmatter.verification, + ":plan_approval": frontmatter.planApproval, + ":task_status": frontmatter.taskStatus, + ":has_task_status": hasTaskStatus ? 1 : 0, + ":estimated_effort": frontmatter.estimatedEffort, + ":dependencies": JSON.stringify(frontmatter.dependencies), + ":blocks_parallel": frontmatter.blocksParallel ? 1 : 0, + ":requires_user_input": frontmatter.requiresUserInput ? 1 : 0, + ":auto_retry": frontmatter.autoRetry ? 1 : 0, + ":max_retries": frontmatter.maxRetries, + }); + if ( + planning.schedulerStatus !== undefined || + planning.scheduler_status !== undefined + ) { + upsertTaskSchedulerStatus( + milestoneId, + sliceId, + taskId, + frontmatter.schedulerStatus, + ); + } else { + insertTaskSchedulerIfAbsent(milestoneId, sliceId, taskId); + } +} +function parsePlanningMeeting(raw: unknown): Record { + if (typeof raw !== "string" || raw.trim() === "") return null; + try { + return JSON.parse(raw); + } catch { + return null; + } +} +function rowToSlice(row: Record): Record { + return { + milestone_id: row["milestone_id"], + id: row["id"], + title: row["title"], + status: row["status"], + risk: row["risk"], + depends: safeParseJsonArray(row["depends"]), + demo: row["demo"] ?? 
"", + created_at: row["created_at"], + completed_at: row["completed_at"] ?? null, + full_summary_md: row["full_summary_md"] ?? "", + full_uat_md: row["full_uat_md"] ?? "", + goal: row["goal"] ?? "", + success_criteria: row["success_criteria"] ?? "", + proof_level: row["proof_level"] ?? "", + integration_closure: row["integration_closure"] ?? "", + observability_impact: row["observability_impact"] ?? "", + adversarial_partner: row["adversarial_partner"] ?? "", + adversarial_combatant: row["adversarial_combatant"] ?? "", + adversarial_architect: row["adversarial_architect"] ?? "", + planning_meeting: parsePlanningMeeting(row["planning_meeting_json"]), + sequence: row["sequence"] ?? 0, + replan_triggered_at: row["replan_triggered_at"] ?? null, + sketch_scope: row["sketch_scope"] ?? "", + is_sketch: row["is_sketch"] ?? 0, + }; +} +export function getSlice(milestoneId: string, sliceId: string): DbRow | undefined { + if (!currentDb) return null; + const row = currentDb + .prepare("SELECT * FROM slices WHERE milestone_id = :mid AND id = :sid") + .get({ ":mid": milestoneId, ":sid": sliceId }); + if (!row) return null; + return rowToSlice(row); +} +export function updateSliceStatus(milestoneId: string, sliceId: string, status: string, completedAt: string | null): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE slices SET status = :status, completed_at = :completed_at + WHERE milestone_id = :milestone_id AND id = :id`) + .run({ + ":status": status, + ":completed_at": completedAt ?? null, + ":milestone_id": milestoneId, + ":id": sliceId, + }); +} +/** + * Store the UAT verdict for a slice. Called when an ASSESSMENT or UAT_RESULT + * file is written so the DB is the canonical source for verdict checks. 
+ */ +export function setSliceUatVerdict(milestoneId: string, sliceId: string, verdict: string): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `UPDATE slices SET uat_verdict = :verdict WHERE milestone_id = :mid AND id = :sid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":verdict": verdict }); +} +/** + * Returns the stored UAT verdict for a slice, or null if not yet recorded. + */ +export function getSliceUatVerdict(milestoneId: string, sliceId: string): string | null { + if (!currentDb) return null; + const row = currentDb + .prepare( + `SELECT uat_verdict FROM slices WHERE milestone_id = :mid AND id = :sid`, + ) + .get({ ":mid": milestoneId, ":sid": sliceId }); + return row?.uat_verdict ?? null; +} +/** + * Scan existing ASSESSMENT/UAT_RESULT files on disk and populate uat_verdict + * for slices that have no verdict recorded in the DB yet. + * + * Purpose: one-time migration path so that repos with pre-existing verdict + * files work without file fallbacks in checkNeedsRunUat — the DB becomes the + * sole source of truth immediately after open. + * + * Consumer: ensureDbOpen (dynamic-tools.js) after openDatabase succeeds. 
+ */ +export function backfillUatVerdicts(basePath: string): void { + if (!currentDb) return; + // Find all slices that have no verdict yet + const rows = currentDb + .prepare(`SELECT milestone_id, id FROM slices WHERE uat_verdict IS NULL`) + .all(); + if (!rows.length) return; + // Extract verdict from content — inline to avoid cross-module import at db layer + function parseVerdictFromContent(content) { + const fmMatch = content.match(/^---\n([\s\S]*?)\n---/); + if (fmMatch) { + const m = fmMatch[1].match(/verdict:\s*([\w-]+)/i); + if (m) { + let v = m[1].toLowerCase(); + if (v === "passed") v = "pass"; + return v; + } + return null; + } + const bodyMatch = content.match( + /\*\*Verdict:?\*\*\s*(?:✅\s*)?(\w[\w-]*)/i, + ); + if (bodyMatch) { + let v = bodyMatch[1].toLowerCase(); + if (v === "passed") v = "pass"; + return v; + } + return null; + } + const stmt = currentDb.prepare( + `UPDATE slices SET uat_verdict = :verdict WHERE milestone_id = :mid AND id = :sid`, + ); + for (const row of rows) { + const mid = row["milestone_id"]; + const sid = row["id"]; + const sliceDir = join(basePath, ".sf", "milestones", mid, "slices", sid); + const candidates = [ + join(sliceDir, `${sid}-ASSESSMENT.md`), + join(sliceDir, `${sid}-UAT_RESULT.md`), + ]; + for (const candidatePath of candidates) { + if (!existsSync(candidatePath)) continue; + try { + const content = readFileSync(candidatePath, "utf8"); + const verdict = parseVerdictFromContent(content); + if (verdict) { + stmt.run({ ":mid": mid, ":sid": sid, ":verdict": verdict }); + break; + } + } catch { + // Skip unreadable files + } + } + } +} +export function setTaskSummaryMd(milestoneId: string, sliceId: string, taskId: string, md: string): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `UPDATE tasks SET full_summary_md = :md WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId, 
":md": md }); +} +export function setSliceSummaryMd(milestoneId: string, sliceId: string, summaryMd: string, uatMd: string): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `UPDATE slices SET full_summary_md = :summary_md, full_uat_md = :uat_md WHERE milestone_id = :mid AND id = :sid`, + ) + .run({ + ":mid": milestoneId, + ":sid": sliceId, + ":summary_md": summaryMd, + ":uat_md": uatMd, + }); +} +function safeParseJsonArray(raw, fallback = []) { + if (typeof raw !== "string" || raw.trim() === "") return fallback; + try { + const parsed = JSON.parse(raw); + return Array.isArray(parsed) ? parsed : fallback; + } catch { + return fallback; + } +} +function parseTaskArrayColumn(raw) { + if (typeof raw !== "string" || raw.trim() === "") return []; + try { + const parsed = JSON.parse(raw); + if (Array.isArray(parsed)) return parsed.map((value) => String(value)); + if (parsed === null || parsed === undefined || parsed === "") return []; + return [String(parsed)]; + } catch { + // Older/corrupt rows may contain comma-separated strings instead of JSON. + return raw + .split(",") + .map((value) => value.trim()) + .filter(Boolean); + } +} +function rowToTask(row) { + const parseTaskArray = (value) => { + if (Array.isArray(value)) { + return value.filter((entry) => typeof entry === "string"); + } + if (typeof value !== "string") return []; + const trimmed = value.trim(); + if (!trimmed) return []; + try { + const parsed = JSON.parse(trimmed); + if (Array.isArray(parsed)) { + return parsed.filter((entry) => typeof entry === "string"); + } + if (typeof parsed === "string" && parsed.trim()) { + return [parsed.trim()]; + } + } catch { + // Older/corrupt DB rows may contain raw comma-separated paths instead of JSON arrays. 
+ } + return trimmed + .split(",") + .map((entry) => entry.trim()) + .filter(Boolean); + }; + return withTaskFrontmatter({ + milestone_id: row["milestone_id"], + slice_id: row["slice_id"], + id: row["id"], + title: row["title"], + status: row["status"], + one_liner: row["one_liner"], + narrative: row["narrative"], + verification_result: row["verification_result"], + duration: row["duration"], + completed_at: row["completed_at"] ?? null, + blocker_discovered: row["blocker_discovered"] === 1, + deviations: row["deviations"], + known_issues: row["known_issues"], + key_files: parseTaskArrayColumn(row["key_files"]), + key_decisions: parseTaskArrayColumn(row["key_decisions"]), + full_summary_md: row["full_summary_md"], + description: row["description"] ?? "", + estimate: row["estimate"] ?? "", + files: parseTaskArray(row["files"]), + verify: row["verify"] ?? "", + inputs: parseTaskArray(row["inputs"]), + expected_output: parseTaskArray(row["expected_output"]), + observability_impact: row["observability_impact"] ?? "", + full_plan_md: row["full_plan_md"] ?? "", + sequence: row["sequence"] ?? 0, + verification_status: row["verification_status"] ?? "", + risk: row["risk"] ?? "low", + mutation_scope: row["mutation_scope"] ?? "isolated", + verification_type: row["verification_type"] ?? "self-check", + plan_approval: row["plan_approval"] ?? "not-required", + task_status: row["task_status"] ?? row["status"] ?? "todo", + scheduler_status: row["scheduler_status"] ?? "queued", + estimated_effort: row["estimated_effort"] ?? null, + dependencies: parseTaskArray(row["dependencies"]), + blocks_parallel: row["blocks_parallel"] ?? 0, + requires_user_input: row["requires_user_input"] ?? 0, + auto_retry: row["auto_retry"] ?? 1, + max_retries: row["max_retries"] ?? 2, + escalation_pending: row["escalation_pending"] ?? 0, + escalation_awaiting_review: row["escalation_awaiting_review"] ?? 0, + escalation_override_applied: row["escalation_override_applied"] ?? 
0, + escalation_artifact_path: row["escalation_artifact_path"] ?? null, + }); +} +export function getTask(milestoneId: string, sliceId: string, taskId: string): DbRow | undefined { + if (!currentDb) return null; + const row = currentDb + .prepare( + `SELECT t.*, ts.status AS scheduler_status + FROM tasks t + LEFT JOIN task_scheduler ts + ON t.milestone_id = ts.milestone_id + AND t.slice_id = ts.slice_id + AND t.id = ts.task_id + WHERE t.milestone_id = :mid AND t.slice_id = :sid AND t.id = :tid`, + ) + .get({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); + if (!row) return null; + return rowToTask(row); +} +export function getSliceTasks(milestoneId: string, sliceId: string): DbRow[] { + if (!currentDb) return []; + const rows = currentDb + .prepare( + `SELECT t.*, ts.status AS scheduler_status + FROM tasks t + LEFT JOIN task_scheduler ts + ON t.milestone_id = ts.milestone_id + AND t.slice_id = ts.slice_id + AND t.id = ts.task_id + WHERE t.milestone_id = :mid AND t.slice_id = :sid + ORDER BY t.sequence, t.id`, + ) + .all({ ":mid": milestoneId, ":sid": sliceId }); + return rows.map(rowToTask); +} +export function insertVerificationEvidence(e: VerificationEvidenceInput): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO verification_evidence (task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at) + VALUES (:task_id, :slice_id, :milestone_id, :command, :exit_code, :verdict, :duration_ms, :created_at)`) + .run({ + ":task_id": e.taskId, + ":slice_id": e.sliceId, + ":milestone_id": e.milestoneId, + ":command": e.command, + ":exit_code": e.exitCode, + ":verdict": e.verdict, + ":duration_ms": e.durationMs, + ":created_at": new Date().toISOString(), + }); +} +export function getVerificationEvidence(milestoneId: string, sliceId: string, taskId: string): DbRow[] { + if (!currentDb) return []; + const rows = currentDb + .prepare( + "SELECT * FROM 
verification_evidence WHERE milestone_id = :mid AND slice_id = :sid AND task_id = :tid ORDER BY id", + ) + .all({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); + return rows; +} +function rowToSelfFeedback(row) { + try { + const parsed = JSON.parse(row["full_json"]); + return { + ...parsed, + resolvedAt: row["resolved_at"] ?? parsed.resolvedAt, + resolvedReason: row["resolved_reason"] ?? parsed.resolvedReason, + resolvedBySfVersion: + row["resolved_by_sf_version"] ?? parsed.resolvedBySfVersion, + resolvedEvidence: row["resolved_evidence_json"] + ? JSON.parse(row["resolved_evidence_json"]) + : parsed.resolvedEvidence, + resolvedCriteriaMet: row["resolved_criteria_json"] + ? JSON.parse(row["resolved_criteria_json"]) + : parsed.resolvedCriteriaMet, + }; + } catch { + return { + id: row["id"], + ts: row["ts"], + kind: row["kind"], + severity: row["severity"], + blocking: row["blocking"] === 1, + repoIdentity: row["repo_identity"], + sfVersion: row["sf_version"], + basePath: row["base_path"], + occurredIn: { + unitType: row["unit_type"] ?? undefined, + milestone: row["milestone_id"] ?? undefined, + slice: row["slice_id"] ?? undefined, + task: row["task_id"] ?? undefined, + }, + summary: row["summary"], + evidence: row["evidence"], + suggestedFix: row["suggested_fix"], + resolvedAt: row["resolved_at"] ?? undefined, + resolvedReason: row["resolved_reason"] ?? undefined, + resolvedBySfVersion: row["resolved_by_sf_version"] ?? undefined, + resolvedEvidence: row["resolved_evidence_json"] + ? JSON.parse(row["resolved_evidence_json"]) + : undefined, + resolvedCriteriaMet: row["resolved_criteria_json"] + ? JSON.parse(row["resolved_criteria_json"]) + : undefined, + }; + } +} +export function insertSelfFeedbackEntry(entry: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const occurred = entry.occurredIn ?? 
{}; + currentDb + .prepare(`INSERT INTO self_feedback ( + id, ts, kind, severity, blocking, repo_identity, sf_version, base_path, + unit_type, milestone_id, slice_id, task_id, summary, evidence, suggested_fix, full_json, + resolved_at, resolved_reason, resolved_by_sf_version, resolved_evidence_json, resolved_criteria_json + ) VALUES ( + :id, :ts, :kind, :severity, :blocking, :repo_identity, :sf_version, :base_path, + :unit_type, :milestone_id, :slice_id, :task_id, :summary, :evidence, :suggested_fix, :full_json, + :resolved_at, :resolved_reason, :resolved_by_sf_version, :resolved_evidence_json, :resolved_criteria_json + ) + ON CONFLICT(id) DO NOTHING`) + .run({ + ":id": entry.id, + ":ts": entry.ts, + ":kind": entry.kind, + ":severity": entry.severity, + ":blocking": entry.blocking ? 1 : 0, + ":repo_identity": entry.repoIdentity ?? "", + ":sf_version": entry.sfVersion ?? "", + ":base_path": entry.basePath ?? "", + ":unit_type": occurred.unitType ?? null, + ":milestone_id": occurred.milestone ?? null, + ":slice_id": occurred.slice ?? null, + ":task_id": occurred.task ?? null, + ":summary": entry.summary ?? "", + ":evidence": entry.evidence ?? "", + ":suggested_fix": entry.suggestedFix ?? "", + ":full_json": JSON.stringify(entry), + ":resolved_at": entry.resolvedAt ?? null, + ":resolved_reason": entry.resolvedReason ?? null, + ":resolved_by_sf_version": entry.resolvedBySfVersion ?? null, + ":resolved_evidence_json": entry.resolvedEvidence + ? JSON.stringify(entry.resolvedEvidence) + : null, + ":resolved_criteria_json": entry.resolvedCriteriaMet + ? 
JSON.stringify(entry.resolvedCriteriaMet) + : null, + }); +} +export function listSelfFeedbackEntries(): DbRow[] { + if (!currentDb) return []; + const rows = currentDb + .prepare("SELECT * FROM self_feedback ORDER BY ts ASC, id ASC") + .all(); + return rows.map(rowToSelfFeedback); +} +export function resolveSelfFeedbackEntry(entryId: string, resolution: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const existing = currentDb + .prepare("SELECT * FROM self_feedback WHERE id = :id") + .get({ ":id": entryId }); + if (!existing || existing["resolved_at"]) return false; + const resolvedAt = resolution.resolvedAt ?? new Date().toISOString(); + const entry = { + ...rowToSelfFeedback(existing), + resolvedAt, + resolvedReason: resolution.reason, + resolvedBySfVersion: resolution.resolvedBySfVersion ?? "", + resolvedEvidence: resolution.evidence, + }; + if (resolution.criteriaMet) + entry.resolvedCriteriaMet = resolution.criteriaMet; + const result = currentDb + .prepare(`UPDATE self_feedback SET + full_json = :full_json, + resolved_at = :resolved_at, + resolved_reason = :resolved_reason, + resolved_by_sf_version = :resolved_by_sf_version, + resolved_evidence_json = :resolved_evidence_json, + resolved_criteria_json = :resolved_criteria_json + WHERE id = :id AND resolved_at IS NULL`) + .run({ + ":id": entryId, + ":full_json": JSON.stringify(entry), + ":resolved_at": resolvedAt, + ":resolved_reason": resolution.reason ?? "", + ":resolved_by_sf_version": resolution.resolvedBySfVersion ?? "", + ":resolved_evidence_json": resolution.evidence + ? JSON.stringify(resolution.evidence) + : null, + ":resolved_criteria_json": resolution.criteriaMet + ? 
JSON.stringify(resolution.criteriaMet) + : null, + }); + return result.changes > 0; +} +function parseVisionMeeting(raw) { + if (typeof raw !== "string" || raw.trim().length === 0) return null; + try { + return JSON.parse(raw); + } catch { + return null; + } +} +function parseProductResearch(raw) { + if (typeof raw !== "string" || raw.trim().length === 0) return null; + try { + return JSON.parse(raw); + } catch { + return null; + } +} +function rowToMilestone(row) { + return { + id: row["id"], + title: row["title"], + status: row["status"], + depends_on: safeParseJsonArray(row["depends_on"]), + created_at: row["created_at"], + completed_at: row["completed_at"] ?? null, + vision: row["vision"] ?? "", + success_criteria: safeParseJsonArray(row["success_criteria"]), + key_risks: safeParseJsonArray(row["key_risks"]), + proof_strategy: safeParseJsonArray(row["proof_strategy"]), + verification_contract: row["verification_contract"] ?? "", + verification_integration: row["verification_integration"] ?? "", + verification_operational: row["verification_operational"] ?? "", + verification_uat: row["verification_uat"] ?? "", + definition_of_done: safeParseJsonArray(row["definition_of_done"]), + requirement_coverage: row["requirement_coverage"] ?? "", + boundary_map_markdown: row["boundary_map_markdown"] ?? "", + vision_meeting: parseVisionMeeting(row["vision_meeting_json"]), + product_research: parseProductResearch(row["product_research_json"]), + sequence: row["sequence"] ?? 0, + }; +} +function rowToArtifact(row) { + return { + path: row["path"], + artifact_type: row["artifact_type"], + milestone_id: row["milestone_id"] ?? null, + slice_id: row["slice_id"] ?? null, + task_id: row["task_id"] ?? 
null, + full_content: row["full_content"], + imported_at: row["imported_at"], + }; +} +export function getAllMilestones(): DbRow[] { + if (!currentDb) return []; + const rows = currentDb + .prepare( + "SELECT * FROM milestones ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id", + ) + .all(); + return rows.map(rowToMilestone); +} +export function getMilestone(id: string): DbRow | undefined { + if (!currentDb) return null; + const row = currentDb + .prepare("SELECT * FROM milestones WHERE id = :id") + .get({ ":id": id }); + if (!row) return null; + return rowToMilestone(row); +} +function rowToBacklogItem(row) { + return { + id: row["id"], + title: row["title"], + status: row["status"], + note: row["note"] ?? "", + source: row["source"] ?? "", + triageRunId: row["triage_run_id"] ?? null, + sequence: row["sequence"] ?? 0, + createdAt: row["created_at"], + updatedAt: row["updated_at"], + promotedAt: row["promoted_at"] ?? null, + }; +} +export function listBacklogItems(): DbRow[] { + if (!currentDb) return []; + return currentDb + .prepare( + "SELECT * FROM backlog_items ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id", + ) + .all() + .map(rowToBacklogItem); +} +export function nextBacklogItemId(): string { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const row = currentDb + .prepare( + "SELECT id FROM backlog_items WHERE id LIKE '999.%' ORDER BY CAST(substr(id, 5) AS INTEGER) DESC LIMIT 1", + ) + .get(); + const next = row?.id ? Number.parseInt(String(row.id).slice(4), 10) + 1 : 1; + return `999.${Number.isFinite(next) ? next : 1}`; +} +export function addBacklogItem({ + id, + title, + note = "", + source = "manual", + triageRunId = null, + status = "pending", +}) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const itemId = id ?? 
nextBacklogItemId(); + const now = new Date().toISOString(); + const sequenceRow = currentDb + .prepare( + "SELECT COALESCE(MAX(sequence), 0) + 1 AS sequence FROM backlog_items", + ) + .get(); + currentDb + .prepare(`INSERT INTO backlog_items ( + id, title, status, note, source, triage_run_id, sequence, created_at, updated_at, promoted_at + ) VALUES ( + :id, :title, :status, :note, :source, :triage_run_id, :sequence, :created_at, :updated_at, :promoted_at + ) + ON CONFLICT(id) DO UPDATE SET + title = excluded.title, + status = excluded.status, + note = excluded.note, + source = excluded.source, + triage_run_id = excluded.triage_run_id, + updated_at = excluded.updated_at, + promoted_at = excluded.promoted_at`) + .run({ + ":id": itemId, + ":title": title, + ":status": status, + ":note": note, + ":source": source, + ":triage_run_id": triageRunId, + ":sequence": sequenceRow?.sequence ?? 1, + ":created_at": now, + ":updated_at": now, + ":promoted_at": status === "promoted" ? now : null, + }); + return itemId; +} +export function updateBacklogItemStatus(id: string, status: string, note = ""): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const now = new Date().toISOString(); + const result = currentDb + .prepare(`UPDATE backlog_items + SET status = :status, + note = :note, + updated_at = :updated_at, + promoted_at = CASE WHEN :status = 'promoted' THEN :updated_at ELSE promoted_at END + WHERE id = :id`) + .run({ + ":id": id, + ":status": status, + ":note": note, + ":updated_at": now, + }); + return (result?.changes ?? 0) > 0; +} +export function removeBacklogItem(id: string): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const result = currentDb + .prepare("DELETE FROM backlog_items WHERE id = :id") + .run({ ":id": id }); + return (result?.changes ?? 0) > 0; +} +/** + * Update a milestone's status in the database. + * Used by park/unpark to keep the DB in sync with the filesystem marker. 
+ * See: https://github.com/singularity-forge/sf-run/issues/2694 + */ +export function updateMilestoneStatus(milestoneId: string, status: string, completedAt: string | null): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `UPDATE milestones SET status = :status, completed_at = :completed_at WHERE id = :id`, + ) + .run({ + ":status": status, + ":completed_at": completedAt ?? null, + ":id": milestoneId, + }); +} +/** + * Persist explicit milestone execution order in the structured runtime DB. + * + * Purpose: make roadmap priority/order queryable and schema-owned instead of + * relying on `.sf/QUEUE-ORDER.json` as a peer source of truth. + * + * Consumer: queue-order.js when `/queue` or rethink reorders milestones. + */ +export function updateMilestoneQueueOrder(order: string[]): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + const stmt = currentDb.prepare( + "UPDATE milestones SET sequence = :sequence WHERE id = :id", + ); + for (let i = 0; i < order.length; i++) { + stmt.run({ ":sequence": i + 1, ":id": order[i] }); + } + }); +} +export function getActiveMilestoneFromDb(): DbRow | undefined { + if (!currentDb) return null; + const row = currentDb + .prepare( + "SELECT * FROM milestones WHERE status NOT IN ('complete', 'parked') ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id LIMIT 1", + ) + .get(); + if (!row) return null; + return rowToMilestone(row); +} +export function getActiveSliceFromDb(milestoneId: string): DbRow | undefined { + if (!currentDb) return null; + // Find the first non-complete slice whose dependencies are all satisfied. + // Uses the slice_dependencies junction table (kept in sync by syncSliceDependencies). 
+ const row = currentDb + .prepare(`SELECT s.* FROM slices s + WHERE s.milestone_id = :mid + AND s.status NOT IN ('complete', 'done', 'skipped') + AND NOT EXISTS ( + SELECT 1 FROM slice_dependencies d + WHERE d.milestone_id = :mid + AND d.slice_id = s.id + AND d.depends_on_slice_id NOT IN ( + SELECT id FROM slices WHERE milestone_id = :mid AND status IN ('complete', 'done', 'skipped') + ) + ) + ORDER BY s.sequence, s.id + LIMIT 1`) + .get({ ":mid": milestoneId }); + if (!row) return null; + return rowToSlice(row); +} +export function getActiveTaskFromDb(milestoneId: string, sliceId: string): DbRow | undefined { + if (!currentDb) return null; + const row = currentDb + .prepare( + "SELECT * FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND status NOT IN ('complete', 'done') ORDER BY sequence, id LIMIT 1", + ) + .get({ ":mid": milestoneId, ":sid": sliceId }); + if (!row) return null; + return rowToTask(row); +} +export function getMilestoneSlices(milestoneId: string): DbRow[] { + if (!currentDb) return []; + const rows = currentDb + .prepare( + "SELECT * FROM slices WHERE milestone_id = :mid ORDER BY sequence, id", + ) + .all({ ":mid": milestoneId }); + return rows.map(rowToSlice); +} +export function getArtifact(path: string): DbRow | undefined { + if (!currentDb) return null; + const row = currentDb + .prepare("SELECT * FROM artifacts WHERE path = :path") + .get({ ":path": path }); + if (!row) return null; + return rowToArtifact(row); +} +// ─── Lightweight Query Variants (hot-path optimized) ───────────────────── +/** Fast milestone status check — avoids deserializing JSON planning fields. 
*/ +export function getActiveMilestoneIdFromDb(): string | null { + if (!currentDb) return null; + const row = currentDb + .prepare( + "SELECT id, status FROM milestones WHERE status NOT IN ('complete', 'parked') ORDER BY id LIMIT 1", + ) + .get(); + if (!row) return null; + return { id: row["id"], status: row["status"] }; +} +/** Fast slice status check — avoids deserializing JSON depends/planning fields. */ +export function getSliceStatusSummary(milestoneId: string): DbRow[] { + if (!currentDb) return []; + return currentDb + .prepare( + "SELECT id, status FROM slices WHERE milestone_id = :mid ORDER BY sequence, id", + ) + .all({ ":mid": milestoneId }) + .map((r) => ({ id: r["id"], status: r["status"] })); +} +/** Fast task status check — avoids deserializing JSON arrays and large text fields. */ +export function getActiveTaskIdFromDb(milestoneId: string, sliceId: string): string | null { + if (!currentDb) return null; + const row = currentDb + .prepare( + "SELECT id, status, title FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND status NOT IN ('complete', 'done') ORDER BY sequence, id LIMIT 1", + ) + .get({ ":mid": milestoneId, ":sid": sliceId }); + if (!row) return null; + return { + id: row["id"], + status: row["status"], + title: row["title"], + }; +} +/** Count tasks by status for a slice — useful for progress reporting without full row load. */ +export function getSliceTaskCounts(milestoneId: string, sliceId: string): DbRow | undefined { + if (!currentDb) return { total: 0, done: 0, pending: 0 }; + const row = currentDb + .prepare(`SELECT + COUNT(*) as total, + SUM(CASE WHEN status IN ('complete', 'done') THEN 1 ELSE 0 END) as done, + SUM(CASE WHEN status NOT IN ('complete', 'done') THEN 1 ELSE 0 END) as pending + FROM tasks WHERE milestone_id = :mid AND slice_id = :sid`) + .get({ ":mid": milestoneId, ":sid": sliceId }); + if (!row) return { total: 0, done: 0, pending: 0 }; + return { + total: row["total"] ?? 0, + done: row["done"] ?? 
0, + pending: row["pending"] ?? 0, + }; +} +// ─── Slice Dependencies (junction table) ───────────────────────────────── +/** Sync the slice_dependencies junction table from a slice's JSON depends array. */ +export function syncSliceDependencies(milestoneId: string, sliceId: string, depends: string[]): void { + if (!currentDb) return; + currentDb + .prepare( + "DELETE FROM slice_dependencies WHERE milestone_id = :mid AND slice_id = :sid", + ) + .run({ ":mid": milestoneId, ":sid": sliceId }); + for (const dep of depends) { + currentDb + .prepare( + "INSERT OR IGNORE INTO slice_dependencies (milestone_id, slice_id, depends_on_slice_id) VALUES (:mid, :sid, :dep)", + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":dep": dep }); + } +} +/** Get all slices that depend on a given slice. */ +export function getDependentSlices(milestoneId: string, sliceId: string): DbRow[] { + if (!currentDb) return []; + return currentDb + .prepare( + "SELECT slice_id FROM slice_dependencies WHERE milestone_id = :mid AND depends_on_slice_id = :sid", + ) + .all({ ":mid": milestoneId, ":sid": sliceId }) + .map((r) => r["slice_id"]); +} +// ─── Worktree DB Helpers ────────────────────────────────────────────────── +export function copyWorktreeDb(srcDbPath: string, destDbPath: string): void { + try { + if (!existsSync(srcDbPath)) return false; + const destDir = dirname(destDbPath); + mkdirSync(destDir, { recursive: true }); + copyFileSync(srcDbPath, destDbPath); + return true; + } catch (err) { + logError("db", "failed to copy DB to worktree", { + error: err.message, + }); + return false; + } +} +export function reconcileWorktreeDb(mainDbPath, worktreeDbPath) { + const zero = { + decisions: 0, + requirements: 0, + artifacts: 0, + milestones: 0, + slices: 0, + tasks: 0, + memories: 0, + verification_evidence: 0, + conflicts: [], + }; + if (!existsSync(worktreeDbPath)) return zero; + // Guard: bail when both paths resolve to the same physical file. 
+ // ATTACHing a WAL-mode DB to itself corrupts the WAL (#2823). + try { + if (realpathSync(mainDbPath) === realpathSync(worktreeDbPath)) return zero; + } catch (e) { + logWarning("db", `realpathSync failed: ${(e as Error)?.message}`); + } + // Sanitize path: reject any characters that could break ATTACH syntax. + // ATTACH DATABASE doesn't support parameterized paths in all providers, + // so we use strict allowlist validation instead. + if (/['";\x00]/.test(worktreeDbPath)) { + logError( + "db", + "worktree DB reconciliation failed: path contains unsafe characters", + ); + return zero; + } + if (!currentDb) { + const opened = openDatabase(mainDbPath); + if (!opened) { + logError("db", "worktree DB reconciliation failed: cannot open main DB"); + return zero; + } + } + const adapter = currentDb; + const conflicts = []; + try { + adapter.exec(`ATTACH DATABASE '${worktreeDbPath}' AS wt`); + try { + const wtInfo = adapter.prepare("PRAGMA wt.table_info('decisions')").all(); + const hasMadeBy = wtInfo.some((col) => col["name"] === "made_by"); + const wtMilestoneInfo = adapter + .prepare("PRAGMA wt.table_info('milestones')") + .all(); + const hasProductResearch = wtMilestoneInfo.some( + (col) => col["name"] === "product_research_json", + ); + const decConf = adapter + .prepare( + `SELECT m.id FROM decisions m INNER JOIN wt.decisions w ON m.id = w.id WHERE m.decision != w.decision OR m.choice != w.choice OR m.rationale != w.rationale OR ${hasMadeBy ? 
"m.made_by != w.made_by" : "'agent' != 'agent'"} OR m.superseded_by IS NOT w.superseded_by`, + ) + .all(); + for (const row of decConf) + conflicts.push(`decision ${row["id"]}: modified in both`); + const reqConf = adapter + .prepare( + `SELECT m.id FROM requirements m INNER JOIN wt.requirements w ON m.id = w.id WHERE m.description != w.description OR m.status != w.status OR m.notes != w.notes OR m.superseded_by IS NOT w.superseded_by`, + ) + .all(); + for (const row of reqConf) + conflicts.push(`requirement ${row["id"]}: modified in both`); + const merged = { + decisions: 0, + requirements: 0, + artifacts: 0, + milestones: 0, + slices: 0, + tasks: 0, + memories: 0, + verification_evidence: 0, + }; + function countChanges(result) { + return typeof result === "object" && result !== null + ? (result.changes ?? 0) + : 0; + } + adapter.exec("BEGIN"); + try { + merged.decisions = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO decisions ( + id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by + ) + SELECT id, when_context, scope, decision, choice, rationale, revisable, ${hasMadeBy ? 
"made_by" : "'agent'"}, superseded_by FROM wt.decisions + `) + .run(), + ); + merged.requirements = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO requirements ( + id, class, status, description, why, source, primary_owner, + supporting_slices, validation, notes, full_content, superseded_by + ) + SELECT id, class, status, description, why, source, primary_owner, + supporting_slices, validation, notes, full_content, superseded_by + FROM wt.requirements + `) + .run(), + ); + merged.artifacts = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO artifacts ( + path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at + ) + SELECT path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at + FROM wt.artifacts + `) + .run(), + ); + // Merge milestones — worktree may have updated status/planning fields + merged.milestones = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO milestones ( + id, title, status, depends_on, created_at, completed_at, + vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json + ) + SELECT id, title, status, depends_on, created_at, completed_at, + vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, ${hasProductResearch ? "product_research_json" : "''"} + FROM wt.milestones + `) + .run(), + ); + // Merge slices — preserve worktree progress but never downgrade completed status (#2558). + // Uses INSERT OR REPLACE with a subquery that picks the best status — if the main DB + // already has a completed slice, keep that status even if the worktree copy is stale. 
+ merged.slices = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO slices ( + milestone_id, id, title, status, risk, depends, demo, created_at, completed_at, + full_summary_md, full_uat_md, goal, success_criteria, proof_level, + integration_closure, observability_impact, adversarial_partner, adversarial_combatant, + adversarial_architect, planning_meeting_json, sequence, replan_triggered_at + ) + SELECT w.milestone_id, w.id, w.title, + CASE + WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') + THEN m.status ELSE w.status + END, + w.risk, w.depends, w.demo, w.created_at, + CASE + WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') + THEN m.completed_at ELSE w.completed_at + END, + w.full_summary_md, w.full_uat_md, w.goal, w.success_criteria, w.proof_level, + w.integration_closure, w.observability_impact, w.adversarial_partner, w.adversarial_combatant, + w.adversarial_architect, w.planning_meeting_json, w.sequence, w.replan_triggered_at + FROM wt.slices w + LEFT JOIN slices m ON m.milestone_id = w.milestone_id AND m.id = w.id + `) + .run(), + ); + // Merge tasks — preserve execution results, never downgrade completed status (#2558) + merged.tasks = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO tasks ( + milestone_id, slice_id, id, title, status, one_liner, narrative, + verification_result, duration, completed_at, blocker_discovered, + deviations, known_issues, key_files, key_decisions, full_summary_md, + description, estimate, files, verify, inputs, expected_output, + observability_impact, full_plan_md, sequence + ) + SELECT w.milestone_id, w.slice_id, w.id, w.title, + CASE + WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') + THEN m.status ELSE w.status + END, + w.one_liner, w.narrative, + w.verification_result, w.duration, + CASE + WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') + THEN m.completed_at ELSE w.completed_at + 
END, + w.blocker_discovered, + w.deviations, w.known_issues, w.key_files, w.key_decisions, w.full_summary_md, + w.description, w.estimate, w.files, w.verify, w.inputs, w.expected_output, + w.observability_impact, w.full_plan_md, w.sequence + FROM wt.tasks w + LEFT JOIN tasks m ON m.milestone_id = w.milestone_id AND m.slice_id = w.slice_id AND m.id = w.id + `) + .run(), + ); + // Merge memories — keep worktree-learned insights + merged.memories = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO memories ( + seq, id, category, content, confidence, source_unit_type, source_unit_id, + created_at, updated_at, superseded_by, hit_count + ) + SELECT seq, id, category, content, confidence, source_unit_type, source_unit_id, + created_at, updated_at, superseded_by, hit_count + FROM wt.memories + `) + .run(), + ); + // Merge verification evidence — append-only, use INSERT OR IGNORE to avoid duplicates + merged.verification_evidence = countChanges( + adapter + .prepare(` + INSERT OR IGNORE INTO verification_evidence ( + task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at + ) + SELECT task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at + FROM wt.verification_evidence + `) + .run(), + ); + adapter.exec("COMMIT"); + } catch (txErr) { + try { + adapter.exec("ROLLBACK"); + } catch (e) { + logWarning("db", `rollback failed: ${(e as Error)?.message}`); + } + throw txErr; + } + return { ...merged, conflicts }; + } finally { + try { + adapter.exec("DETACH DATABASE wt"); + } catch (e) { + logWarning("db", `detach worktree DB failed: ${(e as Error)?.message}`); + } + } + } catch (err) { + logError("db", "worktree DB reconciliation failed", { + error: (err as Error)?.message, + }); + return { ...zero, conflicts }; + } +} +// ─── Replan & Assessment Helpers ────────────────────────────────────────── +export function insertReplanHistory(entry) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
+ // INSERT OR REPLACE: idempotent on (milestone_id, slice_id, task_id) via schema v11 unique index. + // Retrying the same replan silently updates summary instead of accumulating duplicate rows. + currentDb + .prepare(`INSERT OR REPLACE INTO replan_history (milestone_id, slice_id, task_id, summary, previous_artifact_path, replacement_artifact_path, created_at) + VALUES (:milestone_id, :slice_id, :task_id, :summary, :previous_artifact_path, :replacement_artifact_path, :created_at)`) + .run({ + ":milestone_id": entry.milestoneId, + ":slice_id": entry.sliceId ?? null, + ":task_id": entry.taskId ?? null, + ":summary": entry.summary, + ":previous_artifact_path": entry.previousArtifactPath ?? null, + ":replacement_artifact_path": entry.replacementArtifactPath ?? null, + ":created_at": new Date().toISOString(), + }); +} +export function insertAssessment(entry) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR REPLACE INTO assessments (path, milestone_id, slice_id, task_id, status, scope, full_content, created_at) + VALUES (:path, :milestone_id, :slice_id, :task_id, :status, :scope, :full_content, :created_at)`) + .run({ + ":path": entry.path, + ":milestone_id": entry.milestoneId, + ":slice_id": entry.sliceId ?? null, + ":task_id": entry.taskId ?? 
null, + ":status": entry.status, + ":scope": entry.scope, + ":full_content": entry.fullContent, + ":created_at": new Date().toISOString(), + }); +} +export function deleteAssessmentByScope(milestoneId, scope) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `DELETE FROM assessments WHERE milestone_id = :mid AND scope = :scope`, + ) + .run({ ":mid": milestoneId, ":scope": scope }); +} +export function deleteVerificationEvidence(milestoneId, sliceId, taskId) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `DELETE FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid AND task_id = :tid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); +} +export function deleteTask(milestoneId, sliceId, taskId) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + // Must delete verification_evidence first (FK constraint) + currentDb + .prepare( + `DELETE FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid AND task_id = :tid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); + currentDb + .prepare( + `DELETE FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); + }); +} +export function deleteSlice(milestoneId, sliceId) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + // Cascade-style manual deletion: evidence → tasks → dependencies → slice + currentDb + .prepare( + `DELETE FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId }); + currentDb + .prepare( + `DELETE FROM tasks WHERE milestone_id = :mid AND slice_id = :sid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId }); + currentDb + .prepare( + `DELETE FROM 
slice_dependencies WHERE milestone_id = :mid AND slice_id = :sid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId }); + currentDb + .prepare( + `DELETE FROM slice_dependencies WHERE milestone_id = :mid AND depends_on_slice_id = :sid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId }); + currentDb + .prepare(`DELETE FROM slices WHERE milestone_id = :mid AND id = :sid`) + .run({ ":mid": milestoneId, ":sid": sliceId }); + }); +} +export function deleteMilestone(milestoneId) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + currentDb + .prepare(`DELETE FROM verification_evidence WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM quality_gates WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM tasks WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM slice_dependencies WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM slices WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM replan_history WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM assessments WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM artifacts WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM milestones WHERE id = :mid`) + .run({ ":mid": milestoneId }); + }); +} +export function updateSliceFields(milestoneId, sliceId, fields) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE slices SET + title = COALESCE(:title, title), + risk = COALESCE(:risk, risk), + depends = COALESCE(:depends, depends), + demo = COALESCE(:demo, demo) + WHERE milestone_id = :milestone_id AND id = :id`) + .run({ + ":milestone_id": milestoneId, + 
":id": sliceId, + ":title": fields.title ?? null, + ":risk": fields.risk ?? null, + ":depends": fields.depends ? JSON.stringify(fields.depends) : null, + ":demo": fields.demo ?? null, + }); +} +export function getReplanHistory(milestoneId, sliceId) { + if (!currentDb) return []; + if (sliceId) { + return currentDb + .prepare( + `SELECT * FROM replan_history WHERE milestone_id = :mid AND slice_id = :sid ORDER BY created_at DESC`, + ) + .all({ ":mid": milestoneId, ":sid": sliceId }); + } + return currentDb + .prepare( + `SELECT * FROM replan_history WHERE milestone_id = :mid ORDER BY created_at DESC`, + ) + .all({ ":mid": milestoneId }); +} +export function getAssessment(path) { + if (!currentDb) return null; + const row = currentDb + .prepare(`SELECT * FROM assessments WHERE path = :path`) + .get({ ":path": path }); + return row ?? null; +} +export function getAssessmentByScope(milestoneId, scope) { + if (!currentDb) return null; + const row = currentDb + .prepare( + `SELECT * FROM assessments + WHERE milestone_id = :mid AND scope = :scope + ORDER BY created_at DESC + LIMIT 1`, + ) + .get({ ":mid": milestoneId, ":scope": scope }); + return row ?? null; +} +export function getMilestoneValidationAssessment(milestoneId) { + return getAssessmentByScope(milestoneId, "milestone-validation"); +} +// ─── Quality Gates ─────────────────────────────────────────────────────── +function rowToGate(row) { + return { + milestone_id: row["milestone_id"], + slice_id: row["slice_id"], + gate_id: row["gate_id"], + scope: row["scope"], + task_id: row["task_id"] ?? "", + status: row["status"], + verdict: row["verdict"] || "", + rationale: row["rationale"] || "", + findings: row["findings"] || "", + evaluated_at: row["evaluated_at"] ?? 
null, + }; +} +export function insertGateRow(g) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO quality_gates (milestone_id, slice_id, gate_id, scope, task_id, status) + VALUES (:mid, :sid, :gid, :scope, :tid, :status)`) + .run({ + ":mid": g.milestoneId, + ":sid": g.sliceId, + ":gid": g.gateId, + ":scope": g.scope, + ":tid": g.taskId ?? "", + ":status": g.status ?? "pending", + }); +} +export function saveGateResult(g) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE quality_gates + SET status = 'complete', verdict = :verdict, rationale = :rationale, + findings = :findings, evaluated_at = :evaluated_at + WHERE milestone_id = :mid AND slice_id = :sid AND gate_id = :gid + AND task_id = :tid`) + .run({ + ":mid": g.milestoneId, + ":sid": g.sliceId, + ":gid": g.gateId, + ":tid": g.taskId ?? "", + ":verdict": g.verdict, + ":rationale": g.rationale, + ":findings": g.findings, + ":evaluated_at": new Date().toISOString(), + }); + const outcome = + g.verdict === "pass" + ? "pass" + : g.verdict === "omitted" + ? "manual-attention" + : "fail"; + insertGateRun({ + traceId: `quality-gate:${g.milestoneId}:${g.sliceId}`, + turnId: `gate:${g.gateId}:${g.taskId ?? "slice"}`, + gateId: g.gateId, + gateType: "quality-gate", + milestoneId: g.milestoneId, + sliceId: g.sliceId, + taskId: g.taskId ?? undefined, + outcome, + failureClass: + outcome === "fail" + ? "verification" + : outcome === "manual-attention" + ? "manual-attention" + : "none", + rationale: g.rationale, + findings: g.findings, + attempt: 1, + maxAttempts: 1, + retryable: false, + evaluatedAt: new Date().toISOString(), + }); +} +export function getPendingGates(milestoneId, sliceId, scope) { + if (!currentDb) return []; + const sql = scope + ? 
`SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND scope = :scope AND status = 'pending'` + : `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND status = 'pending'`; + const params = { + ":mid": milestoneId, + ":sid": sliceId, + }; + if (scope) params[":scope"] = scope; + return currentDb.prepare(sql).all(params).map(rowToGate); +} +export function getGateResults(milestoneId, sliceId, scope) { + if (!currentDb) return []; + const sql = scope + ? `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND scope = :scope` + : `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid`; + const params = { + ":mid": milestoneId, + ":sid": sliceId, + }; + if (scope) params[":scope"] = scope; + return currentDb.prepare(sql).all(params).map(rowToGate); +} +export function markAllGatesOmitted(milestoneId, sliceId) { + if (!currentDb) return; + currentDb + .prepare(`UPDATE quality_gates SET status = 'omitted', verdict = 'omitted', evaluated_at = :now + WHERE milestone_id = :mid AND slice_id = :sid AND status = 'pending'`) + .run({ + ":mid": milestoneId, + ":sid": sliceId, + ":now": new Date().toISOString(), + }); +} +export function getPendingSliceGateCount(milestoneId, sliceId) { + if (!currentDb) return 0; + const row = currentDb + .prepare(`SELECT COUNT(*) as cnt FROM quality_gates + WHERE milestone_id = :mid AND slice_id = :sid AND scope = 'slice' AND status = 'pending'`) + .get({ ":mid": milestoneId, ":sid": sliceId }); + return row ? row["cnt"] : 0; +} +/** + * Return pending gate rows owned by a specific workflow turn. + * + * Unlike `getPendingGates(..., scope)`, this filters by the registry's + * `ownerTurn` metadata so callers can distinguish Q3/Q4 (owned by + * gate-evaluate) from Q8 (owned by complete-slice) even though both are + * scope:"slice". Pass `taskId` to narrow task-scoped results to one task. 
+ */ +export function getPendingGatesForTurn(milestoneId, sliceId, turn, taskId) { + if (!currentDb) return []; + const ids = getGateIdsForTurn(turn); + if (ids.size === 0) return []; + const idList = [...ids]; + const placeholders = idList.map((_, i) => `:gid${i}`).join(","); + const params = { + ":mid": milestoneId, + ":sid": sliceId, + }; + idList.forEach((id, i) => { + params[`:gid${i}`] = id; + }); + let sql = `SELECT * FROM quality_gates + WHERE milestone_id = :mid AND slice_id = :sid + AND status = 'pending' + AND gate_id IN (${placeholders})`; + if (taskId !== undefined) { + sql += ` AND task_id = :tid`; + params[":tid"] = taskId; + } + return currentDb.prepare(sql).all(params).map(rowToGate); +} +/** + * Count pending gates for a turn. Convenience wrapper used by state + * derivation to decide whether a phase transition should pause. + */ +export function getPendingGateCountForTurn(milestoneId, sliceId, turn) { + return getPendingGatesForTurn(milestoneId, sliceId, turn).length; +} +/** @deprecated Gate runs are now written to JSONL trace files via appendTraceEvent(). This is a no-op kept for import compatibility. */ +export function insertGateRun(_entry) { + // no-op: gate runs now written to JSONL trace files +} +/** @deprecated Turn git transactions are now written to JSONL audit events. This is a no-op kept for import compatibility. */ +export function upsertTurnGitTransaction(_entry) { + // no-op: turn git transactions now written to JSONL audit events +} +export function recordUokRunStart(entry) { + if (!currentDb) return; + const now = entry.startedAt ?? 
new Date().toISOString(); + currentDb + .prepare(`INSERT INTO uok_runs ( + run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at + ) VALUES ( + :run_id, :session_id, :path, 'started', :started_at, NULL, NULL, :flags_json, :updated_at + ) + ON CONFLICT(run_id) DO UPDATE SET + session_id = excluded.session_id, + path = excluded.path, + status = 'started', + started_at = excluded.started_at, + ended_at = NULL, + error = NULL, + flags_json = excluded.flags_json, + updated_at = excluded.updated_at`) + .run({ + ":run_id": entry.runId, + ":session_id": entry.sessionId ?? null, + ":path": entry.path ?? "", + ":started_at": now, + ":flags_json": JSON.stringify(entry.flags ?? {}), + ":updated_at": now, + }); +} +const MAX_ERROR_STORED_BYTES = 4096; +function capErrorForStorage(error, runId) { + if (!error || error.length <= MAX_ERROR_STORED_BYTES) return error; + try { + const errDir = join(dirname(currentPath), "runtime", "errors"); + mkdirSync(errDir, { recursive: true }); + writeFileSync(join(errDir, `${runId}.txt`), error, "utf-8"); + } catch { + // non-fatal — best-effort spill + } + const head = error.slice(0, 2048); + const tail = error.slice(-2048); + const dropped = error.length - MAX_ERROR_STORED_BYTES; + return `${head}\n\n[...${dropped} chars truncated — full error in .sf/runtime/errors/${runId}.txt]\n\n${tail}`; +} +export function recordUokRunExit(entry) { + if (!currentDb) return; + const now = entry.endedAt ?? 
new Date().toISOString(); + currentDb + .prepare(`INSERT INTO uok_runs ( + run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at + ) VALUES ( + :run_id, :session_id, :path, :status, :started_at, :ended_at, :error, :flags_json, :updated_at + ) + ON CONFLICT(run_id) DO UPDATE SET + session_id = COALESCE(excluded.session_id, uok_runs.session_id), + path = CASE WHEN excluded.path = '' THEN uok_runs.path ELSE excluded.path END, + status = excluded.status, + ended_at = excluded.ended_at, + error = excluded.error, + flags_json = CASE WHEN excluded.flags_json = '{}' THEN uok_runs.flags_json ELSE excluded.flags_json END, + updated_at = excluded.updated_at`) + .run({ + ":run_id": entry.runId, + ":session_id": entry.sessionId ?? null, + ":path": entry.path ?? "", + ":status": entry.status ?? "ok", + ":started_at": entry.startedAt ?? now, + ":ended_at": now, + ":error": entry.error ? capErrorForStorage(entry.error, entry.runId) : null, + ":flags_json": JSON.stringify(entry.flags ?? {}), + ":updated_at": now, + }); +} +export function getUokRuns(limit = 500) { + if (!currentDb) return []; + return currentDb + .prepare( + `SELECT run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at + FROM uok_runs + ORDER BY started_at DESC + LIMIT :limit`, + ) + .all({ ":limit": limit }) + .map((row) => ({ + runId: row.run_id, + sessionId: row.session_id, + path: row.path, + status: row.status, + startedAt: row.started_at, + endedAt: row.ended_at, + error: row.error, + flags: (() => { + try { + return JSON.parse(row.flags_json || "{}"); + } catch { + return {}; + } + })(), + updatedAt: row.updated_at, + })); +} +/** @deprecated Audit events are now written exclusively to JSONL files via emitUokAuditEvent(). This is a no-op kept for import compatibility. 
*/ +export function insertAuditEvent(_entry) { + // no-op: audit events now written exclusively to JSONL files +} +// ─── Single-writer bypass wrappers ─────────────────────────────────────── +// These wrappers exist so modules outside this file never need to call +// `_getAdapter()` for writes. Each one is a byte-equivalent replacement for +// a raw prepare/run previously issued from another module. Keep them +// minimal and direct — they exist to hold SQL text in one place, not to +// add new behavior. +/** Delete a decision row by id. Used by db-writer.ts rollback on disk-write failure. */ +export function deleteDecisionById(id) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb.prepare("DELETE FROM decisions WHERE id = :id").run({ ":id": id }); +} +/** Delete a requirement row by id. Used by db-writer.ts rollback on disk-write failure. */ +export function deleteRequirementById(id) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare("DELETE FROM requirements WHERE id = :id") + .run({ ":id": id }); +} +/** Delete an artifact row by path. Used by db-writer.ts rollback on disk-write failure. */ +export function deleteArtifactByPath(path) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare("DELETE FROM artifacts WHERE path = :path") + .run({ ":path": path }); +} +/** + * Drop all rows from tasks/slices/milestones in dependency order inside a + * transaction. Used by `sf recover` to rebuild engine state from markdown. + */ +export function clearEngineHierarchy() { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + currentDb.exec("DELETE FROM tasks"); + currentDb.exec("DELETE FROM slices"); + currentDb.exec("DELETE FROM milestones"); + }); +} +/** + * INSERT OR IGNORE a slice during event replay (workflow-reconcile.ts). 
+ * Strict insert-or-ignore semantics are required here to avoid the + * `insertSlice` ON CONFLICT path that could downgrade an already-completed + * slice back to 'pending'. + */ +export function insertOrIgnoreSlice(args) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO slices (milestone_id, id, title, status, created_at) + VALUES (:mid, :sid, :title, 'pending', :ts)`) + .run({ + ":mid": args.milestoneId, + ":sid": args.sliceId, + ":title": args.title, + ":ts": args.createdAt, + }); +} +/** + * INSERT OR IGNORE a task during event replay (workflow-reconcile.ts). + * Same rationale as `insertOrIgnoreSlice`. + */ +export function insertOrIgnoreTask(args) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO tasks (milestone_id, slice_id, id, title, status, created_at) + VALUES (:mid, :sid, :tid, :title, 'pending', :ts)`) + .run({ + ":mid": args.milestoneId, + ":sid": args.sliceId, + ":tid": args.taskId, + ":title": args.title, + ":ts": args.createdAt, + }); +} +/** + * Stamp the `replan_triggered_at` column on a slice. Used by triage-resolution + * when a user capture requests a replan so the dispatcher can detect the + * trigger via DB in addition to the on-disk REPLAN-TRIGGER.md marker. + */ +export function setSliceReplanTriggeredAt(milestoneId, sliceId, ts) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + "UPDATE slices SET replan_triggered_at = :ts WHERE milestone_id = :mid AND id = :sid", + ) + .run({ ":ts": ts, ":mid": milestoneId, ":sid": sliceId }); +} +function boolToInt(value) { + if (value === null || value === undefined) return null; + return value ? 
1 : 0; +} +export function insertLlmTaskOutcome(input) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + try { + currentDb + .prepare(`INSERT INTO llm_task_outcomes ( + model_id, + provider, + unit_type, + unit_id, + succeeded, + retries, + escalated, + verification_passed, + blocker_discovered, + duration_ms, + tokens_total, + cost_usd, + recorded_at + ) VALUES ( + :model_id, + :provider, + :unit_type, + :unit_id, + :succeeded, + :retries, + :escalated, + :verification_passed, + :blocker_discovered, + :duration_ms, + :tokens_total, + :cost_usd, + :recorded_at + ) + ON CONFLICT(unit_type, unit_id, recorded_at) DO UPDATE SET + model_id = excluded.model_id, + provider = excluded.provider, + succeeded = excluded.succeeded, + retries = excluded.retries, + escalated = excluded.escalated, + verification_passed = excluded.verification_passed, + blocker_discovered = excluded.blocker_discovered, + duration_ms = excluded.duration_ms, + tokens_total = excluded.tokens_total, + cost_usd = excluded.cost_usd`) + .run({ + ":model_id": input.modelId, + ":provider": input.provider, + ":unit_type": input.unitType, + ":unit_id": input.unitId, + ":succeeded": boolToInt(input.succeeded), + ":retries": input.retries ?? 0, + ":escalated": boolToInt(input.escalated ?? false), + ":verification_passed": boolToInt(input.verification_passed ?? null), + ":blocker_discovered": boolToInt(input.blocker_discovered ?? false), + ":duration_ms": input.duration_ms ?? null, + ":tokens_total": input.tokens_total ?? null, + ":cost_usd": input.cost_usd ?? null, + ":recorded_at": input.recorded_at ?? Date.now(), + }); + return true; + } catch { + return false; + } +} + +/** + * Query LLM task outcomes for a specific unit. + * + * Purpose: enable outcome-learning and cost-guard gates to inspect + * historical performance of a unit type + id combination. + * + * Consumer: uok/outcome-learning-gate.js, uok/cost-guard-gate.js. 
+ */ +export function getLlmTaskOutcomesByUnit(unitType, unitId, limit = 20) { + if (!currentDb) return []; + try { + return currentDb + .prepare( + `SELECT + model_id, + provider, + unit_type, + unit_id, + succeeded, + retries, + escalated, + verification_passed, + blocker_discovered, + duration_ms, + tokens_total, + cost_usd, + recorded_at + FROM llm_task_outcomes + WHERE unit_type = :unit_type + AND unit_id = :unit_id + ORDER BY recorded_at DESC + LIMIT :limit`, + ) + .all({ + ":unit_type": unitType, + ":unit_id": unitId, + ":limit": limit, + }); + } catch { + return []; + } +} +/** + * Query LLM task outcomes for a specific model. + * + * Purpose: enable cost-guard to detect models with high failure rates + * or excessive cumulative spend. + * + * Consumer: uok/cost-guard-gate.js. + */ +export function getLlmTaskOutcomesByModel(modelId, limit = 50) { + if (!currentDb) return []; + try { + return currentDb + .prepare( + `SELECT + model_id, + provider, + unit_type, + unit_id, + succeeded, + retries, + escalated, + verification_passed, + blocker_discovered, + duration_ms, + tokens_total, + cost_usd, + recorded_at + FROM llm_task_outcomes + WHERE model_id = :model_id + ORDER BY recorded_at DESC + LIMIT :limit`, + ) + .all({ + ":model_id": modelId, + ":limit": limit, + }); + } catch { + return []; + } +} +/** + * Query recent LLM task outcomes across all units. + * + * Purpose: provide a rolling window of outcomes for system-wide + * health and spend analysis. + * + * Consumer: uok/diagnostic-synthesis.js, uok/cost-guard-gate.js. 
*/ +export function getRecentLlmTaskOutcomes(hours = 24, limit = 100): DbRow[] { + if (!currentDb) return []; + const cutoff = Date.now() - hours * 60 * 60 * 1000; + try { + return currentDb + .prepare( + `SELECT + model_id, + provider, + unit_type, + unit_id, + succeeded, + retries, + escalated, + verification_passed, + blocker_discovered, + duration_ms, + tokens_total, + cost_usd, + recorded_at + FROM llm_task_outcomes + WHERE recorded_at >= :cutoff + ORDER BY recorded_at DESC + LIMIT :limit`, + ) + .all({ + ":cutoff": cutoff, + ":limit": limit, + }); + } catch { + return []; + } +} +/** + * Aggregate LLM task outcome statistics for a model over a time window. + * + * Returns { total, succeeded, failed, totalCostUsd, totalTokens, avgDurationMs }. + * + * Consumer: uok/cost-guard-gate.js, uok/outcome-learning-gate.js. + */ +export function getLlmTaskOutcomeStats(modelId: string, windowHours = 24): DbRow { + if (!currentDb) { + return { + total: 0, + succeeded: 0, + failed: 0, + totalCostUsd: 0, + totalTokens: 0, + avgDurationMs: 0, + }; + } + const cutoff = Date.now() - windowHours * 60 * 60 * 1000; + try { + const row = currentDb + .prepare( + `SELECT + COUNT(*) AS total, + COALESCE(SUM(CASE WHEN succeeded = 1 THEN 1 ELSE 0 END), 0) AS succeeded, + COALESCE(SUM(CASE WHEN succeeded = 0 THEN 1 ELSE 0 END), 0) AS failed, + COALESCE(SUM(cost_usd), 0) AS totalCostUsd, + COALESCE(SUM(tokens_total), 0) AS totalTokens, + COALESCE(AVG(duration_ms), 0) AS avgDurationMs + FROM llm_task_outcomes + WHERE model_id = :model_id + AND recorded_at >= :cutoff`, + ) + .get({ ":model_id": modelId, ":cutoff": cutoff }); + if (!row) { + return { + total: 0, + succeeded: 0, + failed: 0, + totalCostUsd: 0, + totalTokens: 0, + avgDurationMs: 0, + }; + } + return { + total: row.total ?? 0, + succeeded: row.succeeded ?? 0, + failed: row.failed ?? 0, + totalCostUsd: row.totalCostUsd ?? 0, + totalTokens: row.totalTokens ?? 0, + avgDurationMs: row.avgDurationMs ??
0, + }; + } catch { + return { + total: 0, + succeeded: 0, + failed: 0, + totalCostUsd: 0, + totalTokens: 0, + avgDurationMs: 0, + }; + } +} +/** + * Aggregate gate run statistics for a specific gate over a time window. + * + * Returns { total, pass, fail, retry, manualAttention, lastEvaluatedAt }. + * + * Consumer: uok/diagnostic-synthesis.js, uok/gate-runner.js health checks. + */ +export function getGateRunStats(gateId: string, windowHours = 24): DbRow | undefined { + try { + const basePath = currentPath && currentPath !== ":memory:" + ? dirname(dirname(currentPath)) + : process.cwd(); + const events = readTraceEvents(basePath, "gate_run", windowHours) + .filter((e) => e.gateId === gateId); + const stats = { + total: events.length, + pass: 0, + fail: 0, + retry: 0, + manualAttention: 0, + lastEvaluatedAt: null, + }; + for (const e of events) { + if (e.outcome === "pass") stats.pass++; + else if (e.outcome === "fail") stats.fail++; + else if (e.outcome === "retry") stats.retry++; + else if (e.outcome === "manual-attention") stats.manualAttention++; + if ( + !stats.lastEvaluatedAt || + (e.evaluatedAt ?? e.ts) > stats.lastEvaluatedAt + ) + stats.lastEvaluatedAt = e.evaluatedAt ?? e.ts; + } + return stats; + } catch { + return { + total: 0, + pass: 0, + fail: 0, + retry: 0, + manualAttention: 0, + lastEvaluatedAt: null, + }; + } +} + +/** + * Read the circuit breaker state for a specific gate. + * + * Returns { gateId, state, failureStreak, lastFailureAt, openedAt, halfOpenAttempts, updatedAt }. + * If no record exists, returns a default closed state. + * + * Consumer: uok/gate-runner.js before executing a gate. 
+ */ +export function getGateCircuitBreaker(gateId: string): DbRow | undefined { + if (!currentDb) { + return { + gateId, + state: "closed", + failureStreak: 0, + lastFailureAt: null, + openedAt: null, + halfOpenAttempts: 0, + updatedAt: null, + }; + } + try { + const row = currentDb + .prepare( + `SELECT gate_id, state, failure_streak, last_failure_at, opened_at, half_open_attempts, updated_at + FROM gate_circuit_breakers + WHERE gate_id = :gate_id`, + ) + .get({ ":gate_id": gateId }); + if (!row) { + return { + gateId, + state: "closed", + failureStreak: 0, + lastFailureAt: null, + openedAt: null, + halfOpenAttempts: 0, + updatedAt: null, + }; + } + return { + gateId: row.gate_id, + state: row.state, + failureStreak: row.failure_streak ?? 0, + lastFailureAt: row.last_failure_at ?? null, + openedAt: row.opened_at ?? null, + halfOpenAttempts: row.half_open_attempts ?? 0, + updatedAt: row.updated_at ?? null, + }; + } catch { + return { + gateId, + state: "closed", + failureStreak: 0, + lastFailureAt: null, + openedAt: null, + halfOpenAttempts: 0, + updatedAt: null, + }; + } +} +/** + * Update the circuit breaker state for a specific gate. + * + * Consumer: uok/gate-runner.js after executing a gate. 
+ */ +export function updateGateCircuitBreaker(gateId: string, updates: Record): void { + if (!currentDb) return; + currentDb + .prepare( + `INSERT INTO gate_circuit_breakers ( + gate_id, state, failure_streak, last_failure_at, opened_at, half_open_attempts, updated_at + ) VALUES ( + :gate_id, :state, :failure_streak, :last_failure_at, :opened_at, :half_open_attempts, :updated_at + ) + ON CONFLICT(gate_id) DO UPDATE SET + state = excluded.state, + failure_streak = excluded.failure_streak, + last_failure_at = COALESCE(excluded.last_failure_at, gate_circuit_breakers.last_failure_at), + opened_at = COALESCE(excluded.opened_at, gate_circuit_breakers.opened_at), + half_open_attempts = excluded.half_open_attempts, + updated_at = excluded.updated_at`, + ) + .run({ + ":gate_id": gateId, + ":state": updates.state ?? "closed", + ":failure_streak": updates.failureStreak ?? 0, + ":last_failure_at": updates.lastFailureAt ?? null, + ":opened_at": updates.openedAt ?? null, + ":half_open_attempts": updates.halfOpenAttempts ?? 0, + ":updated_at": new Date().toISOString(), + }); + return { total: 0, avgMs: 0, p50Ms: 0, p95Ms: 0, maxMs: 0 }; +} +export function getGateLatencyStats(gateId: string, windowHours = 24): DbRow[] { + try { + const basePath = currentPath && currentPath !== ":memory:" + ? dirname(dirname(currentPath)) + : process.cwd(); + const durations = readTraceEvents(basePath, "gate_run", windowHours) + .filter((e) => e.gateId === gateId && typeof e.durationMs === "number") + .map((e) => e.durationMs) + .sort((a, b) => a - b); + if (durations.length === 0) return { p50: null, p95: null, count: 0, total: 0, avgMs: 0, p50Ms: 0, p95Ms: 0, maxMs: 0 }; + const p50Ms = durations[Math.floor(durations.length * 0.5)] ?? 0; + const p95Ms = durations[Math.floor(durations.length * 0.95)] ?? 0; + const maxMs = durations[durations.length - 1] ?? 
0; + const avgMs = Math.round(durations.reduce((s, v) => s + v, 0) / durations.length); + return { + p50: p50Ms, + p95: p95Ms, + count: durations.length, + total: durations.length, + avgMs, + p50Ms, + p95Ms, + maxMs, + }; + } catch { + return { p50: null, p95: null, count: 0, total: 0, avgMs: 0, p50Ms: 0, p95Ms: 0, maxMs: 0 }; + } +} +export function getDistinctGateIds(): string[] { + try { + const basePath = currentPath && currentPath !== ":memory:" + ? dirname(dirname(currentPath)) + : process.cwd(); + const events = readTraceEvents(basePath, "gate_run", 24 * 30); // 30 days + return [...new Set(events.map((e) => e.gateId).filter(Boolean))]; + } catch { + return []; + } +} +export function insertUokMessage(msg: Record): void { + if (!currentDb) return; + currentDb + .prepare( + `INSERT OR IGNORE INTO uok_messages (id, from_agent, to_agent, body, metadata_json, sent_at, delivered_at) + VALUES (:id, :from_agent, :to_agent, :body, :metadata_json, :sent_at, :delivered_at)`, + ) + .run({ + ":id": msg.id, + ":from_agent": msg.from, + ":to_agent": msg.to, + ":body": msg.body ?? "", + ":metadata_json": JSON.stringify(msg.metadata ?? {}), + ":sent_at": msg.sentAt, + ":delivered_at": msg.deliveredAt ?? 
null, + }); +} +export function getUokMessagesForAgent( + agentId, + limit = 1000, + unreadOnly = false, +) { + if (!currentDb) return []; + try { + let sql = `SELECT m.id, m.from_agent AS "from", m.to_agent AS "to", m.body, m.metadata_json AS metadataJson, m.sent_at AS sentAt, m.delivered_at AS deliveredAt, + CASE WHEN r.agent_id IS NOT NULL THEN 1 ELSE 0 END AS read + FROM uok_messages m + LEFT JOIN uok_message_reads r ON r.message_id = m.id AND r.agent_id = :agent_id + WHERE m.to_agent = :agent_id`; + if (unreadOnly) { + sql += " AND r.agent_id IS NULL"; + } + sql += " ORDER BY m.sent_at ASC LIMIT :limit"; + const rows = currentDb.prepare(sql).all({ + ":agent_id": agentId, + ":limit": Math.max(1, Math.min(10_000, Number(limit) || 1000)), + }); + return rows.map((r) => ({ + id: r.id, + from: r.from, + to: r.to, + body: r.body, + metadata: parseJsonObject(r.metadataJson, {}), + sentAt: r.sentAt, + deliveredAt: r.deliveredAt, + read: !!r.read, + })); + } catch { + return []; + } +} +export function getUokConversation(agentA: string, agentB: string, limit = 1000): DbRow[] { + if (!currentDb) return []; + try { + const rows = currentDb + .prepare( + `SELECT id, from_agent AS "from", to_agent AS "to", body, metadata_json AS metadataJson, sent_at AS sentAt, delivered_at AS deliveredAt + FROM uok_messages + WHERE (from_agent = :a AND to_agent = :b) OR (from_agent = :b AND to_agent = :a) + ORDER BY sent_at DESC + LIMIT :limit`, + ) + .all({ ":a": agentA, ":b": agentB, ":limit": limit }); + return rows.map((r) => ({ + id: r.id, + from: r.from, + to: r.to, + body: r.body, + metadata: parseJsonObject(r.metadataJson, {}), + sentAt: r.sentAt, + deliveredAt: r.deliveredAt, + })); + } catch { + return []; + } +} +export function markUokMessageRead(messageId: string, agentId: string): void { + if (!currentDb) return; + try { + currentDb + .prepare( + `INSERT OR IGNORE INTO uok_message_reads (message_id, agent_id, read_at) VALUES (:message_id, :agent_id, :read_at)`, + ) + .run({ 
+ ":message_id": messageId, + ":agent_id": agentId, + ":read_at": new Date().toISOString(), + }); + return true; + } catch { + return false; + } +} +export function getUokMessageUnreadCount(agentId: string): number { + if (!currentDb) return 0; + try { + const row = currentDb + .prepare( + `SELECT COUNT(*) AS cnt FROM uok_messages m + WHERE m.to_agent = :agent_id + AND NOT EXISTS ( + SELECT 1 FROM uok_message_reads r + WHERE r.message_id = m.id AND r.agent_id = :agent_id + )`, + ) + .get({ ":agent_id": agentId }); + return row?.cnt ?? 0; + } catch { + return 0; + } +} +export function compactUokMessages(retentionDays: number): void { + if (!currentDb) return { before: 0, after: 0 }; + try { + const cutoff = new Date( + Date.now() - retentionDays * 24 * 60 * 60 * 1000, + ).toISOString(); + const beforeRow = currentDb + .prepare("SELECT COUNT(*) AS cnt FROM uok_messages") + .get(); + currentDb + .prepare("DELETE FROM uok_messages WHERE sent_at < :cutoff") + .run({ ":cutoff": cutoff }); + const afterRow = currentDb + .prepare("SELECT COUNT(*) AS cnt FROM uok_messages") + .get(); + return { before: beforeRow?.cnt ?? 0, after: afterRow?.cnt ?? 
0 }; + } catch { + return { before: 0, after: 0 }; + } +} +export function getUokMessageReadIds(agentId: string): string[] { + if (!currentDb) return []; + try { + const rows = currentDb + .prepare( + "SELECT message_id FROM uok_message_reads WHERE agent_id = :agent_id", + ) + .all({ ":agent_id": agentId }); + return rows.map((r) => r.message_id); + } catch { + return []; + } +} +export function getUokMessageBusMetrics(): DbRow | undefined { + if (!currentDb) { + return { + totalMessages: 0, + totalUnread: 0, + uniqueAgents: 0, + uniqueConversations: 0, + }; + } + try { + const totalRow = currentDb + .prepare("SELECT COUNT(*) AS cnt FROM uok_messages") + .get(); + const unreadRow = currentDb + .prepare( + `SELECT COUNT(*) AS cnt FROM uok_messages m + WHERE NOT EXISTS ( + SELECT 1 FROM uok_message_reads r + WHERE r.message_id = m.id + AND r.agent_id = m.to_agent + )`, + ) + .get(); + const agentsRow = currentDb + .prepare(`SELECT COUNT(DISTINCT to_agent) AS cnt FROM uok_messages`) + .get(); + const convRow = currentDb + .prepare( + `SELECT COUNT(DISTINCT from_agent || ':' || to_agent) AS cnt FROM uok_messages`, + ) + .get(); + return { + totalMessages: totalRow?.cnt ?? 0, + totalUnread: unreadRow?.cnt ?? 0, + uniqueAgents: agentsRow?.cnt ?? 0, + uniqueConversations: convRow?.cnt ?? 0, + }; + } catch { + return { + totalMessages: 0, + totalUnread: 0, + uniqueAgents: 0, + uniqueConversations: 0, + }; + } +} +function normalizeScheduleScope(scope: unknown): string { + return scope === "global" ? "global" : "project"; +} +function scheduleEntryFromRow(row: Record): Record { + if (!row) return null; + const full = parseJsonObject(row.full_json, {}); + return { + ...full, + schemaVersion: row.schema_version ?? full.schemaVersion ?? 1, + id: row.id, + kind: row.kind, + status: row.status, + due_at: row.due_at, + created_at: row.created_at, + snoozed_at: row.snoozed_at ?? full.snoozed_at, + payload: parseJsonObject(row.payload_json, full.payload ?? 
{}), + created_by: row.created_by, + autonomous_dispatch: !!row.autonomous_dispatch, + }; +} +/** + * Append a schedule entry to the DB-backed schedule ledger. + * + * Purpose: keep time-bound reminders in structured SQLite state so status, + * due-date, and scope queries are schema-owned instead of JSONL-owned. + * + * Consumer: schedule-store.js for /schedule and launch/auto due-item checks. + */ +export function insertScheduleEntry(scope: string, entry: Record, importedFrom: string | null = null): void { + if (!currentDb) return; + const normalizedScope = normalizeScheduleScope(scope); + const schemaVersion = entry.schemaVersion ?? 1; + const full = { schemaVersion, ...entry }; + currentDb + .prepare( + `INSERT INTO schedule_entries ( + scope, id, schema_version, kind, status, due_at, created_at, + snoozed_at, payload_json, created_by, autonomous_dispatch, full_json, + imported_from + ) VALUES ( + :scope, :id, :schema_version, :kind, :status, :due_at, :created_at, + :snoozed_at, :payload_json, :created_by, :autonomous_dispatch, :full_json, + :imported_from + )`, + ) + .run({ + ":scope": normalizedScope, + ":id": entry.id, + ":schema_version": schemaVersion, + ":kind": entry.kind ?? "reminder", + ":status": entry.status ?? "pending", + ":due_at": entry.due_at ?? "", + ":created_at": entry.created_at ?? "", + ":snoozed_at": entry.snoozed_at ?? null, + ":payload_json": JSON.stringify(entry.payload ?? {}), + ":created_by": entry.created_by ?? "user", + ":autonomous_dispatch": entry.autonomous_dispatch ? 1 : 0, + ":full_json": JSON.stringify(full), + ":imported_from": importedFrom, + }); +} +/** + * Return latest schedule entries per id for a scope. + * + * Purpose: preserve append-ledger semantics while serving queries from SQLite. + * + * Consumer: schedule-store.js readEntries/findDue/findUpcoming. 
+ */ +export function getScheduleEntries(scope: string): DbRow[] { + if (!currentDb) return []; + const normalizedScope = normalizeScheduleScope(scope); + try { + const rows = currentDb + .prepare( + `SELECT s.* + FROM schedule_entries s + JOIN ( + SELECT id, MAX(seq) AS max_seq + FROM schedule_entries + WHERE scope = :scope + GROUP BY id + ) latest ON latest.id = s.id AND latest.max_seq = s.seq + WHERE s.scope = :scope + ORDER BY s.due_at ASC, s.created_at ASC, s.seq ASC`, + ) + .all({ ":scope": normalizedScope }); + return rows.map(scheduleEntryFromRow).filter(Boolean); + } catch { + return []; + } +} +export function countScheduleEntries(scope: string): number { + if (!currentDb) return 0; + const normalizedScope = normalizeScheduleScope(scope); + try { + const row = currentDb + .prepare( + "SELECT COUNT(*) AS cnt FROM schedule_entries WHERE scope = :scope", + ) + .get({ ":scope": normalizedScope }); + return row?.cnt ?? 0; + } catch { + return 0; + } +} +function asStringOrNull(value: unknown): string | null { + return typeof value === "string" && value.length > 0 ? value : null; +} +/** + * Persist a repository profile snapshot and update current file observations. + * + * Purpose: make harness evolution's read-only repo facts queryable across + * sessions while preserving first-seen timestamps for untracked observations. + * + * Consumer: `/harness profile` and future pre-plan profile snapshots. 
+ */ +export function recordRepoProfile(profile: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + currentDb + .prepare(`INSERT OR REPLACE INTO repo_profiles ( + profile_id, project_hash, project_root, head, branch, remote_hash, + dirty, profile_json, created_at + ) VALUES ( + :profile_id, :project_hash, :project_root, :head, :branch, :remote_hash, + :dirty, :profile_json, :created_at + )`) + .run({ + ":profile_id": profile.profileId, + ":project_hash": profile.projectHash, + ":project_root": profile.projectRoot, + ":head": profile.git.head, + ":branch": profile.git.branch, + ":remote_hash": profile.git.remoteHash, + ":dirty": profile.git.dirty ? 1 : 0, + ":profile_json": JSON.stringify(profile), + ":created_at": profile.createdAt, + }); + const stmt = currentDb.prepare(`INSERT INTO repo_file_observations ( + path, latest_profile_id, git_status, ownership, language, size_bytes, + content_hash, summary, first_seen_at, last_seen_at, adopted_at, + adoption_unit_id + ) VALUES ( + :path, :latest_profile_id, :git_status, :ownership, :language, :size_bytes, + :content_hash, :summary, :first_seen_at, :last_seen_at, :adopted_at, + :adoption_unit_id + ) + ON CONFLICT(path) DO UPDATE SET + latest_profile_id = excluded.latest_profile_id, + git_status = excluded.git_status, + ownership = CASE + WHEN repo_file_observations.ownership = 'sf_generated' + THEN repo_file_observations.ownership + WHEN repo_file_observations.ownership = 'candidate_harness' + THEN repo_file_observations.ownership + ELSE excluded.ownership + END, + language = excluded.language, + size_bytes = excluded.size_bytes, + content_hash = excluded.content_hash, + summary = excluded.summary, + first_seen_at = repo_file_observations.first_seen_at, + last_seen_at = excluded.last_seen_at, + adopted_at = COALESCE(repo_file_observations.adopted_at, excluded.adopted_at), + adoption_unit_id = COALESCE(repo_file_observations.adoption_unit_id, 
excluded.adoption_unit_id)`); + for (const file of profile.git.changedFiles) { + stmt.run({ + ":path": file.path, + ":latest_profile_id": profile.profileId, + ":git_status": file.gitStatus, + ":ownership": file.ownership, + ":language": file.language, + ":size_bytes": file.sizeBytes, + ":content_hash": file.contentHash, + ":summary": file.summary, + ":first_seen_at": file.firstSeenAt, + ":last_seen_at": file.lastSeenAt, + ":adopted_at": file.adoptedAt, + ":adoption_unit_id": file.adoptionUnitId, + }); + } + }); +} +/** + * Return the most recently recorded repository profile. + * + * Purpose: let harness planning and diagnostics inspect the latest factual + * repo snapshot without re-running the profiler. + * + * Consumer: harness status commands and future plan-phase coverage checks. + */ +export function getLatestRepoProfile(): DbRow | undefined { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const row = currentDb + .prepare(`SELECT profile_id, project_hash, project_root, head, branch, remote_hash, + dirty, profile_json, created_at + FROM repo_profiles + ORDER BY created_at DESC, profile_id DESC + LIMIT 1`) + .get(); + if (!row) return null; + return { + profileId: row["profile_id"], + projectHash: row["project_hash"], + projectRoot: row["project_root"], + head: asStringOrNull(row["head"]), + branch: asStringOrNull(row["branch"]), + remoteHash: asStringOrNull(row["remote_hash"]), + dirty: row["dirty"] === 1, + profileJson: row["profile_json"] ?? "{}", + createdAt: row["created_at"], + }; +} +/** + * Return the current file observations accumulated by repo profiling. + * + * Purpose: keep untracked and modified file awareness queryable without + * treating those paths as SF-owned artifacts. + * + * Consumer: harness planning, diagnostics, and future drift detection. 
+ */ +export function getRepoFileObservations(): DbRow[] { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + return currentDb + .prepare(`SELECT path, latest_profile_id, git_status, ownership, language, + size_bytes, content_hash, summary, first_seen_at, last_seen_at, + adopted_at, adoption_unit_id + FROM repo_file_observations + ORDER BY path ASC`) + .all() + .map((row) => ({ + path: row["path"], + latestProfileId: row["latest_profile_id"], + gitStatus: row["git_status"], + ownership: row["ownership"], + language: asStringOrNull(row["language"]), + sizeBytes: row["size_bytes"] ?? 0, + contentHash: asStringOrNull(row["content_hash"]), + summary: asStringOrNull(row["summary"]), + firstSeenAt: row["first_seen_at"], + lastSeenAt: row["last_seen_at"], + adoptedAt: asStringOrNull(row["adopted_at"]), + adoptionUnitId: asStringOrNull(row["adoption_unit_id"]), + })); +} +function intBool(value: unknown): boolean { + return value ? 1 : 0; +} +function parseJsonObject(raw: unknown, fallback: Record = {}): Record { + try { + return JSON.parse(raw); + } catch { + return fallback; + } +} +function solverEvalRunFromRow(row: Record): Record { + return { + runId: row["run_id"], + suiteSource: row["suite_source"], + casesCount: row["cases_count"] ?? 0, + summary: parseJsonObject(row["summary_json"], {}), + reportPath: row["report_path"], + resultsPath: row["results_path"], + dbRecorded: row["db_recorded"] === 1, + createdAt: row["created_at"], + updatedAt: row["updated_at"], + }; +} +function solverEvalCaseFromRow(row: Record): Record { + return { + runId: row["run_id"], + caseId: row["case_id"], + title: row["title"], + mode: row["mode"], + passed: row["passed"] === 1, + falseComplete: row["false_complete"] === 1, + durationMs: row["duration_ms"], + commandStatus: row["command_status"], + solverOutcome: asStringOrNull(row["solver_outcome"]), + pddComplete: + row["pdd_complete"] === null || row["pdd_complete"] === undefined + ? 
null + : row["pdd_complete"] === 1, + result: parseJsonObject(row["result_json"], {}), + createdAt: row["created_at"], + }; +} +function headlessRunFromRow(row: Record): Record { + return { + runId: row["run_id"], + command: row["command"], + status: row["status"], + exitCode: row["exit_code"], + timedOut: row["timed_out"] === 1, + interrupted: row["interrupted"] === 1, + restartCount: row["restart_count"] ?? 0, + maxRestarts: row["max_restarts"] ?? 0, + durationMs: row["duration_ms"] ?? 0, + totalEvents: row["total_events"] ?? 0, + toolCalls: row["tool_calls"] ?? 0, + solverEvalRunId: asStringOrNull(row["solver_eval_run_id"]), + solverEvalReportPath: asStringOrNull(row["solver_eval_report_path"]), + details: parseJsonObject(row["details_json"], {}), + createdAt: row["created_at"], + updatedAt: row["updated_at"], + }; +} +/** + * Persist an autonomous solver eval run and its per-mode case results. + * + * Purpose: make solver-loop benchmark evidence queryable by SF commands, + * harness flows, UOK, and future memory retention instead of treating ignored + * `.sf/evals` JSON/JSONL evidence files as project state. + * + * Consumer: `/solver-eval` after each run completes. 
+ */ +export function recordSolverEvalRun(report: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const now = new Date().toISOString(); + transaction(() => { + currentDb + .prepare(`INSERT INTO solver_eval_runs ( + run_id, suite_source, cases_count, summary_json, report_path, + results_path, db_recorded, created_at, updated_at + ) VALUES ( + :run_id, :suite_source, :cases_count, :summary_json, :report_path, + :results_path, 1, :created_at, :updated_at + ) + ON CONFLICT(run_id) DO UPDATE SET + suite_source = excluded.suite_source, + cases_count = excluded.cases_count, + summary_json = excluded.summary_json, + report_path = excluded.report_path, + results_path = excluded.results_path, + db_recorded = 1, + updated_at = excluded.updated_at`) + .run({ + ":run_id": report.runId, + ":suite_source": report.suiteSource ?? "", + ":cases_count": report.summary?.cases ?? report.results?.length ?? 0, + ":summary_json": JSON.stringify(report.summary ?? {}), + ":report_path": report.reportPath ?? "", + ":results_path": report.resultsPath ?? "", + ":created_at": report.createdAt ?? now, + ":updated_at": now, + }); + const stmt = currentDb.prepare(`INSERT INTO solver_eval_case_results ( + run_id, case_id, title, mode, passed, false_complete, duration_ms, + command_status, solver_outcome, pdd_complete, result_json, created_at + ) VALUES ( + :run_id, :case_id, :title, :mode, :passed, :false_complete, :duration_ms, + :command_status, :solver_outcome, :pdd_complete, :result_json, :created_at + ) + ON CONFLICT(run_id, case_id, mode) DO UPDATE SET + title = excluded.title, + passed = excluded.passed, + false_complete = excluded.false_complete, + duration_ms = excluded.duration_ms, + command_status = excluded.command_status, + solver_outcome = excluded.solver_outcome, + pdd_complete = excluded.pdd_complete, + result_json = excluded.result_json, + created_at = excluded.created_at`); + for (const result of report.results ?? 
[]) { + stmt.run({ + ":run_id": report.runId, + ":case_id": result.caseId, + ":title": result.title ?? "", + ":mode": result.mode, + ":passed": intBool(result.passed), + ":false_complete": intBool(result.falseComplete), + ":duration_ms": result.command?.durationMs ?? null, + ":command_status": result.command?.status ?? null, + ":solver_outcome": result.solverSignals?.outcome ?? null, + ":pdd_complete": + result.solverSignals?.pddComplete === undefined + ? null + : intBool(result.solverSignals.pddComplete), + ":result_json": JSON.stringify(result), + ":created_at": report.createdAt ?? now, + }); + } + }); +} +/** + * List recent autonomous solver eval runs. + * + * Purpose: let operators inspect benchmark history without scraping generated + * report files. + * + * Consumer: `/solver-eval history`. + */ +export function listSolverEvalRuns(limit = 10): DbRow[] { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + return currentDb + .prepare(`SELECT run_id, suite_source, cases_count, summary_json, + report_path, results_path, db_recorded, created_at, updated_at + FROM solver_eval_runs + ORDER BY created_at DESC, run_id DESC + LIMIT :limit`) + .all({ ":limit": Math.max(1, Math.min(100, Number(limit) || 10)) }) + .map(solverEvalRunFromRow); +} +/** + * Read one autonomous solver eval run by id. + * + * Purpose: support `/solver-eval show ` and future evidence + * promotion without parsing JSON artifacts. + * + * Consumer: solver eval command handlers. + */ +export function getSolverEvalRun(runId: string): DbRow | undefined { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const row = currentDb + .prepare(`SELECT run_id, suite_source, cases_count, summary_json, + report_path, results_path, db_recorded, created_at, updated_at + FROM solver_eval_runs + WHERE run_id = :run_id`) + .get({ ":run_id": runId }); + return row ? 
solverEvalRunFromRow(row) : null; +} +/** + * Read per-case results for one autonomous solver eval run. + * + * Purpose: show raw-vs-SF comparisons from DB evidence. + * + * Consumer: `/solver-eval show `. + */ +export function getSolverEvalCaseResults(runId: string): DbRow[] { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + return currentDb + .prepare(`SELECT run_id, case_id, title, mode, passed, false_complete, + duration_ms, command_status, solver_outcome, pdd_complete, + result_json, created_at + FROM solver_eval_case_results + WHERE run_id = :run_id + ORDER BY case_id ASC, mode ASC`) + .all({ ":run_id": runId }) + .map(solverEvalCaseFromRow); +} +/** + * Persist one headless session outcome. + * + * Purpose: make headless lifecycle evidence queryable from `sf.db` so timeout, + * restart, and operator-bounded run behavior does not live only in stderr or + * generated JSON artifacts. + * + * Consumer: headless.ts after every session exits. + */ +export function recordHeadlessRun(entry: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const now = new Date().toISOString(); + currentDb + .prepare(`INSERT INTO headless_runs ( + run_id, command, status, exit_code, timed_out, interrupted, + restart_count, max_restarts, duration_ms, total_events, tool_calls, + solver_eval_run_id, solver_eval_report_path, details_json, + created_at, updated_at + ) VALUES ( + :run_id, :command, :status, :exit_code, :timed_out, :interrupted, + :restart_count, :max_restarts, :duration_ms, :total_events, :tool_calls, + :solver_eval_run_id, :solver_eval_report_path, :details_json, + :created_at, :updated_at + ) + ON CONFLICT(run_id) DO UPDATE SET + command = excluded.command, + status = excluded.status, + exit_code = excluded.exit_code, + timed_out = excluded.timed_out, + interrupted = excluded.interrupted, + restart_count = excluded.restart_count, + max_restarts = excluded.max_restarts, + duration_ms = 
excluded.duration_ms, + total_events = excluded.total_events, + tool_calls = excluded.tool_calls, + solver_eval_run_id = excluded.solver_eval_run_id, + solver_eval_report_path = excluded.solver_eval_report_path, + details_json = excluded.details_json, + updated_at = excluded.updated_at`) + .run({ + ":run_id": entry.runId, + ":command": entry.command ?? "", + ":status": entry.status ?? "", + ":exit_code": Number(entry.exitCode ?? 0), + ":timed_out": intBool(entry.timedOut), + ":interrupted": intBool(entry.interrupted), + ":restart_count": Number(entry.restartCount ?? 0), + ":max_restarts": Number(entry.maxRestarts ?? 0), + ":duration_ms": Number(entry.durationMs ?? 0), + ":total_events": Number(entry.totalEvents ?? 0), + ":tool_calls": Number(entry.toolCalls ?? 0), + ":solver_eval_run_id": entry.solverEvalRunId ?? null, + ":solver_eval_report_path": entry.solverEvalReportPath ?? null, + ":details_json": JSON.stringify(entry.details ?? {}), + ":created_at": entry.createdAt ?? now, + ":updated_at": now, + }); +} +/** + * List recent headless session outcomes. + * + * Purpose: support status/doctor/query surfaces that need durable headless + * lifecycle evidence without parsing stderr logs. + * + * Consumer: tests now; headless query and doctor follow-on surfaces later. + */ +export function listHeadlessRuns(limit = 20): DbRow[] { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + return currentDb + .prepare(`SELECT run_id, command, status, exit_code, timed_out, + interrupted, restart_count, max_restarts, duration_ms, + total_events, tool_calls, solver_eval_run_id, + solver_eval_report_path, details_json, created_at, updated_at + FROM headless_runs + ORDER BY created_at DESC, run_id DESC + LIMIT :limit`) + .all({ ":limit": Math.max(1, Math.min(100, Number(limit) || 20)) }) + .map(headlessRunFromRow); +} +/** + * Upsert a session row. Creates on first call; updates updated_at, branch, + * repo, and summary on subsequent calls. 
Safe to call on every session_start + * and again when context becomes available (e.g. after git detection). + * + * Purpose: establish the session entity that all turns, file-touches, and + * refs hang off — the missing structural layer for cross-session learning. + * + * Consumer: session-recorder.js on session_start and session_shutdown hooks. + */ +export function upsertSession(entry: Record): void { + if (!currentDb) return; + const now = new Date().toISOString(); + currentDb + .prepare(`INSERT INTO sessions + (session_id, trace_id, mode, cwd, repo, branch, summary, summary_count, created_at, updated_at) + VALUES (:session_id, :trace_id, :mode, :cwd, :repo, :branch, :summary, 0, :now, :now) + ON CONFLICT(session_id) DO UPDATE SET + trace_id = COALESCE(excluded.trace_id, sessions.trace_id), + repo = COALESCE(excluded.repo, sessions.repo), + branch = COALESCE(excluded.branch, sessions.branch), + summary = COALESCE(excluded.summary, sessions.summary), + summary_count = CASE WHEN excluded.summary IS NOT NULL + THEN sessions.summary_count + 1 + ELSE sessions.summary_count END, + updated_at = excluded.updated_at`) + .run({ + ":session_id": entry.sessionId, + ":trace_id": entry.traceId ?? null, + ":mode": entry.mode ?? "interactive", + ":cwd": entry.cwd ?? "", + ":repo": entry.repo ?? null, + ":branch": entry.branch ?? null, + ":summary": entry.summary ?? null, + ":now": now, + }); +} + +/** + * Mark a session as archived. Archived sessions are hidden from default + * session listings but retained for search and audit. + * + * Purpose: soft-delete sessions without losing their turn history or refs. + * Consumer: /sf sessions --archive , autonomous cleanup. 
+ */ +export function archiveSession(sessionId: string): void { + if (!currentDb) return; + currentDb + .prepare( + `UPDATE sessions SET archived_at = :now, updated_at = :now WHERE session_id = :session_id`, + ) + .run({ ":session_id": sessionId, ":now": new Date().toISOString() }); +} + +/** + * Restore an archived session to active status. + * + * Purpose: undo an accidental archive without data loss. + * Consumer: /sf sessions --unarchive . + */ +export function unarchiveSession(sessionId: string): void { + if (!currentDb) return; + currentDb + .prepare( + `UPDATE sessions SET archived_at = NULL, updated_at = :now WHERE session_id = :session_id`, + ) + .run({ ":session_id": sessionId, ":now": new Date().toISOString() }); +} + +/** + * Insert a turn row for a session. Returns the new turn's integer id so the + * caller can link subsequent file-touches and refs to it. + * + * Purpose: record every user↔assistant exchange so turn text is searchable + * via turns_fts and promotable into the memory pipeline. + * + * Consumer: session-recorder.js on before_agent_start (user_message) and + * agent_end (assistant_response patch). + */ +export function insertSessionTurn(entry: Record): void { + if (!currentDb) return null; + const result = currentDb + .prepare(`INSERT INTO turns + (session_id, turn_index, user_message, assistant_response, ts) + VALUES (:session_id, :turn_index, :user_message, :assistant_response, :ts) + ON CONFLICT(session_id, turn_index) DO UPDATE SET + user_message = COALESCE(excluded.user_message, turns.user_message), + assistant_response = COALESCE(excluded.assistant_response, turns.assistant_response)`) + .run({ + ":session_id": entry.sessionId, + ":turn_index": entry.turnIndex, + ":user_message": entry.userMessage ?? null, + ":assistant_response": entry.assistantResponse ?? null, + ":ts": entry.ts ?? new Date().toISOString(), + }); + return result.lastInsertRowid ?? null; +} +/** + * Patch the assistant_response on an existing turn row. 
Called from agent_end + * after the model finishes so the full response is stored alongside the prompt. + * + * Purpose: complete the turn record so both halves of the exchange are + * searchable and promotable as a unit. + * + * Consumer: session-recorder.js on agent_end. + */ +export function patchTurnResponse(sessionId: string, turnIndex: number, assistantResponse: string): void { + if (!currentDb) return; + currentDb + .prepare(`UPDATE turns SET assistant_response = :resp + WHERE session_id = :sid AND turn_index = :idx AND assistant_response IS NULL`) + .run({ + ":resp": assistantResponse, + ":sid": sessionId, + ":idx": turnIndex, + }); +} +/** + * Record that a file path was touched in a session. UNIQUE(session_id, path) + * means repeated touches in one session are collapsed to a single row — + * only first_seen_at and tool_name (of the first touch) are retained. + * + * Purpose: enable "which files did I touch last session?" and cross-session + * file-history queries without storing a full audit log per touch. + * + * Consumer: session-recorder.js on tool_call for write-class tools. + */ +export function recordSessionFileTouch(entry: Record): void { + if (!currentDb) return; + currentDb + .prepare(`INSERT OR IGNORE INTO session_file_touches + (session_id, path, tool_name, turn_id, first_seen_at) + VALUES (:session_id, :path, :tool_name, :turn_id, :first_seen_at)`) + .run({ + ":session_id": entry.sessionId, + ":path": entry.path, + ":tool_name": entry.toolName ?? null, + ":turn_id": entry.turnId ?? null, + ":first_seen_at": entry.firstSeenAt ?? new Date().toISOString(), + }); +} +/** + * Record a PR / issue / commit / branch ref mentioned in a session. Idempotent + * via UNIQUE(session_id, ref_type, ref_value). + * + * Purpose: make sessions queryable by the work items they touched so + * "what session created PR #42?" is a single indexed lookup. + * + * Consumer: session-recorder.js when refs are detected in turn text. 
+ */ +export function recordSessionRef(entry: Record): void { + if (!currentDb) return; + currentDb + .prepare(`INSERT OR IGNORE INTO session_refs + (session_id, ref_type, ref_value, turn_id, created_at) + VALUES (:session_id, :ref_type, :ref_value, :turn_id, :created_at)`) + .run({ + ":session_id": entry.sessionId, + ":ref_type": entry.refType, + ":ref_value": entry.refValue, + ":turn_id": entry.turnId ?? null, + ":created_at": entry.createdAt ?? new Date().toISOString(), + }); +} +/** + * Full-text search across turns via the FTS5 turns_fts virtual table. + * Returns matching turns with their session metadata ordered by relevance. + * + * Purpose: power cross-session keyword recall — "what did I ask about auth?", + * "find sessions where I worked on retry handling". + * + * Consumer: sf memory search, context-injection, and /session search command. + */ +export function searchSessionTurns(query: string, limit = 20): DbRow[] { + if (!currentDb) return []; + return currentDb + .prepare(`SELECT t.id, t.session_id, t.turn_index, t.ts, + t.user_message, t.assistant_response, + s.mode, s.cwd, s.repo, s.branch + FROM turns_fts + JOIN turns t ON turns_fts.rowid = t.id + JOIN sessions s ON t.session_id = s.session_id + WHERE turns_fts MATCH :query + ORDER BY rank + LIMIT :limit`) + .all({ ":query": query, ":limit": Math.max(1, Math.min(100, limit)) }); +} +/** + * List recent sessions with their turn count and last-touched file count. + * Useful for /session list and for memory-pipeline ingestion sweeps. + * + * Consumer: trajectory-command, memory-ingest, doctor checks. 
+ */ +export function listRecentSessions(limit = 20): DbRow[] { + if (!currentDb) return []; + return currentDb + .prepare(`SELECT s.session_id, s.mode, s.cwd, s.repo, s.branch, + s.summary, s.created_at, s.updated_at, + COUNT(DISTINCT t.id) AS turn_count, + COUNT(DISTINCT f.id) AS file_count + FROM sessions s + LEFT JOIN turns t ON t.session_id = s.session_id + LEFT JOIN session_file_touches f ON f.session_id = s.session_id + GROUP BY s.session_id + ORDER BY s.updated_at DESC + LIMIT :limit`) + .all({ ":limit": Math.max(1, Math.min(100, limit)) }); +} +/** + * Record a snapshot checkpoint before an irreversible operation. Idempotent + * within a session: the snapshot_index is auto-incremented from the current + * max so callers can create multiple checkpoints per session without + * coordination. + * + * Purpose: give session_snapshots a first-class row so recovery paths and + * irreversible-ops gates can reference the stash ref and label without + * parsing free-text. + * + * Consumer: irreversible-ops safety gate (session_before_compact, future + * verify steps that call git stash before destructive actions). + * + * @param {{ sessionId: string, gitStashRef?: string|null, label?: string|null, ts?: string }} args + * @returns {number} The row id of the inserted snapshot (or 0 on failure). + */ +export function insertSessionSnapshot(args: Record): number { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const nextIndex = (() => { + const row = currentDb + .prepare( + "SELECT COALESCE(MAX(snapshot_index), -1) + 1 AS nxt FROM session_snapshots WHERE session_id = :sid", + ) + .get({ ":sid": args.sessionId }); + return row ? Number(row["nxt"]) : 0; + })(); + currentDb + .prepare(`INSERT INTO session_snapshots + (session_id, snapshot_index, git_stash_ref, label, ts) + VALUES (:sid, :idx, :ref, :label, :ts)`) + .run({ + ":sid": args.sessionId, + ":idx": nextIndex, + ":ref": args.gitStashRef ?? null, + ":label": args.label ?? 
null, + ":ts": args.ts ?? new Date().toISOString(), + }); + const row = currentDb + .prepare( + "SELECT id FROM session_snapshots WHERE session_id = :sid AND snapshot_index = :idx", + ) + .get({ ":sid": args.sessionId, ":idx": nextIndex }); + return row ? Number(row["id"]) : 0; +} +/** + * List all snapshots for a session, ordered by snapshot_index ascending. + * + * Purpose: let recovery tooling enumerate available restore points for a + * session and present them to the operator before a rollback. + * + * Consumer: future /session snapshots command and irreversible-ops skill. + * + * @param {string} sessionId + * @returns {Array<{id:number, session_id:string, snapshot_index:number, git_stash_ref:string|null, label:string|null, ts:string}>} + */ +export function listSessionSnapshots(sessionId: string): DbRow[] { + if (!currentDb) return []; + return currentDb + .prepare( + "SELECT * FROM session_snapshots WHERE session_id = :sid ORDER BY snapshot_index ASC", + ) + .all({ ":sid": sessionId }); +} + +/** + * INSERT OR REPLACE a quality_gates row. Used by milestone-validation-gates.ts + * to persist milestone-level (MV*) gate outcomes after validate-milestone runs. + */ +export function upsertQualityGate(g: QualityGateInput): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR REPLACE INTO quality_gates + (milestone_id, slice_id, gate_id, scope, task_id, status, verdict, rationale, findings, evaluated_at) + VALUES (:mid, :sid, :gid, :scope, :tid, :status, :verdict, :rationale, :findings, :evaluated_at)`) + .run({ + ":mid": g.milestoneId, + ":sid": g.sliceId, + ":gid": g.gateId, + ":scope": g.scope, + ":tid": g.taskId, + ":status": g.status, + ":verdict": g.verdict, + ":rationale": g.rationale, + ":findings": g.findings, + ":evaluated_at": g.evaluatedAt, + }); +} +/** + * Atomically replace all workflow state from a manifest. 
Lifted verbatim from + * workflow-manifest.ts so the single-writer invariant holds. Only touches + * engine tables + decisions. Does NOT modify artifacts or memories. + */ +export function restoreManifest(manifest: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const db = currentDb; + transaction(() => { + // Clear engine tables (order matters for foreign-key-like consistency) + db.exec("DELETE FROM verification_evidence"); + db.exec("DELETE FROM tasks"); + db.exec("DELETE FROM slices"); + db.exec("DELETE FROM milestones"); + db.exec("DELETE FROM decisions WHERE 1=1"); + // Restore milestones + const msStmt = + db.prepare(`INSERT INTO milestones (id, title, status, depends_on, created_at, completed_at, + vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); + for (const m of manifest.milestones) { + msStmt.run( + m.id, + m.title, + m.status, + JSON.stringify(m.depends_on), + m.created_at, + m.completed_at, + m.vision, + JSON.stringify(m.success_criteria), + JSON.stringify(m.key_risks), + JSON.stringify(m.proof_strategy), + m.verification_contract, + m.verification_integration, + m.verification_operational, + m.verification_uat, + JSON.stringify(m.definition_of_done), + m.requirement_coverage, + m.boundary_map_markdown, + m.vision_meeting ? JSON.stringify(m.vision_meeting) : "", + m.product_research ? 
JSON.stringify(m.product_research) : "", + ); + } + // Restore slices + const slStmt = + db.prepare(`INSERT INTO slices (milestone_id, id, title, status, risk, depends, demo, + created_at, completed_at, full_summary_md, full_uat_md, + goal, success_criteria, proof_level, integration_closure, observability_impact, + adversarial_partner, adversarial_combatant, adversarial_architect, planning_meeting_json, + sequence, replan_triggered_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); + for (const s of manifest.slices) { + slStmt.run( + s.milestone_id, + s.id, + s.title, + s.status, + s.risk, + JSON.stringify(s.depends), + s.demo, + s.created_at, + s.completed_at, + s.full_summary_md, + s.full_uat_md, + s.goal, + s.success_criteria, + s.proof_level, + s.integration_closure, + s.observability_impact, + s.adversarial_partner ?? "", + s.adversarial_combatant ?? "", + s.adversarial_architect ?? "", + s.planning_meeting ? JSON.stringify(s.planning_meeting) : "", + s.sequence, + s.replan_triggered_at, + ); + } + // Restore tasks + const tkStmt = + db.prepare(`INSERT INTO tasks (milestone_id, slice_id, id, title, status, + one_liner, narrative, verification_result, duration, completed_at, + blocker_discovered, deviations, known_issues, key_files, key_decisions, + full_summary_md, description, estimate, files, verify, + inputs, expected_output, observability_impact, full_plan_md, sequence) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); + for (const t of manifest.tasks) { + tkStmt.run( + t.milestone_id, + t.slice_id, + t.id, + t.title, + t.status, + t.one_liner, + t.narrative, + t.verification_result, + t.duration, + t.completed_at, + t.blocker_discovered ? 
1 : 0, + t.deviations, + t.known_issues, + JSON.stringify(t.key_files), + JSON.stringify(t.key_decisions), + t.full_summary_md, + t.description, + t.estimate, + JSON.stringify(t.files), + t.verify, + JSON.stringify(t.inputs), + JSON.stringify(t.expected_output), + t.observability_impact, + t.full_plan_md, + t.sequence, + ); + } + // Restore decisions + const dcStmt = + db.prepare(`INSERT INTO decisions (seq, id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); + for (const d of manifest.decisions) { + dcStmt.run( + d.seq, + d.id, + d.when_context, + d.scope, + d.decision, + d.choice, + d.rationale, + d.revisable, + d.made_by, + d.superseded_by, + ); + } + // Restore verification evidence + const evStmt = + db.prepare(`INSERT INTO verification_evidence (task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)`); + for (const e of manifest.verification_evidence) { + evStmt.run( + e.task_id, + e.slice_id, + e.milestone_id, + e.command, + e.exit_code, + e.verdict, + e.duration_ms, + e.created_at, + ); + } + }); +} +/** + * Bulk delete + insert a legacy milestone hierarchy for markdown → DB migration. + * Used by workflow-migration.ts to populate engine tables from parsed ROADMAP/PLAN + * files. All operations run inside a single transaction. 
+ */ +export function bulkInsertLegacyHierarchy(payload: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const db = currentDb; + const { milestones, slices, tasks, clearMilestoneIds, createdAt } = payload; + if (clearMilestoneIds.length === 0) return; + const placeholders = clearMilestoneIds.map(() => "?").join(","); + transaction(() => { + db.prepare(`DELETE FROM tasks WHERE milestone_id IN (${placeholders})`).run( + ...clearMilestoneIds, + ); + db.prepare( + `DELETE FROM slices WHERE milestone_id IN (${placeholders})`, + ).run(...clearMilestoneIds); + db.prepare(`DELETE FROM milestones WHERE id IN (${placeholders})`).run( + ...clearMilestoneIds, + ); + const insertMilestone = db.prepare( + "INSERT INTO milestones (id, title, status, created_at) VALUES (?, ?, ?, ?)", + ); + for (const m of milestones) { + insertMilestone.run(m.id, m.title, m.status, createdAt); + } + const insertSliceStmt = db.prepare( + "INSERT INTO slices (id, milestone_id, title, status, risk, depends, sequence, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)", + ); + for (const s of slices) { + insertSliceStmt.run( + s.id, + s.milestoneId, + s.title, + s.status, + s.risk, + "[]", + s.sequence, + createdAt, + ); + } + const insertTaskStmt = db.prepare( + "INSERT INTO tasks (id, slice_id, milestone_id, title, description, status, estimate, files, sequence) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", + ); + for (const t of tasks) { + insertTaskStmt.run( + t.id, + t.sliceId, + t.milestoneId, + t.title, + "", + t.status, + "", + "[]", + t.sequence, + ); + } + }); +} +// ─── Memory store writers ──────────────────────────────────────────────── +// All memory writes go through sf-db.ts so the single-writer invariant +// holds. These are direct pass-throughs to the SQL previously in +// memory-store.ts — same bindings, same behavior. 
export function getActiveMemories({ category, limit = 200 }: { category?: string; limit?: number } = {}): DbRow[] {
  if (!currentDb) return [];
  const rows = category
    ? currentDb.prepare("SELECT * FROM active_memories WHERE category = ? ORDER BY updated_at DESC LIMIT ?").all(category, limit)
    : currentDb.prepare("SELECT * FROM active_memories ORDER BY updated_at DESC LIMIT ?").all(limit);
  return rows.map((r) => ({
    id: r["id"],
    category: r["category"],
    content: r["content"],
    confidence: r["confidence"],
    sourceUnitId: r["source_unit_id"],
    // tags are stored as a JSON array string; tolerate malformed rows.
    tags: (() => { try { return JSON.parse(r["tags"] ?? "[]"); } catch { return []; } })(),
    createdAt: r["created_at"],
    updatedAt: r["updated_at"],
  }));
}
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- param bag bound into SQL
export function insertMemoryRow(args: Record<string, any>): void {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare(`INSERT INTO memories (id, category, content, confidence, source_unit_type, source_unit_id, created_at, updated_at, tags)
      VALUES (:id, :category, :content, :confidence, :source_unit_type, :source_unit_id, :created_at, :updated_at, :tags)`)
    .run({
      ":id": args.id,
      ":category": args.category,
      ":content": args.content,
      ":confidence": args.confidence,
      ":source_unit_type": args.sourceUnitType,
      ":source_unit_id": args.sourceUnitId,
      ":created_at": args.createdAt,
      ":updated_at": args.updatedAt,
      ":tags": JSON.stringify(args.tags ?? []),
    });
}
export function rewriteMemoryId(placeholderId: string, realId: string): void {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare("UPDATE memories SET id = :real_id WHERE id = :placeholder")
    .run({
      ":real_id": realId,
      ":placeholder": placeholderId,
    });
}
// FIX: confidence was typed `number`, which made the `!= null` branch dead
// code under strict TS; callers may pass null to skip the confidence update.
export function updateMemoryContentRow(id: string, content: string, confidence: number | null, updatedAt: string): void {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  if (confidence != null) {
    currentDb
      .prepare(
        "UPDATE memories SET content = :content, confidence = :confidence, updated_at = :updated_at WHERE id = :id",
      )
      .run({
        ":content": content,
        ":confidence": confidence,
        ":updated_at": updatedAt,
        ":id": id,
      });
  } else {
    currentDb
      .prepare(
        "UPDATE memories SET content = :content, updated_at = :updated_at WHERE id = :id",
      )
      .run({ ":content": content, ":updated_at": updatedAt, ":id": id });
  }
}
export function incrementMemoryHitCount(id: string, updatedAt: string): void {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare(
      "UPDATE memories SET hit_count = hit_count + 1, updated_at = :updated_at WHERE id = :id",
    )
    .run({ ":updated_at": updatedAt, ":id": id });
}
export function supersedeMemoryRow(oldId: string, newId: string, updatedAt: string): void {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare(
      "UPDATE memories SET superseded_by = :new_id, updated_at = :updated_at WHERE id = :old_id",
    )
    .run({ ":new_id": newId, ":updated_at": updatedAt, ":old_id": oldId });
}
export function markMemoryUnitProcessed(unitKey: string, activityFile: string | null, processedAt: string): void {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare(`INSERT OR IGNORE INTO memory_processed_units (unit_key, activity_file, processed_at)
      VALUES (:key, :file, :at)`)
    .run({ ":key": unitKey, ":file": activityFile, ":at": processedAt });
}
export function decayMemoriesBefore(cutoffTs: string, now: string): void {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare(`UPDATE memories
      SET confidence = MAX(0.1, confidence - 0.1), updated_at = :now
      WHERE superseded_by IS NULL AND updated_at < :cutoff AND confidence > 0.1`)
    .run({ ":now": now, ":cutoff": cutoffTs });
}
/**
 * Supersede memories that have exceeded their TTL.
 *
 * Purpose: prevent stale memories from silently poisoning future sessions.
 * Mirrors Copilot Memory's 28-day TTL model — memories that were never
 * accessed expire sooner; memories actively used get a longer lease.
 *
 * Rules:
 * - Never accessed (hit_count = 0) + older than unstartedTtlDays → expire
 * - Any memory older than maxTtlDays → expire regardless of hit_count
 *
 * Consumer: called at autonomous mode startup from auto-start.js.
 * Returns the number of memories superseded.
 */
// FIX: was declared `: void` but returned counts; doc and callers expect a number.
export function expireStaleMemories(unstartedTtlDays = 28, maxTtlDays = 90): number {
  if (!currentDb) return 0;
  const now = new Date().toISOString();
  const cutoffUnstarted = new Date(
    Date.now() - unstartedTtlDays * 86_400_000,
  ).toISOString();
  const cutoffMax = new Date(
    Date.now() - maxTtlDays * 86_400_000,
  ).toISOString();
  const result = currentDb
    .prepare(`UPDATE memories SET superseded_by = 'ttl-expired', updated_at = :now
      WHERE superseded_by IS NULL
        AND (
          (hit_count = 0 AND updated_at < :cutoff_unstarted)
          OR updated_at < :cutoff_max
        )`)
    .run({
      ":now": now,
      ":cutoff_unstarted": cutoffUnstarted,
      ":cutoff_max": cutoffMax,
    });
  return result.changes ?? 0;
}
export function supersedeLowestRankedMemories(limit: number, now: string): void {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare(`UPDATE memories SET superseded_by = 'CAP_EXCEEDED', updated_at = :now
      WHERE id IN (
        SELECT id FROM memories
        WHERE superseded_by IS NULL
        ORDER BY (confidence * (1.0 + hit_count * 0.1)) ASC
        LIMIT :limit
      )`)
    .run({ ":now": now, ":limit": limit });
}
// ─── Memory Sources ──────────────────────────────────────────────────────────
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- param bag bound into SQL
export function insertMemorySourceRow(args: Record<string, any>): void {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  currentDb
    .prepare(`INSERT OR IGNORE INTO memory_sources (id, kind, uri, title, content, content_hash, imported_at, scope, tags)
      VALUES (:id, :kind, :uri, :title, :content, :content_hash, :imported_at, :scope, :tags)`)
    .run({
      ":id": args.id,
      ":kind": args.kind,
      ":uri": args.uri,
      ":title": args.title,
      ":content": args.content,
      ":content_hash": args.contentHash,
      ":imported_at": args.importedAt,
      ":scope": args.scope ?? "project",
      ":tags": JSON.stringify(args.tags ?? []),
    });
}
// FIX: was declared `: void` but returned a boolean (deleted-or-not).
export function deleteMemorySourceRow(id: string): boolean {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const res = currentDb
    .prepare("DELETE FROM memory_sources WHERE id = :id")
    .run({ ":id": id });
  return (res?.changes ?? 0) > 0;
}
// ─── Judgments ───────────────────────────────────────────────────────────────
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- param bag bound into SQL
export function insertJudgment(entry: Record<string, any>): void {
  if (!currentDb) return;
  try {
    currentDb
      .prepare(`INSERT INTO judgments (unit_id, decision, alternatives_json, reasoning, confidence, ts)
        VALUES (:unit_id, :decision, :alternatives_json, :reasoning, :confidence, :ts)`)
      .run({
        ":unit_id": entry.unitId ?? "",
        ":decision": entry.decision ?? "",
        ":alternatives_json": JSON.stringify(entry.alternatives ?? []),
        ":reasoning": entry.reasoning ?? "",
        ":confidence": entry.confidence ?? "medium",
        ":ts": entry.ts ?? new Date().toISOString(),
      });
  } catch {
    // Judgment logging is best-effort
  }
}
export function getJudgmentsForUnit(unitIdPrefix: string, limit = 1000): DbRow[] {
  if (!currentDb) return [];
  try {
    const rows = currentDb
      .prepare(
        `SELECT id, unit_id AS unitId, decision, alternatives_json AS alternativesJson, reasoning, confidence, ts
          FROM judgments
          WHERE unit_id LIKE :prefix
          ORDER BY ts DESC
          LIMIT :limit`,
      )
      .all({
        ":prefix": `${unitIdPrefix}%`,
        ":limit": limit,
      });
    return rows.map((r) => ({
      id: r.id,
      unitId: r.unitId,
      decision: r.decision,
      alternatives: parseJsonObject(r.alternativesJson, []),
      reasoning: r.reasoning,
      confidence: r.confidence,
      ts: r.ts,
    }));
  } catch {
    return [];
  }
}
// ─── Retrieval Evidence ─────────────────────────────────────────────────────

/**
 * Record a retrieval lookup with source provenance.
 * Purpose: let SF compare live code, semantic, docs, and web context by the same
 * freshness and scope contract before planning or implementation trusts it.
 * Consumer: Sift/codebase search tools and future Context7/web retrieval bridges.
 */
// eslint-disable-next-line @typescript-eslint/no-explicit-any -- param bag bound into SQL
export function insertRetrievalEvidence(args: Record<string, any>): void {
  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  const now = args.recordedAt ?? new Date().toISOString();
  currentDb
    .prepare(`INSERT INTO retrieval_evidence (
        backend, source_kind, query, strategy, scope, project_root,
        git_head, git_branch, worktree_dirty, freshness, status,
        hit_count, elapsed_ms, cache_path, error, result_json, recorded_at
      ) VALUES (
        :backend, :source_kind, :query, :strategy, :scope, :project_root,
        :git_head, :git_branch, :worktree_dirty, :freshness, :status,
        :hit_count, :elapsed_ms, :cache_path, :error, :result_json, :recorded_at
      )`)
    .run({
      ":backend": args.backend,
      ":source_kind": args.sourceKind ?? "code",
      ":query": args.query ?? "",
      ":strategy": args.strategy ?? "",
      ":scope": args.scope ?? "",
      ":project_root": args.projectRoot ?? "",
      ":git_head": args.gitHead ?? null,
      ":git_branch": args.gitBranch ?? null,
      ":worktree_dirty": intBool(args.worktreeDirty),
      ":freshness": args.freshness ?? "unknown",
      ":status": args.status ?? "ok",
      ":hit_count": args.hitCount ?? 0,
      ":elapsed_ms": args.elapsedMs ?? 0,
      ":cache_path": args.cachePath ?? null,
      ":error": args.error ?? null,
      ":result_json": JSON.stringify(args.result ?? {}),
      ":recorded_at": now,
    });
}

/**
 * Return recent retrieval evidence rows.
 * Purpose: support audits that need to distinguish live source evidence from
 * stale indexed or prose-only context.
 * Consumer: inspect/doctor tooling and tests for retrieval provenance.
+ */ +export function getRetrievalEvidence(limit = 100): DbRow[] { + if (!currentDb) return []; + const rows = currentDb + .prepare(`SELECT + id, backend, source_kind AS sourceKind, query, strategy, scope, + project_root AS projectRoot, git_head AS gitHead, + git_branch AS gitBranch, worktree_dirty AS worktreeDirty, + freshness, status, hit_count AS hitCount, elapsed_ms AS elapsedMs, + cache_path AS cachePath, error, result_json AS resultJson, recorded_at AS recordedAt + FROM retrieval_evidence + ORDER BY recorded_at DESC, id DESC + LIMIT :limit`) + .all({ ":limit": limit }); + return rows.map((row) => ({ + ...row, + worktreeDirty: row.worktreeDirty === 1, + result: parseJsonObject(row.resultJson, {}), + })); +} +// ─── Memory Embeddings ─────────────────────────────────────────────────────── +export function upsertMemoryEmbedding(args: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO memory_embeddings (memory_id, model, dim, vector, updated_at) + VALUES (:memory_id, :model, :dim, :vector, :updated_at) + ON CONFLICT(memory_id) DO UPDATE SET + model = excluded.model, + dim = excluded.dim, + vector = excluded.vector, + updated_at = excluded.updated_at`) + .run({ + ":memory_id": args.memoryId, + ":model": args.model, + ":dim": args.dim, + ":vector": args.vector, + ":updated_at": args.updatedAt, + }); +} +export function deleteMemoryEmbedding(memoryId: string): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const res = currentDb + .prepare("DELETE FROM memory_embeddings WHERE memory_id = :id") + .run({ ":id": memoryId }); + return (res?.changes ?? 0) > 0; +} +// ─── Tier 1.3: Spec/Runtime/Evidence Schema ────────────────────────────────── +// Functions for managing evidence in the new spec schema (v32+) + +/** + * Record evidence for a milestone. Appends to milestone_evidence table. 
+ * Purpose: Create audit trail of decisions, verifications, and incidents. + * Consumer: complete-milestone, reassess-milestone, and other tools. + */ +export function insertMilestoneEvidence( + milestoneId, + evidenceType, + content, + phaseName, + recordedBy, +) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO milestone_evidence (milestone_id, evidence_type, content, recorded_at, phase_name, recorded_by) + VALUES (?, ?, ?, ?, ?, ?)`) + .run( + milestoneId, + evidenceType, + content, + new Date().toISOString(), + phaseName || "", + recordedBy || "", + ); +} + +/** + * Record evidence for a slice. Appends to slice_evidence table. + * Purpose: Create audit trail of slice decisions, verifications, and incidents. + * Consumer: complete-slice, execute-slice, and other tools. + */ +export function insertSliceEvidence( + milestoneId, + sliceId, + evidenceType, + content, + phaseName, + recordedBy, +) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO slice_evidence (milestone_id, slice_id, evidence_type, content, recorded_at, phase_name, recorded_by) + VALUES (?, ?, ?, ?, ?, ?, ?)`) + .run( + milestoneId, + sliceId, + evidenceType, + content, + new Date().toISOString(), + phaseName || "", + recordedBy || "", + ); +} + +/** + * Record evidence for a task. Appends to task_evidence table. + * Purpose: Create audit trail of task decisions, verifications, and incidents. + * Consumer: complete-task, execute-task, and other tools. 
+ */ +export function insertTaskEvidence( + milestoneId, + sliceId, + taskId, + evidenceType, + content, + phaseName, + recordedBy, +) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO task_evidence (milestone_id, slice_id, task_id, evidence_type, content, recorded_at, phase_name, recorded_by) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)`) + .run( + milestoneId, + sliceId, + taskId, + evidenceType, + content, + new Date().toISOString(), + phaseName || "", + recordedBy || "", + ); +} + +/** + * Query milestone audit trail (spec + evidence). Returns rows with spec intent and evidence history. + * Purpose: Support data archaeology and decision-tree reconstruction. + * Consumer: forensics tools, doctor checks, audit/compliance queries. + */ +export function getMilestoneAuditTrail(milestoneId: string): DbRow[] { + if (!currentDb) return []; + return currentDb + .prepare(` + SELECT + r.id, r.title, r.status, + s.vision, s.spec_version, + e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by + FROM milestones r + LEFT JOIN milestone_specs s ON r.id = s.id + LEFT JOIN milestone_evidence e ON r.id = e.milestone_id + WHERE r.id = ? + ORDER BY e.recorded_at ASC + `) + .all(milestoneId); +} + +/** + * Query slice audit trail (spec + evidence). + * Purpose: Support data archaeology and decision-tree reconstruction. + * Consumer: forensics tools, doctor checks, audit/compliance queries. + */ +export function getSliceAuditTrail(milestoneId: string, sliceId: string): DbRow[] { + if (!currentDb) return []; + return currentDb + .prepare(` + SELECT + r.id, r.title, r.status, + s.goal, s.spec_version, + e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by + FROM slices r + LEFT JOIN slice_specs s ON r.milestone_id = s.milestone_id AND r.id = s.slice_id + LEFT JOIN slice_evidence e ON r.milestone_id = e.milestone_id AND r.id = e.slice_id + WHERE r.milestone_id = ? AND r.id = ? 
+ ORDER BY e.recorded_at ASC + `) + .all(milestoneId, sliceId); +} + +/** + * Query task audit trail (spec + evidence). + * Purpose: Support data archaeology and decision-tree reconstruction. + * Consumer: forensics tools, doctor checks, audit/compliance queries. + */ +export function getTaskAuditTrail(milestoneId: string, sliceId: string, taskId: string): DbRow[] { + if (!currentDb) return []; + return currentDb + .prepare(` + SELECT + r.id, r.title, r.status, + s.verify, s.spec_version, + e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by + FROM tasks r + LEFT JOIN task_specs s ON r.milestone_id = s.milestone_id AND r.slice_id = s.slice_id AND r.id = s.task_id + LEFT JOIN task_evidence e ON r.milestone_id = e.milestone_id AND r.slice_id = e.slice_id AND r.id = e.task_id + WHERE r.milestone_id = ? AND r.slice_id = ? AND r.id = ? + ORDER BY e.recorded_at ASC + `) + .all(milestoneId, sliceId, taskId); +} + +/** + * Get milestone spec only (immutable intent, no runtime state). + * Purpose: Retrieve spec intent for re-planning or spec validation. + * Consumer: plan-milestone and spec validation tools. + */ +export function getMilestoneSpec(milestoneId: string): DbRow | undefined { + if (!currentDb) return null; + return currentDb + .prepare("SELECT * FROM milestone_specs WHERE id = ?") + .get(milestoneId); +} + +/** + * Get slice spec only (immutable intent, no runtime state). + * Purpose: Retrieve spec intent for re-planning or spec validation. + * Consumer: plan-slice and spec validation tools. + */ +export function getSliceSpec(milestoneId: string, sliceId: string): DbRow | undefined { + if (!currentDb) return null; + return currentDb + .prepare( + "SELECT * FROM slice_specs WHERE milestone_id = ? AND slice_id = ?", + ) + .get(milestoneId, sliceId); +} + +/** + * Get task spec only (immutable intent, no runtime state). + * Purpose: Retrieve spec intent for re-planning or spec validation. + * Consumer: plan-task and spec validation tools. 
+ */ +export function getTaskSpec(milestoneId: string, sliceId: string, taskId: string): DbRow | undefined { + if (!currentDb) return null; + return currentDb + .prepare( + "SELECT * FROM task_specs WHERE milestone_id = ? AND slice_id = ? AND task_id = ?", + ) + .get(milestoneId, sliceId, taskId); +} + +// ─── Validation Runs ─────────────────────────────────────────────────────────── + +/** + * Start a validation run for a milestone, slice, or task. + * Mirrors droid's validation-state.json creation from validation-contract.md. + * + * Purpose: Track explicit validation contracts and their execution state in the + * DB so any surface (CLI, TUI, headless) can answer "what are we validating and + * where are we" with a single query. + * + * Consumer: autonomous-solver, plan-slice, quality gates, eval runners. + */ +export function startValidationRun({ milestoneId, sliceId, taskId, contract }: { milestoneId: string; sliceId: string; taskId: string; contract: string }): string { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const runId = crypto.randomUUID(); + currentDb + .prepare( + `INSERT INTO validation_runs + (run_id, milestone_id, slice_id, task_id, contract, status, started_at, created_at) + VALUES (:run_id, :milestone_id, :slice_id, :task_id, :contract, 'running', datetime('now'), datetime('now'))`, + ) + .run({ + ":run_id": runId, + ":milestone_id": milestoneId, + ":slice_id": sliceId ?? null, + ":task_id": taskId ?? null, + ":contract": contract ?? "", + }); + return runId; +} + +/** + * Complete a validation run with verdict and findings. + * Mirrors droid's update of validation-state.json after run finishes. + * + * Consumer: autonomous-solver after eval execution, quality gate evaluators. + */ +export function completeValidationRun({ + runId, + verdict, + rationale = "", + findings = "", +}) { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const status = + verdict === "pass" ? 
"pass" : verdict === "fail" ? "fail" : "error"; + const result = currentDb + .prepare( + `UPDATE validation_runs SET + status = :status, + verdict = :verdict, + rationale = :rationale, + findings = :findings, + completed_at = datetime('now') + WHERE run_id = :run_id AND status = 'running'`, + ) + .run({ + ":run_id": runId, + ":status": status, + ":verdict": verdict ?? "", + ":rationale": rationale ?? "", + ":findings": findings ?? "", + }); + if (result.changes === 0) { + throw new SFError( + SF_STALE_STATE, + `sf-db: completeValidationRun: no running validation run found for run_id=${runId}`, + ); + } +} + +/** + * Get the latest validation state for a scope (milestone, slice, or task). + * Returns the most recent run — mirrors droid's validation-state.json read. + * + * Consumer: any surface that needs "are we passing?" for a milestone/slice/task. + */ +export function getLatestValidationState(milestoneId: string, sliceId: string, taskId: string): DbRow | undefined { + if (!currentDb) return null; + const rows = currentDb + .prepare( + `SELECT * FROM validation_runs + WHERE milestone_id = :milestone_id + AND slice_id IS :slice_id + AND task_id IS :task_id + ORDER BY created_at DESC, run_id DESC + LIMIT 1`, + ) + .all({ + ":milestone_id": milestoneId, + ":slice_id": sliceId ?? null, + ":task_id": taskId ?? null, + }); + return rows[0] ?? null; +} + +/** + * Get validation run history for a scope. + * Mirrors droid's historical validation-state.json files. + * + * Consumer: forensics, eval review, audit trail queries. + */ +export function getValidationHistory(milestoneId: string, sliceId: string, taskId: string, limit = 20): DbRow[] { + if (!currentDb) return []; + return currentDb + .prepare( + `SELECT * FROM validation_runs + WHERE milestone_id = :milestone_id + AND slice_id IS :slice_id + AND task_id IS :task_id + ORDER BY created_at DESC, run_id DESC + LIMIT :limit`, + ) + .all({ + ":milestone_id": milestoneId, + ":slice_id": sliceId ?? 
null, + ":task_id": taskId ?? null, + ":limit": limit, + }); +} + +// ─── Triage DB CRUD ─────────────────────────────────────────────────────────── + +/** + * Insert a triage run record. + * Purpose: replace .sf/triage/evals|inbox|skills JSONL files with queryable DB rows. + * Consumer: commands-todo.js triageTodoDump after successful triage. + */ +export function insertTriageRun(id: string, sourceFile: string | null, createdAt: string): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO triage_runs (id, source_file, status, created_at) + VALUES (:id, :source_file, 'complete', :created_at) + ON CONFLICT(id) DO NOTHING`, + ) + .run({ + ":id": id, + ":source_file": sourceFile ?? null, + ":created_at": createdAt ?? new Date().toISOString(), + }); +} + +/** + * Insert a triage eval candidate row. + * Purpose: store eval candidates in DB instead of .evals.jsonl. + * Consumer: commands-todo.js triageTodoDump. + */ +export function insertTriageEval(id: string, runId: string, data: Record, createdAt: string): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO triage_evals (id, run_id, task_input, expected_behavior, evidence, failure_mode, status, created_at) + VALUES (:id, :run_id, :task_input, :expected_behavior, :evidence, :failure_mode, 'pending', :created_at) + ON CONFLICT(id) DO NOTHING`, + ) + .run({ + ":id": id, + ":run_id": runId, + ":task_input": data.task_input ?? "", + ":expected_behavior": data.expected_behavior ?? "", + ":evidence": data.evidence ?? null, + ":failure_mode": data.failure_mode ?? null, + ":created_at": createdAt ?? new Date().toISOString(), + }); +} + +/** + * Insert a normalized triage inbox item row. + * Purpose: store triage inbox items (eval_candidate, implementation_task, etc.) in DB. + * Consumer: commands-todo.js triageTodoDump. 
+ */ +export function insertTriageItem( + id: string, + runId: string, + kind: string, + content: string, + evidence: string | null, + createdAt: string, +): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO triage_items (id, run_id, kind, content, evidence, status, created_at) + VALUES (:id, :run_id, :kind, :content, :evidence, 'pending', :created_at) + ON CONFLICT(id) DO NOTHING`, + ) + .run({ + ":id": id, + ":run_id": runId, + ":kind": kind, + ":content": content, + ":evidence": evidence ?? null, + ":created_at": createdAt ?? new Date().toISOString(), + }); +} + +/** + * Insert a triage skill proposal row. + * Purpose: store skill proposals in DB instead of .skills.jsonl. + * Consumer: commands-todo.js triageTodoDump. + */ +export function insertTriageSkill(id: string, runId: string, data: Record, createdAt: string): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO triage_skills (id, run_id, name, description, trigger, raw_json, status, created_at) + VALUES (:id, :run_id, :name, :description, :trigger, :raw_json, 'pending', :created_at) + ON CONFLICT(id) DO NOTHING`, + ) + .run({ + ":id": id, + ":run_id": runId, + ":name": data.title ?? data.name ?? null, + ":description": data.description ?? null, + ":trigger": data.trigger_pattern ?? data.trigger ?? null, + ":raw_json": JSON.stringify(data), + ":created_at": createdAt ?? new Date().toISOString(), + }); +} + +// ─── Runtime Counters ───────────────────────────────────────────────────────── + +/** + * Get a runtime counter value by key. Returns 0 if the key does not exist. + * Purpose: replace per-key JSON files in .sf/runtime/ with queryable DB rows. + * Consumer: auto-dispatch.js rewrite-count and uat-count logic. 
+ */ +export function getRuntimeCounter(key: string): number { + if (!currentDb) return 0; + const row = currentDb + .prepare("SELECT value FROM runtime_counters WHERE key = ?") + .get(key); + return typeof row?.value === "number" ? row.value : 0; +} + +/** + * Set a runtime counter to an explicit value. + * Purpose: replace JSON file writes for named counters. + * Consumer: auto-dispatch.js setRewriteCount. + */ +export function setRuntimeCounter(key: string, value: number): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO runtime_counters (key, value, updated_at) + VALUES (:key, :value, :updated_at) + ON CONFLICT(key) DO UPDATE SET value = excluded.value, updated_at = excluded.updated_at`, + ) + .run({ + ":key": key, + ":value": value, + ":updated_at": new Date().toISOString(), + }); +} + +/** + * Atomically increment a runtime counter and return the new value. + * Purpose: replace read-modify-write JSON file pattern for counters. + * Consumer: auto-dispatch.js incrementUatCount. + */ +export function incrementRuntimeCounter(key: string): number { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO runtime_counters (key, value, updated_at) + VALUES (:key, 1, :updated_at) + ON CONFLICT(key) DO UPDATE SET value = value + 1, updated_at = excluded.updated_at`, + ) + .run({ ":key": key, ":updated_at": new Date().toISOString() }); + const row = currentDb + .prepare("SELECT value FROM runtime_counters WHERE key = ?") + .get(key); + return typeof row?.value === "number" ? row.value : 1; +} + +// ─── Validation Attention Markers ───────────────────────────────────────────── + +/** + * Get a validation attention marker for a milestone, or null if absent. + * Purpose: replace .sf/runtime/validation-attention/{mid}.json reads. + * Consumer: auto-dispatch.js hasActiveValidationAttentionMarker. 
+ */ +export function getValidationAttentionMarker(milestoneId: string): DbRow | undefined { + if (!currentDb) return null; + return ( + currentDb + .prepare( + "SELECT * FROM validation_attention_markers WHERE milestone_id = ?", + ) + .get(milestoneId) ?? null + ); +} + +/** + * Upsert a validation attention marker for a milestone. + * Purpose: replace .sf/runtime/validation-attention/{mid}.json writes. + * Consumer: auto-dispatch.js writeValidationAttentionMarker. + */ +export function upsertValidationAttentionMarker(milestoneId: string, marker: Record): void { + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const now = new Date().toISOString(); + currentDb + .prepare( + `INSERT INTO validation_attention_markers + (milestone_id, created_at, source, remediation_round, revalidation_round, revalidation_requested_at) + VALUES (:milestone_id, :created_at, :source, :remediation_round, :revalidation_round, :revalidation_requested_at) + ON CONFLICT(milestone_id) DO UPDATE SET + source = excluded.source, + remediation_round = excluded.remediation_round, + revalidation_round = excluded.revalidation_round, + revalidation_requested_at = excluded.revalidation_requested_at`, + ) + .run({ + ":milestone_id": milestoneId, + ":created_at": marker.createdAt ?? now, + ":source": marker.source ?? null, + ":remediation_round": marker.remediationRound ?? null, + ":revalidation_round": marker.revalidationRound ?? null, + ":revalidation_requested_at": marker.revalidationRequestedAt ?? null, + }); +} + +// ─── Routing History ────────────────────────────────────────────────────────── + +/** + * Upsert a routing outcome for a pattern/tier pair, incrementing success or fail count. + * Purpose: persist adaptive tier learning to DB so routing decisions survive restarts. + * Consumer: routing-history.js recordOutcome. 
+ */ +export function upsertRoutingOutcome(db: DbAdapter, pattern: string, tier: string, success: boolean): void { + db.prepare( + `INSERT INTO routing_history (pattern, tier, success_count, fail_count, updated_at) + VALUES (:pattern, :tier, :success_count, :fail_count, :updated_at) + ON CONFLICT(pattern, tier) DO UPDATE SET + success_count = success_count + excluded.success_count, + fail_count = fail_count + excluded.fail_count, + updated_at = excluded.updated_at`, + ).run({ + ":pattern": pattern, + ":tier": tier, + ":success_count": success ? 1 : 0, + ":fail_count": success ? 0 : 1, + ":updated_at": new Date().toISOString(), + }); +} + +/** + * Get all routing history rows. + * Purpose: load full routing state into memory on init. + * Consumer: routing-history.js initRoutingHistory. + */ +export function getAllRoutingHistory(db: DbAdapter): DbRow[] { + return db + .prepare( + "SELECT pattern, tier, success_count, fail_count, updated_at FROM routing_history", + ) + .all(); +} + +/** + * Get routing history rows for a specific pattern. + * Purpose: targeted pattern lookup for adaptive tier queries. + * Consumer: routing-history.js getRoutingHistoryForPattern. + */ +export function getRoutingHistoryForPattern(db: DbAdapter, pattern: string): DbRow[] { + return db + .prepare( + "SELECT tier, success_count, fail_count FROM routing_history WHERE pattern = ?", + ) + .all(pattern); +} + +/** + * Insert a routing feedback signal into the audit table. + * Purpose: persist user feedback for later analysis and weighted outcome application. + * Consumer: routing-history.js recordFeedback. 
+ */ +export function insertRoutingFeedback(db: DbAdapter, pattern: string, tier: string, feedback: string): void { + db.prepare( + `INSERT INTO routing_feedback (pattern, tier, feedback, recorded_at) + VALUES (:pattern, :tier, :feedback, :recorded_at)`, + ).run({ + ":pattern": pattern, + ":tier": tier, + ":feedback": feedback, + ":recorded_at": new Date().toISOString(), + }); +} + +/** + * Clear all routing history and feedback rows. + * Purpose: full reset of adaptive learning state on user request. + * Consumer: routing-history.js clearRoutingHistory. + */ +export function clearRoutingHistory(db: DbAdapter): void { + db.prepare("DELETE FROM routing_history").run(); + db.prepare("DELETE FROM routing_feedback").run(); +} + +// ─── Unit Metrics CRUD ──────────────────────────────────────────────────────── + +function rowToUnitMetrics(row: Record): Record { + const unit = { + type: row["type"], + id: row["id"], + model: row["model"], + startedAt: row["started_at"], + finishedAt: row["finished_at"], + tokens: { + input: row["tokens_input"], + output: row["tokens_output"], + cacheRead: row["tokens_cache_read"], + cacheWrite: row["tokens_cache_write"], + total: row["tokens_total"], + }, + cost: row["cost"], + toolCalls: row["tool_calls"], + assistantMessages: row["assistant_messages"], + userMessages: row["user_messages"], + apiRequests: row["api_requests"], + }; + if (row["auto_session_key"] != null) + unit.autoSessionKey = row["auto_session_key"]; + if (row["tier"] != null) unit.tier = row["tier"]; + if (row["model_downgraded"] != null) + unit.modelDowngraded = row["model_downgraded"] === 1; + if (row["context_window_tokens"] != null) + unit.contextWindowTokens = row["context_window_tokens"]; + if (row["truncation_sections"] != null) + unit.truncationSections = row["truncation_sections"]; + if (row["continue_here_fired"] != null) + unit.continueHereFired = row["continue_here_fired"] === 1; + if (row["prompt_char_count"] != null) + unit.promptCharCount = 
row["prompt_char_count"]; + if (row["baseline_char_count"] != null) + unit.baselineCharCount = row["baseline_char_count"]; + if (row["cache_hit_rate"] != null) unit.cacheHitRate = row["cache_hit_rate"]; + if (row["skills"] != null) unit.skills = JSON.parse(row["skills"]); + return unit; +} + +/** + * Upsert a single unit metrics record into the DB. + * + * Purpose: persist per-unit token/cost telemetry from autonomous mode so + * history, cost, and export commands can read from the canonical DB store + * instead of a fragile JSON file on disk. + * + * Consumer: metrics.js saveLedger (called after every unit snapshot). + */ +export function upsertUnitMetrics(db: DbAdapter, unit: Record): void { + db.prepare( + `INSERT OR REPLACE INTO unit_metrics ( + type, id, started_at, finished_at, model, auto_session_key, + tokens_input, tokens_output, tokens_cache_read, tokens_cache_write, tokens_total, + cost, tool_calls, assistant_messages, user_messages, api_requests, + tier, model_downgraded, context_window_tokens, truncation_sections, + continue_here_fired, prompt_char_count, baseline_char_count, cache_hit_rate, skills + ) VALUES ( + :type, :id, :started_at, :finished_at, :model, :auto_session_key, + :tokens_input, :tokens_output, :tokens_cache_read, :tokens_cache_write, :tokens_total, + :cost, :tool_calls, :assistant_messages, :user_messages, :api_requests, + :tier, :model_downgraded, :context_window_tokens, :truncation_sections, + :continue_here_fired, :prompt_char_count, :baseline_char_count, :cache_hit_rate, :skills + )`, + ).run({ + ":type": unit.type, + ":id": unit.id, + ":started_at": unit.startedAt, + ":finished_at": unit.finishedAt, + ":model": unit.model, + ":auto_session_key": unit.autoSessionKey ?? 
null, + ":tokens_input": unit.tokens.input, + ":tokens_output": unit.tokens.output, + ":tokens_cache_read": unit.tokens.cacheRead, + ":tokens_cache_write": unit.tokens.cacheWrite, + ":tokens_total": unit.tokens.total, + ":cost": unit.cost, + ":tool_calls": unit.toolCalls, + ":assistant_messages": unit.assistantMessages, + ":user_messages": unit.userMessages, + ":api_requests": unit.apiRequests ?? unit.assistantMessages, + ":tier": unit.tier ?? null, + ":model_downgraded": + unit.modelDowngraded != null ? (unit.modelDowngraded ? 1 : 0) : null, + ":context_window_tokens": unit.contextWindowTokens ?? null, + ":truncation_sections": unit.truncationSections ?? null, + ":continue_here_fired": + unit.continueHereFired != null ? (unit.continueHereFired ? 1 : 0) : null, + ":prompt_char_count": unit.promptCharCount ?? null, + ":baseline_char_count": unit.baselineCharCount ?? null, + ":cache_hit_rate": unit.cacheHitRate ?? null, + ":skills": unit.skills != null ? JSON.stringify(unit.skills) : null, + }); +} + +/** + * Load all unit metrics ordered by started_at ASC (oldest first). + * + * Purpose: reconstruct the in-memory ledger from the canonical DB store + * on session init or on demand from history/cost commands. + * + * Consumer: metrics.js loadLedgerFromDisk and loadLedger. + */ +export function getAllUnitMetrics(db: DbAdapter): DbRow[] { + return db + .prepare("SELECT * FROM unit_metrics ORDER BY started_at ASC") + .all() + .map(rowToUnitMetrics); +} + +/** + * Delete oldest unit_metrics rows keeping only the N most recent by finished_at. + * + * Purpose: enforce a max-ledger-size cap so the DB doesn't bloat over long + * autonomous runs. Called by the doctor when the ledger exceeds its threshold. + * + * Consumer: metrics.js pruneMetricsLedger. 
+ */ +export function pruneUnitMetrics(db: DbAdapter, keepCount: number): void { + db.prepare( + `DELETE FROM unit_metrics WHERE rowid NOT IN ( + SELECT rowid FROM unit_metrics ORDER BY finished_at DESC LIMIT :keepCount + )`, + ).run({ ":keepCount": keepCount }); +} + +/** + * Get the project start timestamp stored in project_metrics_meta. + * + * Purpose: surface when the autonomous run started for elapsed-time display. + * + * Consumer: metrics.js loadLedger and loadLedgerFromDisk. + */ +export function getProjectStartedAt(db: DbAdapter): string | null { + const row = db + .prepare( + "SELECT value FROM project_metrics_meta WHERE key = 'projectStartedAt'", + ) + .get(); + if (!row) return null; + const ts = Number(row["value"]); + return Number.isFinite(ts) ? ts : null; +} + +/** + * Persist the project start timestamp in project_metrics_meta. + * + * Purpose: survive process restarts so the dashboard shows wall-clock elapsed + * time for the full autonomous session, not just the current process lifetime. + * + * Consumer: metrics.js initMetrics (via loadLedger → defaultLedger path). + */ +export function setProjectStartedAt(db: DbAdapter, ts: string): void { + db.prepare( + `INSERT INTO project_metrics_meta (key, value) VALUES ('projectStartedAt', :value) + ON CONFLICT(key) DO UPDATE SET value = excluded.value`, + ).run({ ":value": String(ts) }); +} diff --git a/packages/pi-agent-core/src/db/task-frontmatter.ts b/packages/pi-agent-core/src/db/task-frontmatter.ts new file mode 100644 index 000000000..547eddc8c --- /dev/null +++ b/packages/pi-agent-core/src/db/task-frontmatter.ts @@ -0,0 +1,481 @@ +/** + * Task Frontmatter - schema-backed task metadata + * + * Purpose: add structured fields to task records for risk assessment, + * mutation scope declaration, verification requirements, plan approval, and + * task lifecycle status while keeping scheduler status as a separate view field. 
+ * + * Consumer: plan-v2 task creation, UOK gate runner, parallel orchestrator, + * sf-db row mapping, and task state machine. + */ + +export const RISK_LEVELS = ["none", "low", "medium", "high", "critical"] as const; +export type RiskLevel = (typeof RISK_LEVELS)[number]; + +export const MUTATION_SCOPES = [ + "none", + "docs-only", + "config", + "test-only", + "isolated", + "bounded", + "cross-cutting", + "systemic", +] as const; +export type MutationScope = (typeof MUTATION_SCOPES)[number]; + +export const VERIFICATION_TYPES = [ + "none", + "self-check", + "review", + "test", + "integration", + "manual-qa", +] as const; +export type VerificationType = (typeof VERIFICATION_TYPES)[number]; + +export const PLAN_APPROVAL_STATES = [ + "not-required", + "pending", + "approved", + "rejected", + "auto-approved", +] as const; +export type PlanApprovalState = (typeof PLAN_APPROVAL_STATES)[number]; + +export const TASK_STATUSES = [ + "todo", + "running", + "verifying", + "reviewing", + "done", + "blocked", + "paused", + "failed", + "cancelled", + "retrying", +] as const; +export type TaskStatus = (typeof TASK_STATUSES)[number]; + +export const SCHEDULER_STATUSES = [ + "queued", + "due", + "claimed", + "dispatched", + "consumed", + "expired", +] as const; +export type SchedulerStatus = (typeof SCHEDULER_STATUSES)[number]; + +export interface TaskFrontmatter { + risk: RiskLevel; + mutationScope: MutationScope; + verification: VerificationType; + planApproval: PlanApprovalState; + taskStatus: TaskStatus; + schedulerStatus: SchedulerStatus; + estimatedEffort: number | null; + keyFiles: string[]; + dependencies: string[]; + blocksParallel: boolean; + requiresUserInput: boolean; + autoRetry: boolean; + maxRetries: number; +} + +const TASK_STATUS_ALIASES: Record = { + complete: "done", + completed: "done", + in_progress: "running", + "manual-attention": "reviewing", + manual_attention: "reviewing", + pending: "todo", + review: "reviewing", +}; + +const SCHEDULER_STATUS_ALIASES: 
Record = { + completed: "consumed", + done: "consumed", + pending: "queued", +}; + +export const DEFAULT_TASK_FRONTMATTER: TaskFrontmatter = { + risk: "low", + mutationScope: "isolated", + verification: "self-check", + planApproval: "not-required", + taskStatus: "todo", + schedulerStatus: "queued", + estimatedEffort: null, + keyFiles: [], + dependencies: [], + blocksParallel: false, + requiresUserInput: false, + autoRetry: true, + maxRetries: 2, +}; + +export function normalizeTaskStatus(value: unknown): string | null { + if (typeof value !== "string" || value.trim() === "") return "todo"; + const status = value.trim().toLowerCase(); + if ((TASK_STATUSES as readonly string[]).includes(status)) return status; + return TASK_STATUS_ALIASES[status] ?? null; +} + +export function normalizeSchedulerStatus(value: unknown): string | null { + if (typeof value !== "string" || value.trim() === "") return "queued"; + const status = value.trim().toLowerCase(); + if ((SCHEDULER_STATUSES as readonly string[]).includes(status)) return status; + return SCHEDULER_STATUS_ALIASES[status] ?? 
null; +} + +function normalizeArray(value: unknown): string[] { + if (Array.isArray(value)) return value.filter((v) => typeof v === "string"); + if (typeof value !== "string" || value.trim() === "") return []; + try { + const parsed = JSON.parse(value); + if (Array.isArray(parsed)) + return parsed.filter((v) => typeof v === "string"); + return []; + } catch { + return value + .split(",") + .map((v) => v.trim()) + .filter(Boolean); + } +} + +function normalizeBoolean(value: unknown): boolean { + if (value === true || value === 1) return true; + if (value === false || value === 0 || value == null) return false; + if (typeof value === "string") { + const normalized = value.trim().toLowerCase(); + if (["1", "true", "yes", "y"].includes(normalized)) return true; + if (["0", "false", "no", "n", ""].includes(normalized)) return false; + } + return Boolean(value); +} + +function validateChoice( + field: string, + value: unknown, + allowed: readonly string[], + normalized: Record, + errors: string[], +): void { + if (value === undefined || value === null || value === "") return; + if (typeof value === "string" && allowed.includes(value)) { + normalized[field] = value; + return; + } + errors.push( + `Invalid ${field} "${String(value)}". 
Must be one of: ${allowed.join(", ")}`, + ); +} + +export interface FrontmatterInput { + risk?: unknown; + mutationScope?: unknown; + verification?: unknown; + planApproval?: unknown; + taskStatus?: unknown; + schedulerStatus?: unknown; + estimatedEffort?: unknown; + keyFiles?: unknown; + dependencies?: unknown; + blocksParallel?: unknown; + requiresUserInput?: unknown; + autoRetry?: unknown; + maxRetries?: unknown; + [key: string]: unknown; +} + +export interface ValidationResult { + valid: boolean; + errors: string[]; + normalized: TaskFrontmatter; +} + +export function validateTaskFrontmatter(frontmatter: FrontmatterInput = {}): ValidationResult { + const errors: string[] = []; + const normalized: Record = { + ...DEFAULT_TASK_FRONTMATTER, + keyFiles: [], + dependencies: [], + }; + + validateChoice("risk", frontmatter.risk, RISK_LEVELS, normalized, errors); + validateChoice( + "mutationScope", + frontmatter.mutationScope, + MUTATION_SCOPES, + normalized, + errors, + ); + validateChoice( + "verification", + frontmatter.verification, + VERIFICATION_TYPES, + normalized, + errors, + ); + validateChoice( + "planApproval", + frontmatter.planApproval, + PLAN_APPROVAL_STATES, + normalized, + errors, + ); + + if (frontmatter.taskStatus !== undefined) { + const status = normalizeTaskStatus(frontmatter.taskStatus); + if (status) { + normalized.taskStatus = status; + } else { + errors.push( + `Invalid taskStatus "${String(frontmatter.taskStatus)}". Must be one of: ${TASK_STATUSES.join(", ")}`, + ); + } + } + + if (frontmatter.schedulerStatus !== undefined) { + const status = normalizeSchedulerStatus(frontmatter.schedulerStatus); + if (status) { + normalized.schedulerStatus = status; + } else { + errors.push( + `Invalid schedulerStatus "${String(frontmatter.schedulerStatus)}". 
Must be one of: ${SCHEDULER_STATUSES.join(", ")}`, + ); + } + } + + if (frontmatter.estimatedEffort !== undefined) { + const effort = Number(frontmatter.estimatedEffort); + if (!Number.isNaN(effort) && effort >= 0) { + normalized.estimatedEffort = effort; + } else if (frontmatter.estimatedEffort !== null) { + errors.push( + `Invalid estimatedEffort "${String(frontmatter.estimatedEffort)}". Must be a non-negative number or null.`, + ); + } + } + + if (frontmatter.keyFiles !== undefined) { + normalized.keyFiles = normalizeArray(frontmatter.keyFiles); + } + if (frontmatter.dependencies !== undefined) { + normalized.dependencies = normalizeArray(frontmatter.dependencies); + } + + for (const field of ["blocksParallel", "requiresUserInput", "autoRetry"]) { + if (frontmatter[field] !== undefined) { + normalized[field] = normalizeBoolean(frontmatter[field]); + } + } + + if (frontmatter.maxRetries !== undefined) { + const retries = Number(frontmatter.maxRetries); + if (Number.isInteger(retries) && retries >= 0 && retries <= 10) { + normalized.maxRetries = retries; + } else { + errors.push( + `Invalid maxRetries "${String(frontmatter.maxRetries)}". 
Must be an integer 0-10.`, + ); + } + } + + return { + valid: errors.length === 0, + errors, + normalized: normalized as unknown as TaskFrontmatter, + }; +} + +export interface TaskRecord { + risk?: unknown; + mutation_scope?: unknown; + mutationScope?: unknown; + verification_type?: unknown; + verificationType?: unknown; + verification?: unknown; + plan_approval?: unknown; + planApproval?: unknown; + task_status?: unknown; + taskStatus?: unknown; + status?: unknown; + scheduler_status?: unknown; + schedulerStatus?: unknown; + estimated_effort?: unknown; + estimatedEffort?: unknown; + frontmatter_key_files?: unknown; + frontmatterKeyFiles?: unknown; + files?: unknown; + key_files?: unknown; + keyFiles?: unknown; + dependencies?: unknown; + depends_on?: unknown; + dependsOn?: unknown; + depends?: unknown; + blocks_parallel?: unknown; + blocksParallel?: unknown; + requires_user_input?: unknown; + requiresUserInput?: unknown; + auto_retry?: unknown; + autoRetry?: unknown; + max_retries?: unknown; + maxRetries?: unknown; + frontmatter?: TaskFrontmatter; + [key: string]: unknown; +} + +export function taskFrontmatterFromRecord( + task: TaskRecord = {}, + overrides: Partial = {}, +): ValidationResult { + const rawFrontmatter: FrontmatterInput = { + risk: task.risk, + mutationScope: task.mutation_scope ?? task.mutationScope, + verification: + task.verification_type ?? task.verificationType ?? task.verification, + planApproval: task.plan_approval ?? task.planApproval, + taskStatus: task.task_status ?? task.taskStatus ?? task.status, + schedulerStatus: task.scheduler_status ?? task.schedulerStatus, + estimatedEffort: task.estimated_effort ?? task.estimatedEffort, + keyFiles: + task.frontmatter_key_files ?? + task.frontmatterKeyFiles ?? + task.files ?? + task.key_files ?? + task.keyFiles ?? + [], + dependencies: + task.dependencies ?? + task.depends_on ?? + task.dependsOn ?? + task.depends ?? + [], + blocksParallel: task.blocks_parallel ?? 
task.blocksParallel, + requiresUserInput: task.requires_user_input ?? task.requiresUserInput, + autoRetry: task.auto_retry ?? task.autoRetry, + maxRetries: task.max_retries ?? task.maxRetries, + ...overrides, + }; + + return validateTaskFrontmatter(rawFrontmatter); +} + +export interface BuiltTaskRecord extends TaskRecord { + frontmatter: TaskFrontmatter; + frontmatterValid: boolean; + frontmatterErrors: string[]; +} + +export function buildTaskRecord( + task: TaskRecord = {}, + overrides: Partial = {}, +): BuiltTaskRecord { + const validation = taskFrontmatterFromRecord(task, overrides); + return { + ...task, + frontmatter: validation.normalized, + frontmatterValid: validation.valid, + frontmatterErrors: validation.errors, + }; +} + +export function withTaskFrontmatter( + task: TaskRecord = {}, + overrides: Partial = {}, +): BuiltTaskRecord { + return buildTaskRecord(task, overrides); +} + +export interface ParallelCheckResult { + canParallel: boolean; + reason?: string; +} + +export function canRunInParallel( + taskA: TaskRecord, + taskB: TaskRecord, +): ParallelCheckResult { + if ( + !taskA || + !taskB || + typeof taskA !== "object" || + typeof taskB !== "object" + ) { + return { canParallel: false, reason: "Invalid task input" }; + } + const fmA = taskA.frontmatter ?? buildTaskRecord(taskA).frontmatter; + const fmB = taskB.frontmatter ?? 
buildTaskRecord(taskB).frontmatter; + + if (fmA.blocksParallel || fmB.blocksParallel) { + return { + canParallel: false, + reason: "One or both tasks block parallel execution", + }; + } + + if (fmA.mutationScope === "systemic" || fmB.mutationScope === "systemic") { + return { + canParallel: false, + reason: "One or both tasks have systemic mutation scope", + }; + } + + const highRisk = ["high", "critical"]; + if (highRisk.includes(fmA.risk) && highRisk.includes(fmB.risk)) { + return { canParallel: false, reason: "Both tasks are high/critical risk" }; + } + + if (fmA.keyFiles.length > 0 && fmB.keyFiles.length > 0) { + const filesB = new Set(fmB.keyFiles); + const overlap = fmA.keyFiles.filter((file) => filesB.has(file)); + if (overlap.length > 0) { + return { + canParallel: false, + reason: `File overlap: ${overlap.join(", ")}`, + }; + } + } + + return { canParallel: true }; +} + +export function canTasksRunInParallel( + taskA: TaskRecord, + taskB: TaskRecord, +): ParallelCheckResult { + return canRunInParallel(taskA, taskB); +} + +export function computeTaskPriority(task: TaskRecord): number { + const fm = task.frontmatter ?? buildTaskRecord(task).frontmatter; + let score = 50; + + const riskScores: Record = { none: 0, low: 5, medium: 15, high: 30, critical: 50 }; + score += riskScores[fm.risk] ?? 0; + + const scopeScores: Record = { + none: 0, + "docs-only": 2, + config: 5, + "test-only": 3, + isolated: 5, + bounded: 10, + "cross-cutting": 25, + systemic: 40, + }; + score += scopeScores[fm.mutationScope] ?? 
0; + + if (fm.blocksParallel) score += 20; + if (fm.requiresUserInput) score += 10; + if (fm.planApproval === "pending") score += 10; + + return Math.min(100, score); +} + +export function scoreTaskFrontmatterPriority(task: TaskRecord): number { + return computeTaskPriority(task); +} diff --git a/packages/pi-agent-core/src/db/uok/trace-writer.ts b/packages/pi-agent-core/src/db/uok/trace-writer.ts new file mode 100644 index 000000000..8c6980362 --- /dev/null +++ b/packages/pi-agent-core/src/db/uok/trace-writer.ts @@ -0,0 +1,76 @@ +/** + * Trace event reader for .sf/traces/ directory. + * + * Purpose: read typed trace events from JSONL files for gate statistics + * and performance analysis. Uses a minimal sfRoot implementation (fast path: + * basePath/.sf) to avoid pulling in the full paths.js dependency chain. + * + * Consumer: sf-db.ts gate statistics functions (getGateStats, etc.) + */ +import { + existsSync, + readdirSync, + readFileSync, + statSync, +} from "node:fs"; +import { join } from "node:path"; + +function sfRoot(basePath: string): string { + return join(basePath, ".sf"); +} + +function tracesDir(basePath: string): string { + return join(sfRoot(basePath), "traces"); +} + +export function appendTraceEvent( + basePath: string, + traceId: string, + event: Record, +): void { + if (!basePath || !traceId) return; + // No-op in pi-agent-core — writes are handled by the sf extension. 
+ void event; +} + +export function readTraceEvents( + basePath: string, + type: string, + windowHours = 24, +): Record<string, unknown>[] { + const dir = tracesDir(basePath); + if (!existsSync(dir)) return []; + const cutoff = Date.now() - windowHours * 60 * 60 * 1000; + const results: Record<string, unknown>[] = []; + let files: string[]; + try { + files = readdirSync(dir).filter( + (f) => f.endsWith(".jsonl") && f !== "latest", + ); + } catch { + return []; + } + for (const file of files) { + try { + const filePath = join(dir, file); + if (statSync(filePath).mtimeMs < cutoff) continue; + const lines = readFileSync(filePath, "utf-8") + .split("\n") + .filter(Boolean); + for (const line of lines) { + try { + const ev = JSON.parse(line) as Record<string, unknown>; + if (!type || ev["type"] === type) { + const tsMs = ev["ts"] ? new Date(ev["ts"] as string).getTime() : 0; + if (!ev["ts"] || tsMs >= cutoff) results.push(ev); + } + } catch { + /* skip malformed lines */ + } + } + } catch { + /* skip unreadable files */ + } + } + return results; +} diff --git a/packages/pi-agent-core/src/db/workflow-logger.ts b/packages/pi-agent-core/src/db/workflow-logger.ts new file mode 100644 index 000000000..7a0a21d20 --- /dev/null +++ b/packages/pi-agent-core/src/db/workflow-logger.ts @@ -0,0 +1,94 @@ +/** + * SF Workflow Logger — minimal adapter for pi-agent-core. + * + * Purpose: provide logWarning/logError without pulling in the full + * workflow-logger dependency chain (file-lock, notification-store, paths). + * The sf extension's own workflow-logger handles the full audit/notification + * pipeline; this module is used only by sf-db.ts in pi-agent-core context. + * + * Consumer: sf-db.ts in pi-agent-core for operational warnings and errors. 
+ */ + +export interface LogEntry { + ts: string; + severity: "warn" | "error"; + component: string; + message: string; + context?: Record<string, unknown>; +} + +const MAX_BUFFER = 100; +let _buffer: LogEntry[] = []; +let _stderrEnabled = true; + +export function setStderrLoggingEnabled(enabled: boolean): boolean { + const previous = _stderrEnabled; + _stderrEnabled = enabled; + return previous; +} + +export function logWarning( + component: string, + message: string, + context?: Record<string, unknown>, +): void { + _push("warn", component, message, context); +} + +export function logError( + component: string, + message: string, + context?: Record<string, unknown>, +): void { + _push("error", component, message, context); +} + +export function drainLogs(): LogEntry[] { + const entries = _buffer; + _buffer = []; + return entries; +} + +export function peekLogs(): LogEntry[] { + return _buffer; +} + +export function hasErrors(): boolean { + return _buffer.some((e) => e.severity === "error"); +} + +export function hasWarnings(): boolean { + return _buffer.some((e) => e.severity === "warn"); +} + +export function hasAnyIssues(): boolean { + return _buffer.length > 0; +} + +export function _resetLogs(): void { + _buffer = []; +} + +function _push( + severity: "warn" | "error", + component: string, + message: string, + context?: Record<string, unknown>, +): void { + const entry: LogEntry = { + ts: new Date().toISOString(), + severity, + component, + message, + ...(context ? { context } : {}), + }; + const prefix = severity === "error" ? "ERROR" : "WARN"; + const ctxStr = context ? 
` ${JSON.stringify(context)}` : ""; + if (_stderrEnabled) { + process.stderr.write(`[sf:${component}] ${prefix}: ${message}${ctxStr}\n`); + } + _buffer.push(entry); + if (_buffer.length > MAX_BUFFER) { + _buffer.shift(); + } +} diff --git a/packages/pi-agent-core/src/index.ts b/packages/pi-agent-core/src/index.ts new file mode 100644 index 000000000..fc0bdcad7 --- /dev/null +++ b/packages/pi-agent-core/src/index.ts @@ -0,0 +1 @@ +export * from "./db/index.js"; diff --git a/packages/pi-agent-core/tsconfig.json b/packages/pi-agent-core/tsconfig.json new file mode 100644 index 000000000..6a2c23cf2 --- /dev/null +++ b/packages/pi-agent-core/tsconfig.json @@ -0,0 +1,38 @@ +{ + "compilerOptions": { + "target": "ES2024", + "module": "Node16", + "lib": [ + "ES2024" + ], + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "incremental": true, + "forceConsistentCasingInFileNames": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "inlineSources": true, + "inlineSourceMap": false, + "moduleResolution": "Node16", + "resolveJsonModule": true, + "allowImportingTsExtensions": false, + "useDefineForClassFields": false, + "types": [ + "node" + ], + "outDir": "./dist", + "rootDir": "./src" + }, + "include": [ + "src/**/*.ts" + ], + "exclude": [ + "node_modules", + "dist", + "**/*.d.ts", + "src/**/*.d.ts", + "src/**/*.test.ts" + ] +} diff --git a/src/resources/extensions/bg-shell/bg-shell-lifecycle.js b/src/resources/extensions/bg-shell/bg-shell-lifecycle.js index 4c6d658c7..0e6027bab 100644 --- a/src/resources/extensions/bg-shell/bg-shell-lifecycle.js +++ b/src/resources/extensions/bg-shell/bg-shell-lifecycle.js @@ -3,7 +3,7 @@ * context injection, process discovery, footer widget, and periodic maintenance. 
*/ import { truncateToWidth, visibleWidth } from "@singularity-forge/tui"; -import { formatTokenCount } from "../shared/format-utils.js"; +import { formatTokenCount } from "@singularity-forge/coding-agent"; import { cleanupAll, cleanupSessionProcesses, diff --git a/src/resources/extensions/bg-shell/bg-shell-tool.js b/src/resources/extensions/bg-shell/bg-shell-tool.js index bf5765377..dafb033cc 100644 --- a/src/resources/extensions/bg-shell/bg-shell-tool.js +++ b/src/resources/extensions/bg-shell/bg-shell-tool.js @@ -4,7 +4,7 @@ import { Type } from "@sinclair/typebox"; import { StringEnum } from "@singularity-forge/ai"; import { Text } from "@singularity-forge/tui"; -import { toPosixPath } from "../shared/path-display.js"; +import { toPosixPath } from "@singularity-forge/coding-agent"; import { queryShellEnv, runOnSession, sendAndWait } from "./interaction.js"; import { formatDigestText, diff --git a/src/resources/extensions/search-the-web/native-search.js b/src/resources/extensions/search-the-web/native-search.js index 2d4052a68..7e09831ab 100644 --- a/src/resources/extensions/search-the-web/native-search.js +++ b/src/resources/extensions/search-the-web/native-search.js @@ -1,40 +1,20 @@ /** - * Native Anthropic web search hook logic. + * Native Anthropic web search extension hooks. * - * Extracted from index.ts so it can be unit-tested without importing - * the heavy tool-registration modules. - * - * The core injection logic (before_provider_request) now lives in: + * The injection logic (before_provider_request) lives in the native provider middleware: * packages/coding-agent/src/core/providers/web-search-middleware.ts * - * This file exports the constants and functions needed by the extension and by tests, - * and delegates before_provider_request to the native middleware singleton so that - * (a) tests exercise the same code path as production and (b) PREFERENCES.md-based - * search_provider overrides are respected via setPreferBraveResolver. 
+ * This file owns only the extension-layer concerns: model_select diagnostics, + * active-tool management, session reset, and PREFERENCES.md-aware provider resolution. */ import { CUSTOM_SEARCH_TOOL_NAMES, - MAX_NATIVE_SEARCHES_PER_SESSION, setPreferBraveResolver, - stripThinkingFromHistory, webSearchMiddleware, } from "@singularity-forge/coding-agent"; import { resolveSearchProviderFromPreferences } from "../sf/preferences.js"; /** Tool names for the Brave-backed custom search tools */ export const BRAVE_TOOL_NAMES = ["search-the-web", "search_and_read"]; -/** All custom search tool names that should be disabled when native search is active */ -export { CUSTOM_SEARCH_TOOL_NAMES, MAX_NATIVE_SEARCHES_PER_SESSION, stripThinkingFromHistory }; -/** - * Returns true when the provider supports native Anthropic web_search injection. - * - * Purpose: github-copilot, minimax, and kimi use Claude-compatible wire format - * but do NOT support the web_search tool — injecting it causes a 400 error. - * The `claude-` model-name prefix heuristic is too broad (those providers also - * use claude-* names). Only the explicit "anthropic" provider tag is trusted. - */ -export function supportsNativeWebSearch(provider) { - return provider === "anthropic"; -} /** When true, skip native web search injection and keep Brave/custom tools active on Anthropic. */ export function preferBraveSearch() { // PREFERENCES.md takes priority over env var @@ -57,14 +37,13 @@ export function preferBraveSearch() { ); } /** - * Register model_select, before_provider_request, and session_start hooks - * for native Anthropic web search injection. + * Register model_select and session_start hooks for native Anthropic web search. * - * Returns the isAnthropicProvider getter for testing. + * before_provider_request injection runs natively in sdk.ts via webSearchMiddleware — + * nothing is registered here for that event. 
*/ export function registerNativeSearchHooks(pi) { - // null = unknown (model_select not yet fired); true/false = provider is/isn't Anthropic. - let isAnthropicProvider = null; + let isAnthropicProvider = false; // Register the PREFERENCES.md-aware resolver so the native middleware (shared // singleton in web-search-middleware.ts) respects search_provider overrides. // Called here so each test invocation resets the resolver to the current context. @@ -127,26 +106,8 @@ export function registerNativeSearchHooks(pi) { ); } }); - // before_provider_request is now handled natively by WebSearchMiddleware in sdk.ts. - // This handler delegates to the same singleton so that: - // (a) existing tests continue to exercise the injection logic end-to-end, and - // (b) the double-injection guard (tools.some(web_search_20250305)) is a no-op - // in production where sdk.ts already ran the middleware first. - // - // When event.model is absent but model_select has already run (isAnthropicProvider - // is not null), synthesize a provider hint from the cached state so the middleware - // does not fall back to the model-name heuristic and wrongly inject into Copilot - // claude-* requests (#copilot-false-positive). - pi.on("before_provider_request", (event) => { - let modelHint = event.model; - if (!modelHint && isAnthropicProvider !== null) { - modelHint = { provider: isAnthropicProvider ? "anthropic" : "not-anthropic" }; - } - return webSearchMiddleware.applyToPayload(event.payload, modelHint); - }); pi.on("session_start", async (_event, _ctx) => { // Reset the shared middleware session budget (#1309). 
webSearchMiddleware.resetSession(); }); - return { getIsAnthropic: () => isAnthropicProvider }; } diff --git a/src/resources/extensions/sf/auto-runaway-guard.js b/src/resources/extensions/sf/auto-runaway-guard.js index b0526856b..1dc06a184 100644 --- a/src/resources/extensions/sf/auto-runaway-guard.js +++ b/src/resources/extensions/sf/auto-runaway-guard.js @@ -8,7 +8,7 @@ import { execFileSync } from "node:child_process"; import { createHash } from "node:crypto"; import { existsSync, lstatSync, readdirSync, readFileSync } from "node:fs"; -import { formatTokenCount } from "../shared/format-utils.js"; +import { formatTokenCount } from "@singularity-forge/coding-agent"; export const DEFAULT_RUNAWAY_TOOL_CALL_WARNING = 60; export const DEFAULT_RUNAWAY_TOKEN_WARNING = 1_000_000; export const DEFAULT_RUNAWAY_ELAPSED_MINUTES = 20; diff --git a/src/resources/extensions/sf/commands-session-report.js b/src/resources/extensions/sf/commands-session-report.js index a3e69c9ed..87f1a9d3c 100644 --- a/src/resources/extensions/sf/commands-session-report.js +++ b/src/resources/extensions/sf/commands-session-report.js @@ -6,7 +6,7 @@ */ import { mkdirSync, writeFileSync } from "node:fs"; import { join } from "node:path"; -import { formatDuration } from "../shared/format-utils.js"; +import { formatDuration } from "@singularity-forge/coding-agent"; import { aggregateByModel, formatCost, diff --git a/src/resources/extensions/sf/commands-ship.js b/src/resources/extensions/sf/commands-ship.js index a43cf64b7..d774fe5f8 100644 --- a/src/resources/extensions/sf/commands-ship.js +++ b/src/resources/extensions/sf/commands-ship.js @@ -6,7 +6,7 @@ */ import { execFileSync } from "node:child_process"; import { existsSync, readdirSync, readFileSync } from "node:fs"; -import { formatDuration } from "../shared/format-utils.js"; +import { formatDuration } from "@singularity-forge/coding-agent"; import { aggregateByModel, formatCost, diff --git a/src/resources/extensions/sf/export-html.js 
b/src/resources/extensions/sf/export-html.js index dbc08ef50..ca022b82e 100644 --- a/src/resources/extensions/sf/export-html.js +++ b/src/resources/extensions/sf/export-html.js @@ -19,7 +19,7 @@ * * Design: Linear-inspired — restrained palette, geometric status, no emoji. */ -import { formatDateShort, formatDuration } from "../shared/format-utils.js"; +import { formatDateShort, formatDuration } from "@singularity-forge/coding-agent"; import { formatCost, formatTokenCount } from "./metrics.js"; export function generateHtmlReport(data, opts) { const generated = new Date().toISOString(); diff --git a/src/resources/extensions/sf/export.js b/src/resources/extensions/sf/export.js index 15d06bb62..d656209a7 100644 --- a/src/resources/extensions/sf/export.js +++ b/src/resources/extensions/sf/export.js @@ -3,7 +3,7 @@ import { execFile } from "node:child_process"; import { mkdirSync, writeFileSync } from "node:fs"; import { basename, join } from "node:path"; -import { fileLink, formatDuration } from "../shared/format-utils.js"; +import { fileLink, formatDuration } from "@singularity-forge/coding-agent"; import { getErrorMessage } from "./error-utils.js"; import { aggregateByModel, diff --git a/src/resources/extensions/sf/extension-manifest.json b/src/resources/extensions/sf/extension-manifest.json index 6f7614cb7..d18f67ed7 100644 --- a/src/resources/extensions/sf/extension-manifest.json +++ b/src/resources/extensions/sf/extension-manifest.json @@ -37,6 +37,7 @@ "resume_agent", "run_command", "save_decision", + "save_knowledge", "save_requirement", "save_summary", "search_evidence", diff --git a/src/resources/extensions/sf/forensics.js b/src/resources/extensions/sf/forensics.js index 9dcbd5415..f90b94a13 100644 --- a/src/resources/extensions/sf/forensics.js +++ b/src/resources/extensions/sf/forensics.js @@ -17,7 +17,7 @@ import { } from "node:fs"; import { homedir } from "node:os"; import { join, relative } from "node:path"; -import { formatDuration } from 
"../shared/format-utils.js"; +import { formatDuration } from "@singularity-forge/coding-agent"; import { showNextAction } from "../shared/tui.js"; import { atomicWriteSync } from "./atomic-write.js"; import { isAutoActive } from "./auto.js"; diff --git a/src/resources/extensions/sf/history.js b/src/resources/extensions/sf/history.js index 1cad85653..f27c54929 100644 --- a/src/resources/extensions/sf/history.js +++ b/src/resources/extensions/sf/history.js @@ -3,7 +3,7 @@ import { formatDuration, truncateWithEllipsis, -} from "../shared/format-utils.js"; +} from "@singularity-forge/coding-agent"; import { padRight } from "../shared/layout-utils.js"; import { aggregateByModel, diff --git a/src/resources/extensions/sf/learning/outcome-aggregator.mjs b/src/resources/extensions/sf/learning/outcome-aggregator.mjs index 2004c936f..bee2c539b 100644 --- a/src/resources/extensions/sf/learning/outcome-aggregator.mjs +++ b/src/resources/extensions/sf/learning/outcome-aggregator.mjs @@ -294,6 +294,7 @@ export function recentOutcomes(db, opts = {}) { duration_ms, tokens_total, cost_usd, + failure_mode, recorded_at FROM llm_task_outcomes ${where} diff --git a/src/resources/extensions/sf/metrics.js b/src/resources/extensions/sf/metrics.js index d9ed04703..00c312bac 100644 --- a/src/resources/extensions/sf/metrics.js +++ b/src/resources/extensions/sf/metrics.js @@ -29,7 +29,7 @@ import { isAuditEnvelopeEnabled } from "./uok/audit-toggle.js"; // Re-export from shared — import directly from format-utils to avoid pulling // in the full barrel (mod.js → ui.js → @singularity-forge/tui) which breaks when loaded // outside jiti's alias resolution (e.g. dynamic import in auto-loop reports). 
-export { formatTokenCount } from "../shared/format-utils.js"; +export { formatTokenCount } from "@singularity-forge/coding-agent"; // ─── Learning Integration ───────────────────────────────────────────────────── function formatAggregateModelIdentity(modelId) { diff --git a/src/resources/extensions/sf/migrate/parsers.js b/src/resources/extensions/sf/migrate/parsers.js index c672a075e..09f4339f1 100644 --- a/src/resources/extensions/sf/migrate/parsers.js +++ b/src/resources/extensions/sf/migrate/parsers.js @@ -1,7 +1,7 @@ // Old .planning format per-file parsers // Pure functions that take file content (string) and return typed data. // Zero Pi dependencies — uses only exported helpers from files.ts. -import { normalizeStringArray } from "../../shared/format-utils.js"; +import { normalizeStringArray } from "@singularity-forge/coding-agent"; import { extractBoldField, parseFrontmatterMap, diff --git a/src/resources/extensions/sf/preferences-validation.js b/src/resources/extensions/sf/preferences-validation.js index b1f05a256..f9fb871a3 100644 --- a/src/resources/extensions/sf/preferences-validation.js +++ b/src/resources/extensions/sf/preferences-validation.js @@ -5,7 +5,7 @@ * Accepts a raw SFPreferences object and returns a sanitized copy * together with any errors and warnings. 
*/ -import { normalizeStringArray } from "../shared/format-utils.js"; +import { normalizeStringArray } from "@singularity-forge/coding-agent"; import { VALID_BRANCH_NAME } from "./git-constants.js"; import { CURRENT_PREFERENCES_SCHEMA_VERSION, diff --git a/src/resources/extensions/sf/preferences.js b/src/resources/extensions/sf/preferences.js index 345fa7748..dc7eb22db 100644 --- a/src/resources/extensions/sf/preferences.js +++ b/src/resources/extensions/sf/preferences.js @@ -13,7 +13,7 @@ import { existsSync, readFileSync } from "node:fs"; import { homedir } from "node:os"; import { dirname, join, resolve } from "node:path"; import { parse as parseYaml } from "yaml"; -import { normalizeStringArray } from "../shared/format-utils.js"; +import { normalizeStringArray } from "@singularity-forge/coding-agent"; import { sfRoot } from "./paths.js"; import { _initPrefsLoader, diff --git a/src/resources/extensions/sf/reports.js b/src/resources/extensions/sf/reports.js index c8b85b8ca..2806d813f 100644 --- a/src/resources/extensions/sf/reports.js +++ b/src/resources/extensions/sf/reports.js @@ -15,7 +15,7 @@ */ import { existsSync, mkdirSync, readFileSync } from "node:fs"; import { join } from "node:path"; -import { formatDateShort, formatDuration } from "../shared/format-utils.js"; +import { formatDateShort, formatDuration } from "@singularity-forge/coding-agent"; import { atomicWriteSync } from "./atomic-write.js"; import { formatCost, formatTokenCount } from "./metrics.js"; import { sfRoot } from "./paths.js"; diff --git a/src/resources/extensions/sf/session-forensics.js b/src/resources/extensions/sf/session-forensics.js index 369c24450..66eacc6be 100644 --- a/src/resources/extensions/sf/session-forensics.js +++ b/src/resources/extensions/sf/session-forensics.js @@ -19,7 +19,7 @@ */ import { existsSync, readdirSync, readFileSync, statSync } from "node:fs"; import { basename, join } from "node:path"; -import { truncateWithEllipsis } from "../shared/format-utils.js"; 
+import { truncateWithEllipsis } from "@singularity-forge/coding-agent"; import { MAX_JSONL_BYTES, parseJSONL } from "./jsonl-utils.js"; import { nativeDiffStat, diff --git a/src/resources/extensions/sf/sf-db.js b/src/resources/extensions/sf/sf-db.js index 446f15ae9..5ff26a8ed 100644 --- a/src/resources/extensions/sf/sf-db.js +++ b/src/resources/extensions/sf/sf-db.js @@ -3222,6 +3222,7 @@ function migrateSchema(db) { "failure_mode", "ALTER TABLE llm_task_outcomes ADD COLUMN failure_mode TEXT DEFAULT NULL", ); + db.exec("CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_failure_mode ON llm_task_outcomes(model_id, failure_mode, recorded_at DESC)"); db.prepare( "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", ).run({ diff --git a/src/resources/extensions/shared/format-utils.js b/src/resources/extensions/shared/format-utils.js deleted file mode 100644 index 608ba2768..000000000 --- a/src/resources/extensions/shared/format-utils.js +++ /dev/null @@ -1,20 +0,0 @@ -/** - * Compatibility shim — re-exports pure formatting utilities from the - * canonical implementation in @singularity-forge/coding-agent. - * - * All 13 importers of this module continue to work without any import changes. - * The implementations now live in packages/coding-agent/src/utils/format.ts. - * - * ANSI-aware layout helpers (padRight, joinColumns, centerLine, fitColumns) - * still live in layout-utils.js (depend on @singularity-forge/tui). 
- */ -export { - fileLink, - formatDateShort, - formatDuration, - formatTokenCount, - normalizeStringArray, - sparkline, - stripAnsi, - truncateWithEllipsis, -} from "@singularity-forge/coding-agent"; diff --git a/src/resources/extensions/shared/mod.js b/src/resources/extensions/shared/mod.js index a97d94fca..c2495ca51 100644 --- a/src/resources/extensions/shared/mod.js +++ b/src/resources/extensions/shared/mod.js @@ -8,7 +8,8 @@ export { sparkline, stripAnsi, truncateWithEllipsis, -} from "./format-utils.js"; + toPosixPath, +} from "@singularity-forge/coding-agent"; export { parseFrontmatterMap, splitFrontmatter } from "./frontmatter.js"; export { centerLine, @@ -16,7 +17,6 @@ export { joinColumns, padRight, } from "./layout-utils.js"; -export { toPosixPath } from "./path-display.js"; export { maskEditorLine, sanitizeError } from "./sanitize.js"; export { shortcutDesc } from "./terminal.js"; export { GLYPH, INDENT, STATUS_COLOR, STATUS_GLYPH } from "./ui.js"; diff --git a/src/resources/extensions/shared/path-display.js b/src/resources/extensions/shared/path-display.js deleted file mode 100644 index 893e9489f..000000000 --- a/src/resources/extensions/shared/path-display.js +++ /dev/null @@ -1,12 +0,0 @@ -/** - * Compatibility shim — re-exports toPosixPath from the canonical - * implementation in @singularity-forge/coding-agent. - * - * All importers of this module continue to work without any import changes. - * The implementation now lives in packages/coding-agent/src/utils/path-display.ts. - * - * Use ONLY for paths entering text the LLM or shell sees. - * Filesystem operations (fs.readFile, path.join, spawn cwd) handle native - * separators correctly and should NOT be normalized. 
- */ -export { toPosixPath } from "@singularity-forge/coding-agent"; diff --git a/src/tests/native-search.test.ts b/src/tests/native-search.test.ts index a03f092dd..32e3f9fbe 100644 --- a/src/tests/native-search.test.ts +++ b/src/tests/native-search.test.ts @@ -1,12 +1,14 @@ import assert from "node:assert/strict"; import { afterEach, test } from "vitest"; import { - BRAVE_TOOL_NAMES, CUSTOM_SEARCH_TOOL_NAMES, MAX_NATIVE_SEARCHES_PER_SESSION, - type NativeSearchPI, - registerNativeSearchHooks, stripThinkingFromHistory, + webSearchMiddleware, +} from "@singularity-forge/coding-agent"; +import { + BRAVE_TOOL_NAMES, + registerNativeSearchHooks, } from "../resources/extensions/search-the-web/native-search.ts"; import { getMiniMaxSearchApiKey, @@ -46,11 +48,14 @@ function createMockPI() { }, }; - const pi: NativeSearchPI & { + const pi: { handlers: MockHandler[]; notifications: typeof notifications; mockCtx: typeof mockCtx; fire(event: string, eventData: any, ctx?: any): Promise; + on(event: string, handler: (...args: any[]) => any): void; + getActiveTools(): string[]; + setActiveTools(tools: string[]): void; } = { handlers, notifications, @@ -81,27 +86,20 @@ function createMockPI() { // ─── Tests ────────────────────────────────────────────────────────────────── -test("before_provider_request injects web_search for claude models", async () => { - const pi = createMockPI(); - registerNativeSearchHooks(pi); +// ─── webSearchMiddleware.applyToPayload tests ──────────────────────────────── +// before_provider_request injection runs natively in sdk.ts; tests call the +// middleware directly instead of routing through the extension hook. 
- // Confirm Anthropic provider via model_select before request - await pi.fire("model_select", { - type: "model_select", - model: { provider: "anthropic", name: "claude-sonnet-4-6" }, - previousModel: undefined, - source: "set", - }); +test("applyToPayload injects web_search for Anthropic provider", async () => { + const pi = createMockPI(); + registerNativeSearchHooks(pi); // resets session counter const payload: Record = { model: "claude-sonnet-4-6-20250514", tools: [{ name: "bash", type: "custom" }], }; - const result = await pi.fire("before_provider_request", { - type: "before_provider_request", - payload, - }); + const result = webSearchMiddleware.applyToPayload(payload, { provider: "anthropic" }); const tools = (result as any)?.tools ?? payload.tools; const nativeTool = (tools as any[]).find( @@ -120,11 +118,10 @@ test("before_provider_request injects web_search for claude models", async () => ); }); -test("before_provider_request injects web_search for claude models even without model_select", async () => { +test("applyToPayload injects web_search based on claude model name heuristic", async () => { const pi = createMockPI(); registerNativeSearchHooks(pi); - // NO model_select fired — simulates session restore where modelsAreEqual suppresses the event const payload: Record = { model: "claude-opus-4-6", tools: [ @@ -134,10 +131,7 @@ test("before_provider_request injects web_search for claude models even without ], }; - const result = await pi.fire("before_provider_request", { - type: "before_provider_request", - payload, - }); + const result = webSearchMiddleware.applyToPayload(payload); const tools = ((result as any)?.tools ?? payload.tools) as any[]; const names = tools.map((t: any) => t.name ?? 
t.type); @@ -151,7 +145,7 @@ test("before_provider_request injects web_search for claude models even without assert.ok(names.includes("bash"), "Should keep non-search tools"); }); -test("before_provider_request does NOT inject for non-claude models", async () => { +test("applyToPayload does NOT inject for non-claude model names", async () => { const pi = createMockPI(); registerNativeSearchHooks(pi); @@ -160,37 +154,23 @@ test("before_provider_request does NOT inject for non-claude models", async () = tools: [{ name: "bash", type: "custom" }], }; - const result = await pi.fire("before_provider_request", { - type: "before_provider_request", - payload, - }); + const result = webSearchMiddleware.applyToPayload(payload); assert.equal(result, undefined, "Should not modify non-claude payload"); const tools = payload.tools as any[]; assert.equal(tools.length, 1, "Should not add tools to non-claude payload"); }); -test("before_provider_request does NOT inject for claude model on non-Anthropic provider", async () => { +test("applyToPayload does NOT inject for claude model when provider is non-Anthropic", async () => { const pi = createMockPI(); registerNativeSearchHooks(pi); - // GitHub Copilot (or Bedrock, etc.) 
serving a claude model - await pi.fire("model_select", { - type: "model_select", - model: { provider: "copilot", name: "claude-sonnet-4-6" }, - previousModel: undefined, - source: "set", - }); - const payload: Record = { model: "claude-sonnet-4-6-20250514", tools: [{ name: "bash", type: "custom" }], }; - const result = await pi.fire("before_provider_request", { - type: "before_provider_request", - payload, - }); + const result = webSearchMiddleware.applyToPayload(payload, { provider: "copilot" }); assert.equal( result, @@ -209,29 +189,23 @@ test("before_provider_request does NOT inject for claude model on non-Anthropic ); }); -// ─── Issue #444 regression: Copilot claude-* model without model_select ────── +// ─── Issue #444 regression: Copilot claude-* model ─────────────────────────── -test("before_provider_request does NOT inject when event.model indicates non-Anthropic provider (no model_select)", async () => { +test("applyToPayload does NOT inject when provider is github-copilot", async () => { const pi = createMockPI(); registerNativeSearchHooks(pi); - // NO model_select fired — simulates a new session where model was set before - // extensions were bound. The event.model field from the SDK reveals the true provider. 
const payload: Record = { model: "claude-sonnet-4-6-20250514", tools: [{ name: "bash", type: "custom" }], }; - const result = await pi.fire("before_provider_request", { - type: "before_provider_request", - payload, - model: { provider: "github-copilot", id: "claude-sonnet-4-6" }, - }); + const result = webSearchMiddleware.applyToPayload(payload, { provider: "github-copilot" }); assert.equal( result, undefined, - "Should not modify payload when event.model says non-Anthropic", + "Should not modify payload when provider is Copilot", ); const tools = payload.tools as any[]; assert.equal( @@ -245,74 +219,49 @@ test("before_provider_request does NOT inject when event.model indicates non-Ant ); }); -test("before_provider_request DOES inject when event.model indicates Anthropic provider (no model_select)", async () => { +test("applyToPayload DOES inject when provider is anthropic", async () => { const pi = createMockPI(); registerNativeSearchHooks(pi); - // NO model_select fired, but event.model confirms Anthropic provider const payload: Record = { model: "claude-sonnet-4-6-20250514", tools: [{ name: "bash", type: "custom" }], }; - const result = await pi.fire("before_provider_request", { - type: "before_provider_request", - payload, - model: { provider: "anthropic", id: "claude-sonnet-4-6" }, - }); + const result = webSearchMiddleware.applyToPayload(payload, { provider: "anthropic" }); const tools = ((result as any)?.tools ?? 
payload.tools) as any[]; assert.ok( tools.some((t: any) => t.type === "web_search_20250305"), - "Should inject web_search when event.model confirms Anthropic", + "Should inject web_search when provider is anthropic", ); }); -test("before_provider_request does not double-inject", async () => { +test("applyToPayload does not double-inject", async () => { const pi = createMockPI(); registerNativeSearchHooks(pi); - await pi.fire("model_select", { - type: "model_select", - model: { provider: "anthropic", name: "claude-opus-4-6" }, - previousModel: undefined, - source: "set", - }); - const payload: Record = { model: "claude-opus-4-6-20250514", tools: [{ type: "web_search_20250305", name: "web_search" }], }; - const result = await pi.fire("before_provider_request", { - type: "before_provider_request", - payload, - }); + const result = webSearchMiddleware.applyToPayload(payload, { provider: "anthropic" }); assert.equal(result, undefined, "Should not modify when already injected"); const tools = payload.tools as any[]; assert.equal(tools.length, 1, "Should not duplicate web_search tool"); }); -test("before_provider_request creates tools array if missing", async () => { +test("applyToPayload creates tools array if missing", async () => { const pi = createMockPI(); registerNativeSearchHooks(pi); - await pi.fire("model_select", { - type: "model_select", - model: { provider: "anthropic", name: "claude-haiku-4-5" }, - previousModel: undefined, - source: "set", - }); - const payload: Record = { model: "claude-haiku-4-5-20251001", }; - const result = await pi.fire("before_provider_request", { - type: "before_provider_request", - payload, - }); + const result = webSearchMiddleware.applyToPayload(payload, { provider: "anthropic" }); const tools = (result as any)?.tools ?? 
payload.tools; assert.ok(Array.isArray(tools), "Should create tools array"); @@ -325,14 +274,11 @@ test("before_provider_request creates tools array if missing", async () => { ); }); -test("before_provider_request skips when payload is falsy", async () => { +test("applyToPayload skips when payload is falsy", async () => { const pi = createMockPI(); registerNativeSearchHooks(pi); - const result = await pi.fire("before_provider_request", { - type: "before_provider_request", - payload: null, - }); + const result = webSearchMiddleware.applyToPayload(null); assert.equal(result, undefined, "Should return undefined for null payload"); });