From 90dc3c679877f549043198b11e648c2ae2996ff3 Mon Sep 17 00:00:00 2001
From: Mikael Hugo
Date: Mon, 11 May 2026 13:51:44 +0200
Subject: [PATCH] refactor(sf-ext): split sf-db.js (9073 lines) into 18 domain modules
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

sf-db.js is now a pure barrel re-export. All logic lives in sf-db/:

- sf-db-core.js — adapter, schema, transactions, shared helpers
- sf-db-mode-state.js — Ask/Build/YOLO mode state
- sf-db-decisions.js — ADR / decision records
- sf-db-artifacts.js — file artifacts and attachments
- sf-db-milestones.js — milestone CRUD
- sf-db-slices.js — slice CRUD
- sf-db-tasks.js — task CRUD
- sf-db-worktree.js — worktree state
- sf-db-evidence.js — retrieval evidence
- sf-db-spec.js — spec/contract records
- sf-db-gates.js — UOK gate records
- sf-db-uok.js — unit-of-knowledge state
- sf-db-session-store.js — session store / FTS
- sf-db-backlog.js — backlog items
- sf-db-learning.js — model learning / performance
- sf-db-memory.js — memory / embeddings
- sf-db-profile.js — user profile
- sf-db-self-feedback.js — self-feedback triage

sf-db/index.js re-exports sf-db.js for backward compat.

All 4375 tests pass.

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
---
 src/resources/extensions/sf/sf-db.js          | 9093 +----------------
 .../extensions/sf/sf-db/sf-db-artifacts.js    |   49 +
 .../extensions/sf/sf-db/sf-db-backlog.js      |  101 +
 .../extensions/sf/sf-db/sf-db-core.js         | 4140 ++++++++
 .../extensions/sf/sf-db/sf-db-decisions.js    |  191 +
 .../extensions/sf/sf-db/sf-db-evidence.js     |  259 +
 .../extensions/sf/sf-db/sf-db-gates.js        |  372 +
 .../extensions/sf/sf-db/sf-db-learning.js     |  543 +
 .../extensions/sf/sf-db/sf-db-memory.js       |  329 +
 .../extensions/sf/sf-db/sf-db-milestones.js   |  427 +
 .../extensions/sf/sf-db/sf-db-mode-state.js   |   49 +
 .../extensions/sf/sf-db/sf-db-profile.js      |  347 +
 .../sf/sf-db/sf-db-self-feedback.js           |   98 +
 .../sf/sf-db/sf-db-session-store.js           |  191 +
 .../extensions/sf/sf-db/sf-db-slices.js       |  464 +
 .../extensions/sf/sf-db/sf-db-spec.js         |  163 +
 .../extensions/sf/sf-db/sf-db-tasks.js        |  455 +
 .../extensions/sf/sf-db/sf-db-uok.js          |  366 +
 .../extensions/sf/sf-db/sf-db-worktree.js     |  265 +
 19 files changed, 8830 insertions(+), 9072 deletions(-)
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-artifacts.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-backlog.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-core.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-decisions.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-evidence.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-gates.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-learning.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-memory.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-milestones.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-mode-state.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-profile.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-self-feedback.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-session-store.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-slices.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-spec.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-tasks.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-uok.js
 create mode 100644 src/resources/extensions/sf/sf-db/sf-db-worktree.js

diff --git a/src/resources/extensions/sf/sf-db.js b/src/resources/extensions/sf/sf-db.js
index 2e116b4ec..983b62d5c 100644
--- a/src/resources/extensions/sf/sf-db.js
+++ b/src/resources/extensions/sf/sf-db.js
@@ -1,9073 +1,22 @@
-// SF Database Abstraction Layer
-// Provides a SQLite database via node:sqlite (Node >= 26 built-in).
-//
-// Exposes a unified sync API for decisions and requirements storage.
-// Schema is initialized on first open with WAL mode for file-backed DBs.
-//
-// ─── Single-writer invariant ─────────────────────────────────────────────
-// This file is the ONLY place in the codebase that issues write SQL
-// (INSERT / UPDATE / DELETE / REPLACE / BEGIN-COMMIT transactions) against
-// the engine database at `.sf/sf.db`. All other modules must call the
-// typed wrappers exported here. The structural test
-// `tests/single-writer-invariant.test.ts` fails CI if a new bypass appears.
-//
-// `_getAdapter()` is retained for read-only SELECTs in query modules
-// (context-store, memory-store queries, doctor checks, projections).
-// Do NOT use it for writes — add a wrapper here instead.
-//
-// The separate `.sf/unit-claims.db` managed by `unit-ownership.ts` is an
-// intentionally independent store for cross-worktree claim races and is
-// excluded from this invariant.
-import {
-  copyFileSync,
-  existsSync,
-  mkdirSync,
-  readdirSync,
-  readFileSync,
-  realpathSync,
-  statSync,
-  unlinkSync,
-  writeFileSync,
-} from "node:fs";
-import { dirname, join } from "node:path";
-import { DatabaseSync } from "node:sqlite";
-import { SF_STALE_STATE, SFError } from "./errors.js";
-import { getGateIdsForTurn } from "./gate-registry.js";
-import {
-  normalizeSchedulerStatus,
-  normalizeTaskStatus,
-  taskFrontmatterFromRecord,
-  withTaskFrontmatter,
-} from "./task-frontmatter.js";
-import { readTraceEvents } from "./uok/trace-writer.js";
-import { logError, logWarning } from "./workflow-logger.js";
+// sf-db.js — barrel re-export. All implementations live in sf-db/ domain files.
+// Do NOT add logic here; add it to the appropriate domain file.
+ +export * from './sf-db/sf-db-core.js'; +export * from './sf-db/sf-db-mode-state.js'; +export * from './sf-db/sf-db-decisions.js'; +export * from './sf-db/sf-db-artifacts.js'; +export * from './sf-db/sf-db-milestones.js'; +export * from './sf-db/sf-db-slices.js'; +export * from './sf-db/sf-db-tasks.js'; +export * from './sf-db/sf-db-worktree.js'; +export * from './sf-db/sf-db-evidence.js'; +export * from './sf-db/sf-db-spec.js'; +export * from './sf-db/sf-db-gates.js'; +export * from './sf-db/sf-db-uok.js'; +export * from './sf-db/sf-db-session-store.js'; +export * from './sf-db/sf-db-backlog.js'; +export * from './sf-db/sf-db-learning.js'; +export * from './sf-db/sf-db-memory.js'; +export * from './sf-db/sf-db-profile.js'; +export * from './sf-db/sf-db-self-feedback.js'; -let loadAttempted = false; -function loadProvider() { - if (loadAttempted) return; - loadAttempted = true; - // node:sqlite is built-in in Node >= 26 -} -function normalizeRow(row) { - if (row == null) return undefined; - if (Object.getPrototypeOf(row) === null) { - return { ...row }; - } - return row; -} -function normalizeRows(rows) { - return rows.map((r) => normalizeRow(r)); -} -const DB_QUERY_TIMEOUT_MS = 30_000; -const DB_BACKUP_MIN_INTERVAL_MS = 15 * 60 * 1000; -const DB_BACKUP_RETENTION = 24; -const DB_FULL_VACUUM_MIN_INTERVAL_MS = 6 * 60 * 60 * 1000; - -function createAdapter(rawDb) { - const db = rawDb; - const stmtCache = new Map(); - function wrapStmt(raw) { - return { - run(...params) { - return raw.run(...params); - }, - get(...params) { - return normalizeRow(raw.get(...params)); - }, - all(...params) { - return normalizeRows(raw.all(...params)); - }, - }; - } - return { - exec(sql) { - db.exec(sql); - }, - prepare(sql) { - let cached = stmtCache.get(sql); - if (cached) return cached; - cached = wrapStmt(db.prepare(sql)); - stmtCache.set(sql, cached); - return cached; - }, - close() { - stmtCache.clear(); - db.close(); - }, - }; -} - -/** - * Execute a database query with timeout protection. - * Falls back to empty result if query exceeds timeout. - * - * Purpose: Prevent hanging reads from blocking autonomous dispatch. - * - * Consumer: memory-repository.js, context-store.js, and any read query - * that needs a safety ceiling. - */ -export function withQueryTimeout( - operation, - fallbackValue, - timeoutMs = DB_QUERY_TIMEOUT_MS, -) { - try { - return operation(); - } catch (err) { - if (err?.message?.includes("timeout") || err?.message?.includes("busy")) { - logWarning( - "sf-db", - `Query timed out after ${timeoutMs}ms, returning fallback`, - ); - return fallbackValue; - } - throw err; - } -} -function openRawDb(path) { - loadProvider(); - return new DatabaseSync(path); -} -function sqliteStringLiteral(value) { - return `'${String(value).replaceAll("'", "''")}'`; -} -function databaseBackupDir(path) { - return join(dirname(path), "backups", "db"); -} -function latestDatabaseBackupMtime(dir) { - if (!existsSync(dir)) return 0; - let latest = 0; - for (const entry of readdirSync(dir)) { - if (!entry.startsWith("sf.db.")) continue; - const file = join(dir, entry); - try { - const stat = statSync(file); - if (stat.isFile() && stat.mtimeMs > latest) latest = stat.mtimeMs; - } catch { - // Ignore files that disappear during pruning. 
- } - } - return latest; -} -function pruneDatabaseBackups(dir) { - if (!existsSync(dir)) return; - const backups = []; - for (const entry of readdirSync(dir)) { - if (!entry.startsWith("sf.db.")) continue; - const file = join(dir, entry); - try { - const stat = statSync(file); - if (stat.isFile()) backups.push({ file, mtimeMs: stat.mtimeMs }); - } catch { - // Ignore files that disappear during pruning. - } - } - backups.sort((a, b) => b.mtimeMs - a.mtimeMs); - for (const backup of backups.slice(DB_BACKUP_RETENTION)) { - try { - unlinkSync(backup.file); - } catch { - // Best-effort retention; never block DB open on pruning. - } - } -} -function databaseMaintenancePath(path) { - return join(databaseBackupDir(path), "maintenance.json"); -} -function readDatabaseMaintenanceState(path) { - try { - return JSON.parse(readFileSync(databaseMaintenancePath(path), "utf-8")); - } catch { - return {}; - } -} -function writeDatabaseMaintenanceState(path, state) { - try { - writeFileSync( - databaseMaintenancePath(path), - JSON.stringify(state, null, 2) + "\n", - "utf-8", - ); - } catch { - // Best-effort maintenance metadata. - } -} -function createDatabaseSnapshot(rawDb, path) { - if (path === ":memory:" || process.env.SF_DB_BACKUP_DISABLE === "1") return; - const dir = databaseBackupDir(path); - try { - mkdirSync(dir, { recursive: true }); - const latest = latestDatabaseBackupMtime(dir); - if (latest > 0 && Date.now() - latest < DB_BACKUP_MIN_INTERVAL_MS) return; - const stamp = new Date().toISOString().replace(/[:.]/g, "-"); - const backupPath = join(dir, `sf.db.${stamp}`); - rawDb.exec(`VACUUM INTO ${sqliteStringLiteral(backupPath)}`); - pruneDatabaseBackups(dir); - } catch (err) { - logWarning( - "sf-db", - `database snapshot failed: ${err instanceof Error ? err.message : String(err)}`, - ); - } -} -function performDatabaseMaintenance(rawDb, path) { - if (path === ":memory:" || process.env.SF_DB_MAINTENANCE_DISABLE === "1") - return; - try { - const quickCheck = rawDb.prepare("PRAGMA quick_check").get(); - if (quickCheck?.quick_check !== "ok") { - logWarning("sf-db", "database quick_check failed; skipping maintenance"); - return; - } - rawDb.exec("PRAGMA wal_checkpoint(PASSIVE)"); - rawDb.exec("PRAGMA optimize"); - rawDb.exec("PRAGMA incremental_vacuum(128)"); - - const state = readDatabaseMaintenanceState(path); - const lastFullVacuumAt = - typeof state.lastFullVacuumAt === "string" - ? Date.parse(state.lastFullVacuumAt) - : 0; - if ( - !Number.isFinite(lastFullVacuumAt) || - Date.now() - lastFullVacuumAt >= DB_FULL_VACUUM_MIN_INTERVAL_MS - ) { - rawDb.exec("VACUUM"); - writeDatabaseMaintenanceState(path, { - ...state, - lastFullVacuumAt: new Date().toISOString(), - }); - } - } catch (err) { - logWarning( - "sf-db", - `database maintenance failed: ${err instanceof Error ? 
err.message : String(err)}`, - ); - } -} -const SCHEMA_VERSION = 61; -function indexExists(db, name) { - return !!db - .prepare( - "SELECT 1 as present FROM sqlite_master WHERE type = 'index' AND name = ?", - ) - .get(name); -} -function dedupeVerificationEvidenceRows(db) { - db.exec(` - DELETE FROM verification_evidence - WHERE rowid NOT IN ( - SELECT MIN(rowid) - FROM verification_evidence - GROUP BY task_id, slice_id, milestone_id, command, verdict - ) - `); -} -function ensureVerificationEvidenceDedupIndex(db) { - if (indexExists(db, "idx_verification_evidence_dedup")) return; - dedupeVerificationEvidenceRows(db); - db.exec( - "CREATE UNIQUE INDEX IF NOT EXISTS idx_verification_evidence_dedup ON verification_evidence(task_id, slice_id, milestone_id, command, verdict)", - ); -} -function ensureRepoProfileTables(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS repo_profiles ( - profile_id TEXT PRIMARY KEY, - project_hash TEXT NOT NULL, - project_root TEXT NOT NULL DEFAULT '', - head TEXT DEFAULT NULL, - branch TEXT DEFAULT NULL, - remote_hash TEXT DEFAULT NULL, - dirty INTEGER NOT NULL DEFAULT 0, - profile_json TEXT NOT NULL DEFAULT '{}', - created_at TEXT NOT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS repo_file_observations ( - path TEXT PRIMARY KEY, - latest_profile_id TEXT NOT NULL, - git_status TEXT NOT NULL, - ownership TEXT NOT NULL, - language TEXT DEFAULT NULL, - size_bytes INTEGER NOT NULL DEFAULT 0, - content_hash TEXT DEFAULT NULL, - summary TEXT DEFAULT NULL, - first_seen_at TEXT NOT NULL, - last_seen_at TEXT NOT NULL, - adopted_at TEXT DEFAULT NULL, - adoption_unit_id TEXT DEFAULT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_repo_profiles_created ON repo_profiles(created_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_repo_file_observations_status ON repo_file_observations(git_status, ownership)", - ); -} -function ensureBacklogTables(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS backlog_items ( - id TEXT PRIMARY KEY, - title TEXT NOT NULL, - status TEXT NOT NULL DEFAULT 'pending', - note TEXT NOT NULL DEFAULT '', - source TEXT NOT NULL DEFAULT '', - triage_run_id TEXT DEFAULT NULL, - sequence INTEGER NOT NULL DEFAULT 0, - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL, - promoted_at TEXT DEFAULT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_backlog_items_status_sequence ON backlog_items(status, sequence, id)", - ); -} -function ensureScheduleTables(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS schedule_entries ( - seq INTEGER PRIMARY KEY AUTOINCREMENT, - scope TEXT NOT NULL DEFAULT 'project', - id TEXT NOT NULL, - schema_version INTEGER NOT NULL DEFAULT 1, - kind TEXT NOT NULL DEFAULT 'reminder', - status TEXT NOT NULL DEFAULT 'pending', - due_at TEXT NOT NULL DEFAULT '', - created_at TEXT NOT NULL DEFAULT '', - snoozed_at TEXT DEFAULT NULL, - payload_json TEXT NOT NULL DEFAULT '{}', - created_by TEXT NOT NULL DEFAULT 'user', - autonomous_dispatch INTEGER NOT NULL DEFAULT 0, - full_json TEXT NOT NULL DEFAULT '{}', - imported_from TEXT DEFAULT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_schedule_entries_scope_id_created ON schedule_entries(scope, id, created_at DESC, seq DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_schedule_entries_scope_due ON schedule_entries(scope, status, due_at)", - ); - ensureColumn( - db, - "schedule_entries", - "autonomous_dispatch", - "ALTER TABLE schedule_entries ADD COLUMN autonomous_dispatch INTEGER NOT NULL DEFAULT 0", - ); -} -function 
ensureSolverEvalTables(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS solver_eval_runs ( - run_id TEXT PRIMARY KEY, - suite_source TEXT NOT NULL DEFAULT '', - cases_count INTEGER NOT NULL DEFAULT 0, - summary_json TEXT NOT NULL DEFAULT '{}', - report_path TEXT NOT NULL DEFAULT '', - results_path TEXT NOT NULL DEFAULT '', - db_recorded INTEGER NOT NULL DEFAULT 1, - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS solver_eval_case_results ( - run_id TEXT NOT NULL, - case_id TEXT NOT NULL, - title TEXT NOT NULL DEFAULT '', - mode TEXT NOT NULL, - passed INTEGER NOT NULL DEFAULT 0, - false_complete INTEGER NOT NULL DEFAULT 0, - duration_ms INTEGER DEFAULT NULL, - command_status INTEGER DEFAULT NULL, - solver_outcome TEXT DEFAULT NULL, - pdd_complete INTEGER DEFAULT NULL, - result_json TEXT NOT NULL DEFAULT '{}', - created_at TEXT NOT NULL, - PRIMARY KEY (run_id, case_id, mode), - FOREIGN KEY (run_id) REFERENCES solver_eval_runs(run_id) ON DELETE CASCADE - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_solver_eval_runs_created ON solver_eval_runs(created_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_solver_eval_case_lookup ON solver_eval_case_results(run_id, case_id)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_solver_eval_case_false_complete ON solver_eval_case_results(false_complete, mode)", - ); -} -function ensureSessionTables(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS sessions ( - session_id TEXT PRIMARY KEY, - trace_id TEXT DEFAULT NULL, - mode TEXT NOT NULL DEFAULT 'interactive', - cwd TEXT NOT NULL DEFAULT '', - repo TEXT DEFAULT NULL, - branch TEXT DEFAULT NULL, - summary TEXT DEFAULT NULL, - summary_count INTEGER NOT NULL DEFAULT 0, - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS turns ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - session_id TEXT NOT NULL REFERENCES sessions(session_id) ON DELETE CASCADE, - turn_index INTEGER NOT NULL, - user_message TEXT, - assistant_response TEXT, - ts TEXT NOT NULL, - UNIQUE(session_id, turn_index) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS session_file_touches ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - session_id TEXT NOT NULL REFERENCES sessions(session_id) ON DELETE CASCADE, - path TEXT NOT NULL, - tool_name TEXT DEFAULT NULL, - turn_id INTEGER DEFAULT NULL REFERENCES turns(id), - first_seen_at TEXT NOT NULL, - UNIQUE(session_id, path) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS session_refs ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - session_id TEXT NOT NULL REFERENCES sessions(session_id) ON DELETE CASCADE, - ref_type TEXT NOT NULL, - ref_value TEXT NOT NULL, - turn_id INTEGER DEFAULT NULL REFERENCES turns(id), - created_at TEXT NOT NULL, - UNIQUE(session_id, ref_type, ref_value) - ) - `); - // FTS5 external-content table over turns for keyword recall. - // content_rowid links to turns.id; triggers below keep it in sync. 
- db.exec(` - CREATE VIRTUAL TABLE IF NOT EXISTS turns_fts USING fts5( - user_message, - assistant_response, - content='turns', - content_rowid='id' - ) - `); - db.exec(` - CREATE TRIGGER IF NOT EXISTS turns_fts_insert AFTER INSERT ON turns BEGIN - INSERT INTO turns_fts(rowid, user_message, assistant_response) - VALUES (new.id, new.user_message, new.assistant_response); - END - `); - db.exec(` - CREATE TRIGGER IF NOT EXISTS turns_fts_update AFTER UPDATE ON turns BEGIN - INSERT INTO turns_fts(turns_fts, rowid, user_message, assistant_response) - VALUES ('delete', old.id, old.user_message, old.assistant_response); - INSERT INTO turns_fts(rowid, user_message, assistant_response) - VALUES (new.id, new.user_message, new.assistant_response); - END - `); - db.exec(` - CREATE TRIGGER IF NOT EXISTS turns_fts_delete AFTER DELETE ON turns BEGIN - INSERT INTO turns_fts(turns_fts, rowid, user_message, assistant_response) - VALUES ('delete', old.id, old.user_message, old.assistant_response); - END - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_sessions_created ON sessions(created_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_sessions_repo ON sessions(repo, created_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_turns_session ON turns(session_id, turn_index)", - ); - db.exec("CREATE INDEX IF NOT EXISTS idx_turns_ts ON turns(ts DESC)"); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_session_file_touches_session ON session_file_touches(session_id, first_seen_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_session_file_touches_path ON session_file_touches(path, session_id)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_session_refs_session ON session_refs(session_id, created_at DESC)", - ); -} -function ensureSessionSnapshotTable(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS session_snapshots ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - -- Session that triggered this checkpoint. FK to sessions(session_id). - session_id TEXT NOT NULL, - -- Zero-based counter within the session (first snapshot = 0). - snapshot_index INTEGER NOT NULL DEFAULT 0, - -- Optional git stash ref so the snapshot can be restored exactly. - -- NULL when the working tree had no changes to stash. - git_stash_ref TEXT, - -- Free-text label for the snapshot (e.g. "before migration deploy"). 
- label TEXT, - ts TEXT NOT NULL, - UNIQUE(session_id, snapshot_index) - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_session_snapshots_session ON session_snapshots(session_id, snapshot_index)", - ); -} -function ensureHeadlessRunTables(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS headless_runs ( - run_id TEXT PRIMARY KEY, - command TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT '', - exit_code INTEGER NOT NULL DEFAULT 0, - timed_out INTEGER NOT NULL DEFAULT 0, - interrupted INTEGER NOT NULL DEFAULT 0, - restart_count INTEGER NOT NULL DEFAULT 0, - max_restarts INTEGER NOT NULL DEFAULT 0, - duration_ms INTEGER NOT NULL DEFAULT 0, - total_events INTEGER NOT NULL DEFAULT 0, - tool_calls INTEGER NOT NULL DEFAULT 0, - solver_eval_run_id TEXT DEFAULT NULL, - solver_eval_report_path TEXT DEFAULT NULL, - details_json TEXT NOT NULL DEFAULT '{}', - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_headless_runs_created ON headless_runs(created_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_headless_runs_status ON headless_runs(status, created_at DESC)", - ); -} -function ensureUokMessageTables(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS uok_messages ( - id TEXT PRIMARY KEY, - from_agent TEXT NOT NULL, - to_agent TEXT NOT NULL, - body TEXT NOT NULL DEFAULT '', - metadata_json TEXT NOT NULL DEFAULT '{}', - sent_at TEXT NOT NULL DEFAULT '', - delivered_at TEXT DEFAULT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS uok_message_reads ( - message_id TEXT NOT NULL, - agent_id TEXT NOT NULL, - read_at TEXT NOT NULL DEFAULT '', - PRIMARY KEY (message_id, agent_id), - FOREIGN KEY (message_id) REFERENCES uok_messages(id) ON DELETE CASCADE - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_uok_messages_to ON uok_messages(to_agent, sent_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_uok_messages_conversation ON uok_messages(from_agent, to_agent, sent_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_uok_messages_sent ON uok_messages(sent_at DESC)", - ); -} -function ensureDeployTables(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS deploy_runs ( - id TEXT PRIMARY KEY, - milestone_id TEXT NOT NULL, - target TEXT NOT NULL, - command TEXT NOT NULL, - status TEXT NOT NULL DEFAULT 'pending', - exit_code INTEGER DEFAULT NULL, - output TEXT DEFAULT NULL, - deployed_url TEXT DEFAULT NULL, - created_at TEXT NOT NULL, - finished_at TEXT DEFAULT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS smoke_results ( - id TEXT PRIMARY KEY, - deploy_run_id TEXT NOT NULL, - milestone_id TEXT NOT NULL, - url TEXT NOT NULL, - status TEXT NOT NULL DEFAULT 'pending', - verdict TEXT DEFAULT NULL, - checks_json TEXT NOT NULL DEFAULT '[]', - created_at TEXT NOT NULL, - finished_at TEXT DEFAULT NULL, - FOREIGN KEY (deploy_run_id) REFERENCES deploy_runs(id) ON DELETE CASCADE - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS release_records ( - id TEXT PRIMARY KEY, - milestone_id TEXT NOT NULL, - version TEXT NOT NULL, - prev_version TEXT DEFAULT NULL, - changelog_entry TEXT DEFAULT NULL, - git_tag TEXT DEFAULT NULL, - published INTEGER NOT NULL DEFAULT 0, - created_at TEXT NOT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS rollback_runs ( - id TEXT PRIMARY KEY, - deploy_run_id TEXT NOT NULL, - milestone_id TEXT NOT NULL, - reason TEXT NOT NULL, - status TEXT NOT NULL DEFAULT 'pending', - output TEXT DEFAULT NULL, - created_at TEXT NOT NULL, - finished_at TEXT DEFAULT NULL, - FOREIGN KEY 
(deploy_run_id) REFERENCES deploy_runs(id) ON DELETE CASCADE - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_deploy_runs_milestone ON deploy_runs(milestone_id, created_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_smoke_results_deploy ON smoke_results(deploy_run_id)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_release_records_milestone ON release_records(milestone_id, created_at DESC)", - ); -} -function ensureSleeptimeQueueTable(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS sleeptime_consolidation_queue ( - id TEXT PRIMARY KEY, - conversation_agent TEXT NOT NULL, - memory_agent TEXT NOT NULL, - content TEXT NOT NULL, - status TEXT NOT NULL DEFAULT 'pending', - created_at TEXT NOT NULL, - processed_at TEXT DEFAULT NULL, - result TEXT DEFAULT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_sleeptime_queue_status ON sleeptime_consolidation_queue(status, created_at ASC)", - ); -} -function ensureSelfFeedbackTables(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS self_feedback ( - id TEXT PRIMARY KEY, - ts TEXT NOT NULL, - kind TEXT NOT NULL, - severity TEXT NOT NULL, - blocking INTEGER NOT NULL DEFAULT 0, - repo_identity TEXT NOT NULL DEFAULT '', - sf_version TEXT NOT NULL DEFAULT '', - base_path TEXT NOT NULL DEFAULT '', - unit_type TEXT DEFAULT NULL, - milestone_id TEXT DEFAULT NULL, - slice_id TEXT DEFAULT NULL, - task_id TEXT DEFAULT NULL, - summary TEXT NOT NULL DEFAULT '', - evidence TEXT NOT NULL DEFAULT '', - suggested_fix TEXT NOT NULL DEFAULT '', - full_json TEXT NOT NULL, - resolved_at TEXT DEFAULT NULL, - resolved_reason TEXT DEFAULT NULL, - resolved_by_sf_version TEXT DEFAULT NULL, - resolved_evidence_json TEXT DEFAULT NULL, - resolved_criteria_json TEXT DEFAULT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_self_feedback_open ON self_feedback(resolved_at, severity, ts)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_self_feedback_kind ON self_feedback(kind, ts)", - ); -} -function ensureRetrievalEvidenceTables(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS retrieval_evidence ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - backend TEXT NOT NULL, - source_kind TEXT NOT NULL DEFAULT 'code', - query TEXT NOT NULL DEFAULT '', - strategy TEXT NOT NULL DEFAULT '', - scope TEXT NOT NULL DEFAULT '', - project_root TEXT NOT NULL DEFAULT '', - git_head TEXT DEFAULT NULL, - git_branch TEXT DEFAULT NULL, - worktree_dirty INTEGER NOT NULL DEFAULT 0, - freshness TEXT NOT NULL DEFAULT 'unknown', - status TEXT NOT NULL DEFAULT 'ok', - hit_count INTEGER NOT NULL DEFAULT 0, - elapsed_ms INTEGER NOT NULL DEFAULT 0, - cache_path TEXT DEFAULT NULL, - error TEXT DEFAULT NULL, - result_json TEXT NOT NULL DEFAULT '{}', - recorded_at TEXT NOT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_backend_recorded ON retrieval_evidence(backend, recorded_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_scope_recorded ON retrieval_evidence(scope, recorded_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_status_recorded ON retrieval_evidence(status, recorded_at DESC)", - ); -} -function ensureTriageTables(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS triage_runs ( - id TEXT PRIMARY KEY, - source_file TEXT, - status TEXT NOT NULL DEFAULT 'complete', - result_summary_json TEXT, - created_at TEXT NOT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS triage_evals ( - id TEXT PRIMARY KEY, - run_id TEXT NOT NULL REFERENCES triage_runs(id), - task_input TEXT NOT NULL, - 
expected_behavior TEXT, - evidence TEXT, - failure_mode TEXT, - status TEXT NOT NULL DEFAULT 'pending', - created_at TEXT NOT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS triage_items ( - id TEXT PRIMARY KEY, - run_id TEXT NOT NULL REFERENCES triage_runs(id), - kind TEXT NOT NULL, - content TEXT NOT NULL, - evidence TEXT, - status TEXT NOT NULL DEFAULT 'pending', - created_at TEXT NOT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS triage_skills ( - id TEXT PRIMARY KEY, - run_id TEXT NOT NULL REFERENCES triage_runs(id), - name TEXT, - description TEXT, - trigger TEXT, - raw_json TEXT, - status TEXT NOT NULL DEFAULT 'pending', - created_at TEXT NOT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_triage_evals_run ON triage_evals(run_id)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_triage_items_run_kind ON triage_items(run_id, kind)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_triage_skills_run ON triage_skills(run_id)", - ); -} -function ensureRuntimeCounterTable(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS runtime_counters ( - key TEXT PRIMARY KEY, - value INTEGER NOT NULL DEFAULT 0, - updated_at TEXT NOT NULL - ) - `); -} -function ensureValidationAttentionMarkersTable(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS validation_attention_markers ( - milestone_id TEXT PRIMARY KEY, - created_at TEXT NOT NULL, - source TEXT, - remediation_round INTEGER, - revalidation_round INTEGER, - revalidation_requested_at TEXT - ) - `); -} -function ensureSpecSchemaTables(db) { - // Tier 1.3: Spec/Runtime/Evidence schema separation - // Creates 9 normalized tables for milestone, slice, task entities - // Each entity type has: _specs (immutable intent), (runtime state), _evidence (audit trail) - - // ── Milestone Spec Table (immutable record of intent) ─────────── - db.exec(` - CREATE TABLE IF NOT EXISTS milestone_specs ( - id TEXT NOT NULL, - vision TEXT NOT NULL DEFAULT '', - success_criteria TEXT DEFAULT '', - key_risks TEXT DEFAULT '', - proof_strategy TEXT DEFAULT '', - verification_contract TEXT DEFAULT '', - verification_integration TEXT DEFAULT '', - verification_operational TEXT DEFAULT '', - verification_uat TEXT DEFAULT '', - definition_of_done TEXT DEFAULT '', - requirement_coverage TEXT DEFAULT '', - boundary_map_markdown TEXT DEFAULT '', - vision_meeting_json TEXT DEFAULT '', - product_research_json TEXT DEFAULT '', - spec_version INTEGER NOT NULL DEFAULT 1, - created_at TEXT NOT NULL, - PRIMARY KEY (id), - FOREIGN KEY (id) REFERENCES milestones(id) - ) - `); - - // ── Slice Spec Table (immutable record of intent) ─────────── - db.exec(` - CREATE TABLE IF NOT EXISTS slice_specs ( - milestone_id TEXT NOT NULL, - slice_id TEXT NOT NULL, - goal TEXT NOT NULL DEFAULT '', - success_criteria TEXT DEFAULT '', - proof_level TEXT DEFAULT '', - integration_closure TEXT DEFAULT '', - observability_impact TEXT DEFAULT '', - adversarial_partner TEXT DEFAULT '', - adversarial_combatant TEXT DEFAULT '', - adversarial_architect TEXT DEFAULT '', - planning_meeting_json TEXT DEFAULT '', - spec_version INTEGER NOT NULL DEFAULT 1, - created_at TEXT NOT NULL, - PRIMARY KEY (milestone_id, slice_id), - FOREIGN KEY (milestone_id) REFERENCES milestones(id), - FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) - ) - `); - - // ── Task Spec Table (immutable record of intent) ─────────── - db.exec(` - CREATE TABLE IF NOT EXISTS task_specs ( - milestone_id TEXT NOT NULL, - slice_id TEXT NOT NULL, - task_id TEXT NOT NULL, - verify TEXT NOT NULL DEFAULT 
'', - inputs TEXT DEFAULT '', - expected_output TEXT DEFAULT '', - risk TEXT NOT NULL DEFAULT 'low', - mutation_scope TEXT NOT NULL DEFAULT 'isolated', - verification_type TEXT NOT NULL DEFAULT 'self-check', - plan_approval TEXT NOT NULL DEFAULT 'not-required', - estimated_effort INTEGER DEFAULT NULL, - dependencies TEXT NOT NULL DEFAULT '[]', - blocks_parallel INTEGER NOT NULL DEFAULT 0, - requires_user_input INTEGER NOT NULL DEFAULT 0, - auto_retry INTEGER NOT NULL DEFAULT 1, - max_retries INTEGER NOT NULL DEFAULT 2, - spec_version INTEGER NOT NULL DEFAULT 1, - created_at TEXT NOT NULL, - PRIMARY KEY (milestone_id, slice_id, task_id), - FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id), - FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) - ) - `); - - // ── Milestone Evidence Table (append-only audit trail) ─────────── - db.exec(` - CREATE TABLE IF NOT EXISTS milestone_evidence ( - milestone_id TEXT NOT NULL, - evidence_type TEXT NOT NULL, - content TEXT NOT NULL, - recorded_at TEXT NOT NULL, - phase_name TEXT DEFAULT '', - recorded_by TEXT DEFAULT '', - evidence_id TEXT NOT NULL DEFAULT (lower(hex(randomblob(16)))), - PRIMARY KEY (milestone_id, evidence_id), - FOREIGN KEY (milestone_id) REFERENCES milestones(id) - ) - `); - - // ── Slice Evidence Table (append-only audit trail) ─────────── - db.exec(` - CREATE TABLE IF NOT EXISTS slice_evidence ( - milestone_id TEXT NOT NULL, - slice_id TEXT NOT NULL, - evidence_type TEXT NOT NULL, - content TEXT NOT NULL, - recorded_at TEXT NOT NULL, - phase_name TEXT DEFAULT '', - recorded_by TEXT DEFAULT '', - evidence_id TEXT NOT NULL DEFAULT (lower(hex(randomblob(16)))), - PRIMARY KEY (milestone_id, slice_id, evidence_id), - FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) - ) - `); - - // ── Task Evidence Table (append-only audit trail) ─────────── - db.exec(` - CREATE TABLE IF NOT EXISTS task_evidence ( - milestone_id TEXT NOT NULL, - slice_id TEXT NOT NULL, - task_id TEXT NOT NULL, - evidence_type TEXT NOT NULL, - content TEXT NOT NULL, - recorded_at TEXT NOT NULL, - phase_name TEXT DEFAULT '', - recorded_by TEXT DEFAULT '', - evidence_id TEXT NOT NULL DEFAULT (lower(hex(randomblob(16)))), - PRIMARY KEY (milestone_id, slice_id, task_id, evidence_id), - FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) - ) - `); - - // Indices for efficient querying of evidence trails - db.exec(` - CREATE INDEX IF NOT EXISTS idx_milestone_evidence_type - ON milestone_evidence(milestone_id, evidence_type, recorded_at DESC) - `); - db.exec(` - CREATE INDEX IF NOT EXISTS idx_slice_evidence_type - ON slice_evidence(milestone_id, slice_id, evidence_type, recorded_at DESC) - `); - db.exec(` - CREATE INDEX IF NOT EXISTS idx_task_evidence_type - ON task_evidence(milestone_id, slice_id, task_id, evidence_type, recorded_at DESC) - `); -} -function initSchema(db, fileBacked) { - if (fileBacked) db.exec("PRAGMA journal_mode=WAL"); - if (fileBacked) db.exec("PRAGMA busy_timeout = 5000"); - if (fileBacked) db.exec("PRAGMA synchronous = NORMAL"); - // Disable SQLite's automatic WAL checkpoint (default: every 1000 pages). - // Auto-checkpoint fires at unpredictable times — if the process is killed - // mid-checkpoint (e.g., OOM), the main DB is partially written with an - // empty WAL and cannot be recovered. Explicit checkpoints are issued at - // safe loop boundaries instead (post-unit finalize, close). 
- if (fileBacked) db.exec("PRAGMA wal_autocheckpoint=0"); - if (fileBacked) db.exec("PRAGMA auto_vacuum = INCREMENTAL"); - if (fileBacked) db.exec("PRAGMA cache_size = -8000"); // 8 MB page cache - if (fileBacked && process.platform !== "darwin") - db.exec("PRAGMA mmap_size = 67108864"); // 64 MB mmap - db.exec("PRAGMA temp_store = MEMORY"); - db.exec("PRAGMA foreign_keys = ON"); - db.exec("BEGIN"); - try { - db.exec(` - CREATE TABLE IF NOT EXISTS schema_version ( - version INTEGER NOT NULL, - applied_at TEXT NOT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS decisions ( - seq INTEGER PRIMARY KEY AUTOINCREMENT, - id TEXT NOT NULL UNIQUE, - when_context TEXT NOT NULL DEFAULT '', - scope TEXT NOT NULL DEFAULT '', - decision TEXT NOT NULL DEFAULT '', - choice TEXT NOT NULL DEFAULT '', - rationale TEXT NOT NULL DEFAULT '', - revisable TEXT NOT NULL DEFAULT '', - made_by TEXT NOT NULL DEFAULT 'agent', - superseded_by TEXT DEFAULT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS requirements ( - id TEXT PRIMARY KEY, - class TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT '', - description TEXT NOT NULL DEFAULT '', - why TEXT NOT NULL DEFAULT '', - source TEXT NOT NULL DEFAULT '', - primary_owner TEXT NOT NULL DEFAULT '', - supporting_slices TEXT NOT NULL DEFAULT '', - validation TEXT NOT NULL DEFAULT '', - notes TEXT NOT NULL DEFAULT '', - full_content TEXT NOT NULL DEFAULT '', - superseded_by TEXT DEFAULT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS artifacts ( - path TEXT PRIMARY KEY, - artifact_type TEXT NOT NULL DEFAULT '', - milestone_id TEXT DEFAULT NULL, - slice_id TEXT DEFAULT NULL, - task_id TEXT DEFAULT NULL, - full_content TEXT NOT NULL DEFAULT '', - imported_at TEXT NOT NULL DEFAULT '' - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS memories ( - seq INTEGER PRIMARY KEY AUTOINCREMENT, - id TEXT NOT NULL UNIQUE, - category TEXT NOT NULL, - content TEXT NOT NULL, - confidence REAL NOT NULL DEFAULT 0.8, - source_unit_type TEXT, - source_unit_id TEXT, - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL, - superseded_by TEXT DEFAULT NULL, - hit_count INTEGER NOT NULL DEFAULT 0, - tags TEXT NOT NULL DEFAULT '[]' - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS memory_processed_units ( - unit_key TEXT PRIMARY KEY, - activity_file TEXT, - processed_at TEXT NOT NULL - ) - `); - // memory_embeddings, memory_relations, memory_sources used to be referenced - // by helper functions and queries (memory-embeddings.ts, memory-relations.ts, - // memory-ingest.ts) without a corresponding CREATE TABLE — any actual write - // would have failed with "no such table". Creating them as IF NOT EXISTS so - // existing DBs that somehow have them survive, and fresh DBs work. - db.exec(` - CREATE TABLE IF NOT EXISTS memory_embeddings ( - memory_id TEXT PRIMARY KEY, - model TEXT NOT NULL, - dim INTEGER NOT NULL, - vector BLOB NOT NULL, - updated_at TEXT NOT NULL, - FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS memory_relations ( - from_id TEXT NOT NULL, - to_id TEXT NOT NULL, - rel TEXT NOT NULL, - confidence REAL NOT NULL DEFAULT 0.8, - created_at TEXT NOT NULL, - PRIMARY KEY (from_id, to_id, rel), - FOREIGN KEY (from_id) REFERENCES memories(id) ON DELETE CASCADE, - FOREIGN KEY (to_id) REFERENCES memories(id) ON DELETE CASCADE - ) - `); - // PK covers from_id as leading column already; reverse lookups - // (memory-relations.ts queries WHERE to_id = ?) 
need their own index - // to avoid a full table scan as the relation count grows. - db.exec( - "CREATE INDEX IF NOT EXISTS idx_memory_relations_to ON memory_relations(to_id)", - ); - db.exec(` - CREATE TABLE IF NOT EXISTS memory_sources ( - id TEXT PRIMARY KEY, - kind TEXT NOT NULL, - uri TEXT, - title TEXT, - content TEXT NOT NULL, - content_hash TEXT NOT NULL, - imported_at TEXT NOT NULL, - scope TEXT NOT NULL DEFAULT 'project', - tags TEXT NOT NULL DEFAULT '[]' - ) - `); - // content_hash is queried on every insert for deduplication; without an - // index the lookup becomes a full table scan as ingestion volume grows. - db.exec( - "CREATE INDEX IF NOT EXISTS idx_memory_sources_content_hash ON memory_sources(content_hash)", - ); - // Category GROUP BY queries (e.g. /memory stats) need a covering - // index that filters active memories and groups by category. - db.exec( - "CREATE INDEX IF NOT EXISTS idx_memories_category ON memories(superseded_by, category)", - ); - db.exec(` - CREATE TABLE IF NOT EXISTS judgments ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - unit_id TEXT NOT NULL, - decision TEXT NOT NULL DEFAULT '', - alternatives_json TEXT NOT NULL DEFAULT '[]', - reasoning TEXT NOT NULL DEFAULT '', - confidence TEXT NOT NULL DEFAULT 'medium', - ts TEXT NOT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_judgments_unit_id ON judgments(unit_id, ts DESC)", - ); - db.exec(` - CREATE TABLE IF NOT EXISTS milestones ( - id TEXT PRIMARY KEY, - title TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT 'active', - depends_on TEXT NOT NULL DEFAULT '[]', - created_at TEXT NOT NULL DEFAULT '', - completed_at TEXT DEFAULT NULL, - vision TEXT NOT NULL DEFAULT '', - success_criteria TEXT NOT NULL DEFAULT '[]', - key_risks TEXT NOT NULL DEFAULT '[]', - proof_strategy TEXT NOT NULL DEFAULT '[]', - verification_contract TEXT NOT NULL DEFAULT '', - verification_integration TEXT NOT NULL DEFAULT '', - verification_operational TEXT NOT NULL DEFAULT '', - verification_uat TEXT NOT NULL DEFAULT '', - definition_of_done TEXT NOT NULL DEFAULT '[]', - requirement_coverage TEXT NOT NULL DEFAULT '', - boundary_map_markdown TEXT NOT NULL DEFAULT '', - vision_meeting_json TEXT NOT NULL DEFAULT '', - product_research_json TEXT NOT NULL DEFAULT '', - sequence INTEGER DEFAULT 0 - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS slices ( - milestone_id TEXT NOT NULL, - id TEXT NOT NULL, - title TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT 'pending', - risk TEXT NOT NULL DEFAULT 'medium', - depends TEXT NOT NULL DEFAULT '[]', - demo TEXT NOT NULL DEFAULT '', - created_at TEXT NOT NULL DEFAULT '', - completed_at TEXT DEFAULT NULL, - full_summary_md TEXT NOT NULL DEFAULT '', - full_uat_md TEXT NOT NULL DEFAULT '', - goal TEXT NOT NULL DEFAULT '', - success_criteria TEXT NOT NULL DEFAULT '', - proof_level TEXT NOT NULL DEFAULT '', - integration_closure TEXT NOT NULL DEFAULT '', - observability_impact TEXT NOT NULL DEFAULT '', - adversarial_partner TEXT NOT NULL DEFAULT '', - adversarial_combatant TEXT NOT NULL DEFAULT '', - adversarial_architect TEXT NOT NULL DEFAULT '', - planning_meeting_json TEXT NOT NULL DEFAULT '', - sequence INTEGER DEFAULT 0, -- Ordering hint: tools may set this to control execution order - replan_triggered_at TEXT DEFAULT NULL, - is_sketch INTEGER NOT NULL DEFAULT 0, -- SF ADR-011: 1 = slice is a sketch awaiting refine-slice - sketch_scope TEXT NOT NULL DEFAULT '', -- SF ADR-011: 2-3 sentence scope hint from plan-milestone - PRIMARY KEY (milestone_id, id), - FOREIGN KEY 
(milestone_id) REFERENCES milestones(id) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS tasks ( - milestone_id TEXT NOT NULL, - slice_id TEXT NOT NULL, - id TEXT NOT NULL, - title TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT 'pending', - one_liner TEXT NOT NULL DEFAULT '', - narrative TEXT NOT NULL DEFAULT '', - verification_result TEXT NOT NULL DEFAULT '', - duration TEXT NOT NULL DEFAULT '', - completed_at TEXT DEFAULT NULL, - blocker_discovered INTEGER DEFAULT 0, - deviations TEXT NOT NULL DEFAULT '', - known_issues TEXT NOT NULL DEFAULT '', - key_files TEXT NOT NULL DEFAULT '[]', - key_decisions TEXT NOT NULL DEFAULT '[]', - full_summary_md TEXT NOT NULL DEFAULT '', - description TEXT NOT NULL DEFAULT '', - estimate TEXT NOT NULL DEFAULT '', - files TEXT NOT NULL DEFAULT '[]', - verify TEXT NOT NULL DEFAULT '', - inputs TEXT NOT NULL DEFAULT '[]', - expected_output TEXT NOT NULL DEFAULT '[]', - observability_impact TEXT NOT NULL DEFAULT '', - full_plan_md TEXT NOT NULL DEFAULT '', - created_at TEXT NOT NULL DEFAULT '', - verification_status TEXT NOT NULL DEFAULT '', - risk TEXT NOT NULL DEFAULT 'low', - mutation_scope TEXT NOT NULL DEFAULT 'isolated', - verification_type TEXT NOT NULL DEFAULT 'self-check', - plan_approval TEXT NOT NULL DEFAULT 'not-required', - task_status TEXT NOT NULL DEFAULT 'todo', - estimated_effort INTEGER DEFAULT NULL, - dependencies TEXT NOT NULL DEFAULT '[]', - blocks_parallel INTEGER NOT NULL DEFAULT 0, - requires_user_input INTEGER NOT NULL DEFAULT 0, - auto_retry INTEGER NOT NULL DEFAULT 1, - max_retries INTEGER NOT NULL DEFAULT 2, - frontmatter_version INTEGER NOT NULL DEFAULT 1, - sequence INTEGER DEFAULT 0, -- Ordering hint: tools may set this to control execution order - escalation_pending INTEGER NOT NULL DEFAULT 0, -- ADR-011 P2 (SF): pause-on-escalation flag - escalation_awaiting_review INTEGER NOT NULL DEFAULT 0, -- ADR-011 P2 (SF): continueWithDefault=true marker (no pause) - escalation_override_applied INTEGER NOT NULL DEFAULT 0, -- SF ADR-011 P2: 1 once carry-forward injected into a downstream prompt - escalation_artifact_path TEXT DEFAULT NULL, -- ADR-011 P2 (SF): path to T##-ESCALATION.json - PRIMARY KEY (milestone_id, slice_id, id), - FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) - ) - `); - ensureTaskSchedulerTable(db); - if (columnExists(db, "tasks", "escalation_pending")) { - db.exec(` - CREATE INDEX IF NOT EXISTS idx_tasks_escalation_pending ON tasks(milestone_id, slice_id, escalation_pending) - `); - } - db.exec(` - CREATE TABLE IF NOT EXISTS verification_evidence ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - task_id TEXT NOT NULL DEFAULT '', - slice_id TEXT NOT NULL DEFAULT '', - milestone_id TEXT NOT NULL DEFAULT '', - command TEXT NOT NULL DEFAULT '', - exit_code INTEGER DEFAULT 0, - verdict TEXT NOT NULL DEFAULT '', - duration_ms INTEGER DEFAULT 0, - created_at TEXT NOT NULL DEFAULT '', - FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS replan_history ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - milestone_id TEXT NOT NULL DEFAULT '', - slice_id TEXT DEFAULT NULL, - task_id TEXT DEFAULT NULL, - summary TEXT NOT NULL DEFAULT '', - previous_artifact_path TEXT DEFAULT NULL, - replacement_artifact_path TEXT DEFAULT NULL, - created_at TEXT NOT NULL DEFAULT '', - FOREIGN KEY (milestone_id) REFERENCES milestones(id) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS assessments ( - path TEXT PRIMARY KEY, - 
milestone_id TEXT NOT NULL DEFAULT '', - slice_id TEXT DEFAULT NULL, - task_id TEXT DEFAULT NULL, - status TEXT NOT NULL DEFAULT '', - scope TEXT NOT NULL DEFAULT '', - full_content TEXT NOT NULL DEFAULT '', - created_at TEXT NOT NULL DEFAULT '', - FOREIGN KEY (milestone_id) REFERENCES milestones(id) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS quality_gates ( - milestone_id TEXT NOT NULL, - slice_id TEXT NOT NULL, - gate_id TEXT NOT NULL, - scope TEXT NOT NULL DEFAULT 'slice', - task_id TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT 'pending', - verdict TEXT NOT NULL DEFAULT '', - rationale TEXT NOT NULL DEFAULT '', - findings TEXT NOT NULL DEFAULT '', - evaluated_at TEXT DEFAULT NULL, - PRIMARY KEY (milestone_id, slice_id, gate_id, task_id), - FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) - ) - `); - // Slice dependency junction table (v14) - db.exec(` - CREATE TABLE IF NOT EXISTS slice_dependencies ( - milestone_id TEXT NOT NULL, - slice_id TEXT NOT NULL, - depends_on_slice_id TEXT NOT NULL, - PRIMARY KEY (milestone_id, slice_id, depends_on_slice_id), - FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id), - FOREIGN KEY (milestone_id, depends_on_slice_id) REFERENCES slices(milestone_id, id) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS gate_circuit_breakers ( - gate_id TEXT PRIMARY KEY, - state TEXT NOT NULL DEFAULT 'closed', - failure_streak INTEGER NOT NULL DEFAULT 0, - last_failure_at TEXT DEFAULT NULL, - opened_at TEXT DEFAULT NULL, - half_open_attempts INTEGER NOT NULL DEFAULT 0, - updated_at TEXT NOT NULL DEFAULT '' - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS audit_turn_index ( - trace_id TEXT NOT NULL, - turn_id TEXT NOT NULL, - first_ts TEXT NOT NULL, - last_ts TEXT NOT NULL, - event_count INTEGER NOT NULL DEFAULT 0, - PRIMARY KEY (trace_id, turn_id) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS llm_task_outcomes ( - model_id TEXT NOT NULL, - provider TEXT NOT NULL, - unit_type TEXT NOT NULL, - unit_id TEXT NOT NULL, - succeeded INTEGER NOT NULL DEFAULT 0, - retries INTEGER NOT NULL DEFAULT 0, - escalated INTEGER NOT NULL DEFAULT 0, - verification_passed INTEGER DEFAULT NULL, - blocker_discovered INTEGER NOT NULL DEFAULT 0, - duration_ms INTEGER DEFAULT NULL, - tokens_total INTEGER DEFAULT NULL, - cost_usd REAL DEFAULT NULL, - failure_mode TEXT DEFAULT NULL, - recorded_at INTEGER NOT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS uok_runs ( - run_id TEXT PRIMARY KEY, - session_id TEXT DEFAULT NULL, - path TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT 'started', - started_at TEXT NOT NULL, - ended_at TEXT DEFAULT NULL, - error TEXT DEFAULT NULL, - flags_json TEXT NOT NULL DEFAULT '{}', - updated_at TEXT NOT NULL - ) - `); - ensureSelfFeedbackTables(db); - ensureSolverEvalTables(db); - ensureRetrievalEvidenceTables(db); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_memories_active ON memories(superseded_by)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_replan_history_milestone ON replan_history(milestone_id, created_at)", - ); - // v13 indexes — hot-path dispatch queries - db.exec( - "CREATE INDEX IF NOT EXISTS idx_tasks_active ON tasks(milestone_id, slice_id, status)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_slices_active ON slices(milestone_id, status)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_milestones_status ON milestones(status)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_quality_gates_pending ON quality_gates(milestone_id, slice_id, status)", 
- ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_verification_evidence_task ON verification_evidence(milestone_id, slice_id, task_id)", - ); - ensureVerificationEvidenceDedupIndex(db); - // v14 index — slice dependency lookups - db.exec( - "CREATE INDEX IF NOT EXISTS idx_slice_deps_target ON slice_dependencies(milestone_id, depends_on_slice_id)", - ); - db.exec( - "CREATE UNIQUE INDEX IF NOT EXISTS idx_llm_task_outcomes_identity ON llm_task_outcomes(unit_type, unit_id, recorded_at)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_model_unit ON llm_task_outcomes(model_id, unit_type, recorded_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_unit ON llm_task_outcomes(unit_type, recorded_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_provider ON llm_task_outcomes(provider, recorded_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_uok_runs_status_started ON uok_runs(status, started_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_uok_runs_session ON uok_runs(session_id, started_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_self_feedback_open ON self_feedback(resolved_at, severity, ts)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_self_feedback_kind ON self_feedback(kind, ts)", - ); - ensureRepoProfileTables(db); - ensureBacklogTables(db); - ensureScheduleTables(db); - ensureSolverEvalTables(db); - ensureHeadlessRunTables(db); - ensureSessionTables(db); - ensureSessionSnapshotTable(db); - ensureUokMessageTables(db); - ensureDeployTables(db); - ensureSleeptimeQueueTable(db); - ensureSpecSchemaTables(db); - ensureTaskFrontmatterColumns(db); - ensureRetrievalEvidenceTables(db); - ensureTriageTables(db); - ensureRuntimeCounterTable(db); - ensureValidationAttentionMarkersTable(db); - db.exec( - `CREATE VIEW IF NOT EXISTS active_decisions AS SELECT * FROM decisions WHERE superseded_by IS NULL`, - ); - db.exec( - `CREATE VIEW IF NOT EXISTS active_requirements AS SELECT * FROM requirements WHERE superseded_by IS NULL`, - ); - db.exec( - `CREATE VIEW IF NOT EXISTS active_memories AS SELECT * FROM memories WHERE superseded_by IS NULL`, - ); - db.exec( - `CREATE VIEW IF NOT EXISTS active_tasks AS SELECT * FROM tasks WHERE status NOT IN ('done','complete','completed','cancelled')`, - ); - db.exec(` - CREATE VIEW IF NOT EXISTS v_task_full AS - SELECT t.*, ts.spec_version, ts.verify AS spec_verify, - ts.inputs AS spec_inputs, ts.expected_output AS spec_expected_output - FROM tasks t - LEFT JOIN task_specs ts - ON t.milestone_id = ts.milestone_id - AND t.slice_id = ts.slice_id - AND t.id = ts.task_id - `); - const existing = db - .prepare("SELECT count(*) as cnt FROM schema_version") - .get(); - if (existing && existing["cnt"] === 0) { - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": SCHEMA_VERSION, - ":applied_at": new Date().toISOString(), - }); - } - db.exec("COMMIT"); - } catch (err) { - db.exec("ROLLBACK"); - throw err; - } - migrateSchema(db); -} -function columnExists(db, table, column) { - const rows = db.prepare(`PRAGMA table_info(${table})`).all(); - return rows.some((row) => row["name"] === column); -} -function tableExists(db, table) { - const row = db - .prepare(`SELECT name FROM sqlite_master WHERE type='table' AND name=?`) - .get(table); - return row != null; -} -function ensureColumn(db, table, column, ddl) { - if (!columnExists(db, table, column)) db.exec(ddl); -} -function 
hasPlanningPayload(planning = {}) { - return ( - Boolean(planning.vision) || - (planning.successCriteria?.length ?? 0) > 0 || - (planning.keyRisks?.length ?? 0) > 0 || - (planning.proofStrategy?.length ?? 0) > 0 || - Boolean(planning.verificationContract) || - Boolean(planning.verificationIntegration) || - Boolean(planning.verificationOperational) || - Boolean(planning.verificationUat) || - (planning.definitionOfDone?.length ?? 0) > 0 || - Boolean(planning.requirementCoverage) || - Boolean(planning.boundaryMapMarkdown) || - Boolean(planning.visionMeeting) || - Boolean(planning.productResearch) - ); -} -function parseJsonOrFallback(raw, fallback) { - if (typeof raw !== "string" || raw.trim().length === 0) return fallback; - try { - return JSON.parse(raw); - } catch { - return fallback; - } -} -function isEmptyMilestoneSpec(row) { - if (!row) return true; - return ( - (row["vision"] ?? "") === "" && - parseJsonOrFallback(row["success_criteria"], []).length === 0 && - parseJsonOrFallback(row["key_risks"], []).length === 0 && - parseJsonOrFallback(row["proof_strategy"], []).length === 0 && - (row["verification_contract"] ?? "") === "" && - (row["verification_integration"] ?? "") === "" && - (row["verification_operational"] ?? "") === "" && - (row["verification_uat"] ?? "") === "" && - parseJsonOrFallback(row["definition_of_done"], []).length === 0 && - (row["requirement_coverage"] ?? "") === "" && - (row["boundary_map_markdown"] ?? "") === "" && - (row["vision_meeting_json"] ?? "") === "" && - (row["product_research_json"] ?? "") === "" - ); -} -function ensureTaskCreatedAtColumn(db) { - ensureColumn( - db, - "tasks", - "created_at", - `ALTER TABLE tasks ADD COLUMN created_at TEXT NOT NULL DEFAULT ''`, - ); -} -function ensureTaskFrontmatterColumns(db) { - ensureColumn( - db, - "tasks", - "risk", - `ALTER TABLE tasks ADD COLUMN risk TEXT NOT NULL DEFAULT 'low'`, - ); - ensureColumn( - db, - "tasks", - "mutation_scope", - `ALTER TABLE tasks ADD COLUMN mutation_scope TEXT NOT NULL DEFAULT 'isolated'`, - ); - ensureColumn( - db, - "tasks", - "verification_type", - `ALTER TABLE tasks ADD COLUMN verification_type TEXT NOT NULL DEFAULT 'self-check'`, - ); - ensureColumn( - db, - "tasks", - "plan_approval", - `ALTER TABLE tasks ADD COLUMN plan_approval TEXT NOT NULL DEFAULT 'not-required'`, - ); - ensureColumn( - db, - "tasks", - "task_status", - `ALTER TABLE tasks ADD COLUMN task_status TEXT NOT NULL DEFAULT 'todo'`, - ); - ensureColumn( - db, - "tasks", - "estimated_effort", - `ALTER TABLE tasks ADD COLUMN estimated_effort INTEGER DEFAULT NULL`, - ); - ensureColumn( - db, - "tasks", - "dependencies", - `ALTER TABLE tasks ADD COLUMN dependencies TEXT NOT NULL DEFAULT '[]'`, - ); - ensureColumn( - db, - "tasks", - "blocks_parallel", - `ALTER TABLE tasks ADD COLUMN blocks_parallel INTEGER NOT NULL DEFAULT 0`, - ); - ensureColumn( - db, - "tasks", - "requires_user_input", - `ALTER TABLE tasks ADD COLUMN requires_user_input INTEGER NOT NULL DEFAULT 0`, - ); - ensureColumn( - db, - "tasks", - "auto_retry", - `ALTER TABLE tasks ADD COLUMN auto_retry INTEGER NOT NULL DEFAULT 1`, - ); - ensureColumn( - db, - "tasks", - "max_retries", - `ALTER TABLE tasks ADD COLUMN max_retries INTEGER NOT NULL DEFAULT 2`, - ); - for (const table of ["task_specs"]) { - ensureColumn( - db, - table, - "risk", - `ALTER TABLE ${table} ADD COLUMN risk TEXT NOT NULL DEFAULT 'low'`, - ); - ensureColumn( - db, - table, - "mutation_scope", - `ALTER TABLE ${table} ADD COLUMN mutation_scope TEXT NOT NULL DEFAULT 'isolated'`, - ); - 
ensureColumn( - db, - table, - "verification_type", - `ALTER TABLE ${table} ADD COLUMN verification_type TEXT NOT NULL DEFAULT 'self-check'`, - ); - ensureColumn( - db, - table, - "plan_approval", - `ALTER TABLE ${table} ADD COLUMN plan_approval TEXT NOT NULL DEFAULT 'not-required'`, - ); - ensureColumn( - db, - table, - "estimated_effort", - `ALTER TABLE ${table} ADD COLUMN estimated_effort INTEGER DEFAULT NULL`, - ); - ensureColumn( - db, - table, - "dependencies", - `ALTER TABLE ${table} ADD COLUMN dependencies TEXT NOT NULL DEFAULT '[]'`, - ); - ensureColumn( - db, - table, - "blocks_parallel", - `ALTER TABLE ${table} ADD COLUMN blocks_parallel INTEGER NOT NULL DEFAULT 0`, - ); - ensureColumn( - db, - table, - "requires_user_input", - `ALTER TABLE ${table} ADD COLUMN requires_user_input INTEGER NOT NULL DEFAULT 0`, - ); - ensureColumn( - db, - table, - "auto_retry", - `ALTER TABLE ${table} ADD COLUMN auto_retry INTEGER NOT NULL DEFAULT 1`, - ); - ensureColumn( - db, - table, - "max_retries", - `ALTER TABLE ${table} ADD COLUMN max_retries INTEGER NOT NULL DEFAULT 2`, - ); - } -} -function ensureTaskSchedulerTable(db) { - db.exec(` - CREATE TABLE IF NOT EXISTS task_scheduler ( - milestone_id TEXT NOT NULL, - slice_id TEXT NOT NULL, - task_id TEXT NOT NULL, - status TEXT NOT NULL DEFAULT 'queued', - due_at TEXT DEFAULT NULL, - claimed_by TEXT DEFAULT NULL, - dispatched_at TEXT DEFAULT NULL, - consumed_at TEXT DEFAULT NULL, - expires_at TEXT DEFAULT NULL, - updated_at TEXT NOT NULL DEFAULT '', - PRIMARY KEY (milestone_id, slice_id, task_id), - FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) - ) - `); - db.exec(` - CREATE INDEX IF NOT EXISTS idx_task_scheduler_status - ON task_scheduler(status, due_at) - `); -} -function migrateCostUsdToMicroUsd(db) { - // Tier 2.7: Migrate cost_usd REAL to cost_micro_usd INTEGER - // Converts floating-point USD values to integer micro-USD (multiply by 1,000,000) - // Benefits: eliminates float drift on accumulated costs, easier reasoning about totals - // Purpose: Enable accurate cost tracking at scale without rounding errors - // Consumer: gate_runs cost tracking, cost analytics, budget checks - - // Guard: gate_runs may not exist in minimal legacy DBs (it will be dropped in v58) - if (!tableExists(db, "gate_runs")) return; - - // Add cost_micro_usd column if it doesn't exist - if (!columnExists(db, "gate_runs", "cost_micro_usd")) { - db.exec( - `ALTER TABLE gate_runs ADD COLUMN cost_micro_usd INTEGER DEFAULT NULL`, - ); - } - - // Migrate data: convert cost_usd to cost_micro_usd - // NULL values stay NULL; non-NULL values are multiplied by 1,000,000 - if (columnExists(db, "gate_runs", "cost_usd")) { - db.prepare(` - UPDATE gate_runs - SET cost_micro_usd = CAST(ROUND(cost_usd * 1000000) AS INTEGER) - WHERE cost_usd IS NOT NULL - AND cost_micro_usd IS NULL - `).run(); - } - - // Drop old cost_usd column (SQLite ALTER TABLE DROP is only available in 3.35.0+) - // For safety, we keep the old column as deprecated but unused - // Future: drop after confirming all queries use cost_micro_usd -} -function populateSpecTablesFromExisting(db) { - // Tier 1.3 Phase 2: Migrate existing spec data to new spec tables - // This populates milestone_specs, slice_specs, task_specs from existing columns - // Evidence tables are left empty; they populate as tools create new evidence. 
- - const now = new Date().toISOString(); - - // Migrate milestone specs - db.prepare(` - INSERT OR IGNORE INTO milestone_specs ( - id, vision, success_criteria, key_risks, proof_strategy, - verification_contract, verification_integration, verification_operational, verification_uat, - definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json, - spec_version, created_at - ) - SELECT - id, vision, success_criteria, key_risks, proof_strategy, - verification_contract, verification_integration, verification_operational, verification_uat, - definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, '', - 1, COALESCE(created_at, ?) - FROM milestones - WHERE id NOT IN (SELECT id FROM milestone_specs) - `).run(now); - - // Migrate slice specs - db.prepare(` - INSERT OR IGNORE INTO slice_specs ( - milestone_id, slice_id, goal, success_criteria, proof_level, - integration_closure, observability_impact, - adversarial_partner, adversarial_combatant, adversarial_architect, - planning_meeting_json, spec_version, created_at - ) - SELECT - milestone_id, id, goal, success_criteria, proof_level, - integration_closure, observability_impact, - adversarial_partner, adversarial_combatant, adversarial_architect, - planning_meeting_json, 1, COALESCE(created_at, ?) - FROM slices - WHERE (milestone_id, id) NOT IN (SELECT milestone_id, slice_id FROM slice_specs) - `).run(now); - - // Migrate task specs - db.prepare(` - INSERT OR IGNORE INTO task_specs ( - milestone_id, slice_id, task_id, verify, inputs, expected_output, - spec_version, created_at - ) - SELECT - milestone_id, slice_id, id, verify, inputs, expected_output, - 1, COALESCE(created_at, ?) - FROM tasks - WHERE (milestone_id, slice_id, id) NOT IN (SELECT milestone_id, slice_id, task_id FROM task_specs) - `).run(now); -} -function migrateSchema(db) { - const row = withQueryTimeout( - () => db.prepare("SELECT MAX(version) as v FROM schema_version").get(), - null, - ); - const currentVersion = row ? row["v"] : 0; - if (currentVersion >= SCHEMA_VERSION) return; - // Backup database before migration so a mid-migration crash doesn't - // leave a partially-migrated DB with no recovery path. - // WAL-safe: checkpoint first to flush WAL into the main DB file, then copy. - if (currentPath && currentPath !== ":memory:" && existsSync(currentPath)) { - try { - const backupPath = `${currentPath}.backup-v${currentVersion}`; - if (!existsSync(backupPath)) { - // Flush WAL to main DB file before copying — without this, the backup - // may be missing committed data that only exists in the -wal file. - try { - db.exec("PRAGMA wal_checkpoint(TRUNCATE)"); - } catch { - /* checkpoint is best-effort */ - } - copyFileSync(currentPath, backupPath); - } - } catch (backupErr) { - // Log but proceed — blocking migration leaves the DB stuck at an old - // schema version permanently on read-only or full filesystems. - logWarning( - "db", - `Pre-migration backup failed: ${backupErr instanceof Error ? 
backupErr.message : String(backupErr)}`, - ); - } - } - db.exec("BEGIN"); - try { - if (currentVersion < 2) { - db.exec(` - CREATE TABLE IF NOT EXISTS artifacts ( - path TEXT PRIMARY KEY, - artifact_type TEXT NOT NULL DEFAULT '', - milestone_id TEXT DEFAULT NULL, - slice_id TEXT DEFAULT NULL, - task_id TEXT DEFAULT NULL, - full_content TEXT NOT NULL DEFAULT '', - imported_at TEXT NOT NULL DEFAULT '' - ) - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 2, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 3) { - db.exec(` - CREATE TABLE IF NOT EXISTS memories ( - seq INTEGER PRIMARY KEY AUTOINCREMENT, - id TEXT NOT NULL UNIQUE, - category TEXT NOT NULL, - content TEXT NOT NULL, - confidence REAL NOT NULL DEFAULT 0.8, - source_unit_type TEXT, - source_unit_id TEXT, - created_at TEXT NOT NULL, - updated_at TEXT NOT NULL, - superseded_by TEXT DEFAULT NULL, - hit_count INTEGER NOT NULL DEFAULT 0 - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS memory_processed_units ( - unit_key TEXT PRIMARY KEY, - activity_file TEXT, - processed_at TEXT NOT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_memories_active ON memories(superseded_by)", - ); - db.exec("DROP VIEW IF EXISTS active_memories"); - db.exec( - "CREATE VIEW active_memories AS SELECT * FROM memories WHERE superseded_by IS NULL", - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 3, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 4) { - ensureColumn( - db, - "decisions", - "made_by", - `ALTER TABLE decisions ADD COLUMN made_by TEXT NOT NULL DEFAULT 'agent'`, - ); - db.exec("DROP VIEW IF EXISTS active_decisions"); - db.exec( - "CREATE VIEW active_decisions AS SELECT * FROM decisions WHERE superseded_by IS NULL", - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 4, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 5) { - db.exec(` - CREATE TABLE IF NOT EXISTS milestones ( - id TEXT PRIMARY KEY, - title TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT 'active', - created_at TEXT NOT NULL, - completed_at TEXT DEFAULT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS slices ( - milestone_id TEXT NOT NULL, - id TEXT NOT NULL, - title TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT 'pending', - risk TEXT NOT NULL DEFAULT 'medium', - created_at TEXT NOT NULL DEFAULT '', - completed_at TEXT DEFAULT NULL, - PRIMARY KEY (milestone_id, id), - FOREIGN KEY (milestone_id) REFERENCES milestones(id) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS tasks ( - milestone_id TEXT NOT NULL, - slice_id TEXT NOT NULL, - id TEXT NOT NULL, - title TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT 'pending', - one_liner TEXT NOT NULL DEFAULT '', - narrative TEXT NOT NULL DEFAULT '', - verification_result TEXT NOT NULL DEFAULT '', - duration TEXT NOT NULL DEFAULT '', - completed_at TEXT DEFAULT NULL, - blocker_discovered INTEGER DEFAULT 0, - deviations TEXT NOT NULL DEFAULT '', - known_issues TEXT NOT NULL DEFAULT '', - key_files TEXT NOT NULL DEFAULT '[]', - key_decisions TEXT NOT NULL DEFAULT '[]', - full_summary_md TEXT NOT NULL DEFAULT '', - PRIMARY KEY (milestone_id, slice_id, id), - FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS 
verification_evidence ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - task_id TEXT NOT NULL DEFAULT '', - slice_id TEXT NOT NULL DEFAULT '', - milestone_id TEXT NOT NULL DEFAULT '', - command TEXT NOT NULL DEFAULT '', - exit_code INTEGER DEFAULT 0, - verdict TEXT NOT NULL DEFAULT '', - duration_ms INTEGER DEFAULT 0, - created_at TEXT NOT NULL DEFAULT '', - FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) - ) - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 5, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 6) { - ensureColumn( - db, - "slices", - "full_summary_md", - `ALTER TABLE slices ADD COLUMN full_summary_md TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "slices", - "full_uat_md", - `ALTER TABLE slices ADD COLUMN full_uat_md TEXT NOT NULL DEFAULT ''`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 6, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 7) { - ensureColumn( - db, - "slices", - "depends", - `ALTER TABLE slices ADD COLUMN depends TEXT NOT NULL DEFAULT '[]'`, - ); - ensureColumn( - db, - "slices", - "demo", - `ALTER TABLE slices ADD COLUMN demo TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "milestones", - "depends_on", - `ALTER TABLE milestones ADD COLUMN depends_on TEXT NOT NULL DEFAULT '[]'`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 7, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 8) { - ensureColumn( - db, - "milestones", - "vision", - `ALTER TABLE milestones ADD COLUMN vision TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "milestones", - "success_criteria", - `ALTER TABLE milestones ADD COLUMN success_criteria TEXT NOT NULL DEFAULT '[]'`, - ); - ensureColumn( - db, - "milestones", - "key_risks", - `ALTER TABLE milestones ADD COLUMN key_risks TEXT NOT NULL DEFAULT '[]'`, - ); - ensureColumn( - db, - "milestones", - "proof_strategy", - `ALTER TABLE milestones ADD COLUMN proof_strategy TEXT NOT NULL DEFAULT '[]'`, - ); - ensureColumn( - db, - "milestones", - "verification_contract", - `ALTER TABLE milestones ADD COLUMN verification_contract TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "milestones", - "verification_integration", - `ALTER TABLE milestones ADD COLUMN verification_integration TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "milestones", - "verification_operational", - `ALTER TABLE milestones ADD COLUMN verification_operational TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "milestones", - "verification_uat", - `ALTER TABLE milestones ADD COLUMN verification_uat TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "milestones", - "definition_of_done", - `ALTER TABLE milestones ADD COLUMN definition_of_done TEXT NOT NULL DEFAULT '[]'`, - ); - ensureColumn( - db, - "milestones", - "requirement_coverage", - `ALTER TABLE milestones ADD COLUMN requirement_coverage TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "milestones", - "boundary_map_markdown", - `ALTER TABLE milestones ADD COLUMN boundary_map_markdown TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "slices", - "goal", - `ALTER TABLE slices ADD COLUMN goal TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "slices", - "success_criteria", - `ALTER TABLE slices ADD 
COLUMN success_criteria TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "slices", - "proof_level", - `ALTER TABLE slices ADD COLUMN proof_level TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "slices", - "integration_closure", - `ALTER TABLE slices ADD COLUMN integration_closure TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "slices", - "observability_impact", - `ALTER TABLE slices ADD COLUMN observability_impact TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "slices", - "uat_verdict", - `ALTER TABLE slices ADD COLUMN uat_verdict TEXT DEFAULT NULL`, - ); - ensureColumn( - db, - "tasks", - "description", - `ALTER TABLE tasks ADD COLUMN description TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "tasks", - "estimate", - `ALTER TABLE tasks ADD COLUMN estimate TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "tasks", - "files", - `ALTER TABLE tasks ADD COLUMN files TEXT NOT NULL DEFAULT '[]'`, - ); - ensureColumn( - db, - "tasks", - "verify", - `ALTER TABLE tasks ADD COLUMN verify TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "tasks", - "inputs", - `ALTER TABLE tasks ADD COLUMN inputs TEXT NOT NULL DEFAULT '[]'`, - ); - ensureColumn( - db, - "tasks", - "expected_output", - `ALTER TABLE tasks ADD COLUMN expected_output TEXT NOT NULL DEFAULT '[]'`, - ); - ensureColumn( - db, - "tasks", - "observability_impact", - `ALTER TABLE tasks ADD COLUMN observability_impact TEXT NOT NULL DEFAULT ''`, - ); - db.exec(` - CREATE TABLE IF NOT EXISTS replan_history ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - milestone_id TEXT NOT NULL DEFAULT '', - slice_id TEXT DEFAULT NULL, - task_id TEXT DEFAULT NULL, - summary TEXT NOT NULL DEFAULT '', - previous_artifact_path TEXT DEFAULT NULL, - replacement_artifact_path TEXT DEFAULT NULL, - created_at TEXT NOT NULL DEFAULT '', - FOREIGN KEY (milestone_id) REFERENCES milestones(id) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS assessments ( - path TEXT PRIMARY KEY, - milestone_id TEXT NOT NULL DEFAULT '', - slice_id TEXT DEFAULT NULL, - task_id TEXT DEFAULT NULL, - status TEXT NOT NULL DEFAULT '', - scope TEXT NOT NULL DEFAULT '', - full_content TEXT NOT NULL DEFAULT '', - created_at TEXT NOT NULL DEFAULT '', - FOREIGN KEY (milestone_id) REFERENCES milestones(id) - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_replan_history_milestone ON replan_history(milestone_id, created_at)", - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 8, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 9) { - ensureColumn( - db, - "slices", - "sequence", - `ALTER TABLE slices ADD COLUMN sequence INTEGER DEFAULT 0`, - ); - ensureColumn( - db, - "tasks", - "sequence", - `ALTER TABLE tasks ADD COLUMN sequence INTEGER DEFAULT 0`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 9, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 10) { - ensureColumn( - db, - "slices", - "replan_triggered_at", - `ALTER TABLE slices ADD COLUMN replan_triggered_at TEXT DEFAULT NULL`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 10, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 11) { - ensureColumn( - db, - "tasks", - "full_plan_md", - `ALTER TABLE tasks ADD COLUMN full_plan_md TEXT NOT NULL DEFAULT ''`, - ); - // Add unique 
constraint to replan_history for idempotency: - // one replan record per blocker task per slice per milestone. - db.exec(` - CREATE UNIQUE INDEX IF NOT EXISTS idx_replan_history_unique - ON replan_history(milestone_id, slice_id, task_id) - WHERE slice_id IS NOT NULL AND task_id IS NOT NULL - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 11, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 12) { - db.exec(` - CREATE TABLE IF NOT EXISTS quality_gates ( - milestone_id TEXT NOT NULL, - slice_id TEXT NOT NULL, - gate_id TEXT NOT NULL, - scope TEXT NOT NULL DEFAULT 'slice', - task_id TEXT DEFAULT NULL, - status TEXT NOT NULL DEFAULT 'pending', - verdict TEXT NOT NULL DEFAULT '', - rationale TEXT NOT NULL DEFAULT '', - findings TEXT NOT NULL DEFAULT '', - evaluated_at TEXT DEFAULT NULL, - PRIMARY KEY (milestone_id, slice_id, gate_id, COALESCE(task_id, '')), - FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) - ) - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 12, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 13) { - // Hot-path indexes for auto-loop dispatch queries - db.exec( - "CREATE INDEX IF NOT EXISTS idx_tasks_active ON tasks(milestone_id, slice_id, status)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_slices_active ON slices(milestone_id, status)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_milestones_status ON milestones(status)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_quality_gates_pending ON quality_gates(milestone_id, slice_id, status)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_verification_evidence_task ON verification_evidence(milestone_id, slice_id, task_id)", - ); - ensureVerificationEvidenceDedupIndex(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 13, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 14) { - db.exec(` - CREATE TABLE IF NOT EXISTS slice_dependencies ( - milestone_id TEXT NOT NULL, - slice_id TEXT NOT NULL, - depends_on_slice_id TEXT NOT NULL, - PRIMARY KEY (milestone_id, slice_id, depends_on_slice_id), - FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id), - FOREIGN KEY (milestone_id, depends_on_slice_id) REFERENCES slices(milestone_id, id) - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_slice_deps_target ON slice_dependencies(milestone_id, depends_on_slice_id)", - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 14, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 15) { - db.exec(` - CREATE TABLE IF NOT EXISTS gate_runs ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - trace_id TEXT NOT NULL, - turn_id TEXT NOT NULL, - gate_id TEXT NOT NULL, - gate_type TEXT NOT NULL DEFAULT '', - unit_type TEXT DEFAULT NULL, - unit_id TEXT DEFAULT NULL, - milestone_id TEXT DEFAULT NULL, - slice_id TEXT DEFAULT NULL, - task_id TEXT DEFAULT NULL, - outcome TEXT NOT NULL DEFAULT 'pass', - failure_class TEXT NOT NULL DEFAULT 'none', - rationale TEXT NOT NULL DEFAULT '', - findings TEXT NOT NULL DEFAULT '', - attempt INTEGER NOT NULL DEFAULT 1, - max_attempts INTEGER NOT NULL DEFAULT 1, - retryable INTEGER NOT NULL DEFAULT 0, - evaluated_at TEXT NOT NULL DEFAULT '', - duration_ms 
INTEGER DEFAULT NULL, - cost_micro_usd INTEGER DEFAULT NULL - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS turn_git_transactions ( - trace_id TEXT NOT NULL, - turn_id TEXT NOT NULL, - unit_type TEXT DEFAULT NULL, - unit_id TEXT DEFAULT NULL, - stage TEXT NOT NULL DEFAULT 'turn-start', - action TEXT NOT NULL DEFAULT 'status-only', - push INTEGER NOT NULL DEFAULT 0, - status TEXT NOT NULL DEFAULT 'ok', - error TEXT DEFAULT NULL, - metadata_json TEXT NOT NULL DEFAULT '{}', - updated_at TEXT NOT NULL DEFAULT '', - PRIMARY KEY (trace_id, turn_id, stage) - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS audit_events ( - event_id TEXT PRIMARY KEY, - trace_id TEXT NOT NULL, - turn_id TEXT DEFAULT NULL, - caused_by TEXT DEFAULT NULL, - category TEXT NOT NULL, - type TEXT NOT NULL, - ts TEXT NOT NULL, - payload_json TEXT NOT NULL DEFAULT '{}' - ) - `); - db.exec(` - CREATE TABLE IF NOT EXISTS audit_turn_index ( - trace_id TEXT NOT NULL, - turn_id TEXT NOT NULL, - first_ts TEXT NOT NULL, - last_ts TEXT NOT NULL, - event_count INTEGER NOT NULL DEFAULT 0, - PRIMARY KEY (trace_id, turn_id) - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_gate_runs_turn ON gate_runs(trace_id, turn_id)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_gate_runs_lookup ON gate_runs(milestone_id, slice_id, task_id, gate_id)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_turn_git_tx_turn ON turn_git_transactions(trace_id, turn_id)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_audit_events_trace ON audit_events(trace_id, ts)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_audit_events_turn ON audit_events(trace_id, turn_id, ts)", - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 15, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 16) { - db.exec(` - CREATE TABLE IF NOT EXISTS llm_task_outcomes ( - model_id TEXT NOT NULL, - provider TEXT NOT NULL, - unit_type TEXT NOT NULL, - unit_id TEXT NOT NULL, - succeeded INTEGER NOT NULL DEFAULT 0, - retries INTEGER NOT NULL DEFAULT 0, - escalated INTEGER NOT NULL DEFAULT 0, - verification_passed INTEGER DEFAULT NULL, - blocker_discovered INTEGER NOT NULL DEFAULT 0, - duration_ms INTEGER DEFAULT NULL, - tokens_total INTEGER DEFAULT NULL, - cost_usd REAL DEFAULT NULL, - failure_mode TEXT DEFAULT NULL, - recorded_at INTEGER NOT NULL - ) - `); - db.exec( - "CREATE UNIQUE INDEX IF NOT EXISTS idx_llm_task_outcomes_identity ON llm_task_outcomes(unit_type, unit_id, recorded_at)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_model_unit ON llm_task_outcomes(model_id, unit_type, recorded_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_unit ON llm_task_outcomes(unit_type, recorded_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_provider ON llm_task_outcomes(provider, recorded_at DESC)", - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 16, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 17) { - ensureColumn( - db, - "tasks", - "verification_status", - `ALTER TABLE tasks ADD COLUMN verification_status TEXT NOT NULL DEFAULT ''`, - ); - // Backfill verification_status from existing verification_evidence rows so the - // prior-task guard works on databases upgraded mid-project (not just new ones). 
- db.exec(` - UPDATE tasks - SET verification_status = CASE - WHEN (SELECT COUNT(*) FROM verification_evidence ve - WHERE ve.milestone_id = tasks.milestone_id - AND ve.slice_id = tasks.slice_id - AND ve.task_id = tasks.id) = 0 - THEN '' - WHEN (SELECT COUNT(*) FROM verification_evidence ve - WHERE ve.milestone_id = tasks.milestone_id - AND ve.slice_id = tasks.slice_id - AND ve.task_id = tasks.id - AND ve.exit_code != 0) = 0 - THEN 'all_pass' - WHEN (SELECT COUNT(*) FROM verification_evidence ve - WHERE ve.milestone_id = tasks.milestone_id - AND ve.slice_id = tasks.slice_id - AND ve.task_id = tasks.id - AND ve.exit_code = 0) > 0 - THEN 'partial' - ELSE 'all_fail' - END - WHERE tasks.status IN ('complete', 'done') - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 17, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 18) { - ensureColumn( - db, - "slices", - "adversarial_partner", - `ALTER TABLE slices ADD COLUMN adversarial_partner TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "slices", - "adversarial_combatant", - `ALTER TABLE slices ADD COLUMN adversarial_combatant TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "slices", - "adversarial_architect", - `ALTER TABLE slices ADD COLUMN adversarial_architect TEXT NOT NULL DEFAULT ''`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 18, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 19) { - ensureColumn( - db, - "slices", - "planning_meeting_json", - `ALTER TABLE slices ADD COLUMN planning_meeting_json TEXT NOT NULL DEFAULT ''`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 19, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 20) { - ensureColumn( - db, - "milestones", - "vision_meeting_json", - `ALTER TABLE milestones ADD COLUMN vision_meeting_json TEXT NOT NULL DEFAULT ''`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 20, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 21) { - ensureRepoProfileTables(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 21, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 22) { - // SF ADR-011: progressive planning. is_sketch=1 means the slice is a 2-3 - // sentence sketch awaiting refine-slice expansion; refine fills in the - // real plan and clears the flag. sketch_scope holds the milestone - // planner's stored scope hint that refine treats as a hard boundary. - ensureColumn( - db, - "slices", - "is_sketch", - `ALTER TABLE slices ADD COLUMN is_sketch INTEGER NOT NULL DEFAULT 0`, - ); - ensureColumn( - db, - "slices", - "sketch_scope", - `ALTER TABLE slices ADD COLUMN sketch_scope TEXT NOT NULL DEFAULT ''`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 22, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 23) { - // ADR-011 Phase 2 (SF ADR): mid-execution escalation. escalation_pending=1 - // marks a task that paused for a user decision; escalation_artifact_path - // points to the T##-ESCALATION.json file containing options + recommendation. 
- // State derivation will emit phase='escalating-task' when any task in the - // active slice has escalation_pending=1; dispatch returns 'stop' so the - // loop never bypasses a pending decision. - ensureColumn( - db, - "tasks", - "escalation_pending", - `ALTER TABLE tasks ADD COLUMN escalation_pending INTEGER NOT NULL DEFAULT 0`, - ); - ensureColumn( - db, - "tasks", - "escalation_artifact_path", - `ALTER TABLE tasks ADD COLUMN escalation_artifact_path TEXT DEFAULT NULL`, - ); - try { - db.exec( - "CREATE INDEX IF NOT EXISTS idx_tasks_escalation_pending ON tasks(milestone_id, slice_id, escalation_pending)", - ); - } catch { - /* index creation is opportunistic — fall through if backend lacks it */ - } - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 23, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 24) { - // ADR-011 P2 (SF ADR): the third escalation flag for the - // continueWithDefault=true case — an artifact is recorded for human - // review later, but the loop is NOT paused. Mutually exclusive with - // escalation_pending (the writer flips one or the other). - ensureColumn( - db, - "tasks", - "escalation_awaiting_review", - `ALTER TABLE tasks ADD COLUMN escalation_awaiting_review INTEGER NOT NULL DEFAULT 0`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 24, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 25) { - // SF ADR-011 P2 carry-forward: when an escalation is resolved, the user's - // choice should be visible to the next execute-task agent in the same - // slice. escalation_override_applied=0 marks "resolved but not yet - // injected into a downstream prompt"; the prompt builder calls - // claimEscalationOverride which atomically flips it to 1 (idempotent - // race-safe claim). Per-task granularity so multi-task slices can - // carry multiple resolved escalations forward independently. 
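claimEscalationOverride is not shown in this part of the hunk; a sketch of the race-safe claim the comment above describes, assuming it relies on the UPDATE's changes count to pick a single winner:

function claimEscalationOverride(db, milestoneId, sliceId, taskId) {
  // Only one caller can flip 0 -> 1; later callers see changes === 0 and skip
  // injecting the resolved escalation into their prompt again.
  const result = db
    .prepare(`
      UPDATE tasks
      SET escalation_override_applied = 1
      WHERE milestone_id = ? AND slice_id = ? AND id = ?
        AND escalation_override_applied = 0
    `)
    .run(milestoneId, sliceId, taskId);
  return result.changes === 1;
}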
- ensureColumn( - db, - "tasks", - "escalation_override_applied", - `ALTER TABLE tasks ADD COLUMN escalation_override_applied INTEGER NOT NULL DEFAULT 0`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 25, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 26) { - db.exec(` - CREATE TABLE IF NOT EXISTS uok_runs ( - run_id TEXT PRIMARY KEY, - session_id TEXT DEFAULT NULL, - path TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT 'started', - started_at TEXT NOT NULL, - ended_at TEXT DEFAULT NULL, - error TEXT DEFAULT NULL, - flags_json TEXT NOT NULL DEFAULT '{}', - updated_at TEXT NOT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_uok_runs_status_started ON uok_runs(status, started_at DESC)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_uok_runs_session ON uok_runs(session_id, started_at DESC)", - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 26, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 27) { - ensureSolverEvalTables(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 27, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 28) { - // UOK observability: gate execution latency - // Guard: gate_runs table may not exist in minimal legacy DBs (it will be dropped in v58) - if (tableExists(db, "gate_runs")) { - ensureColumn( - db, - "gate_runs", - "duration_ms", - "ALTER TABLE gate_runs ADD COLUMN duration_ms INTEGER DEFAULT NULL", - ); - } - // UOK circuit breaker state - db.exec(` - CREATE TABLE IF NOT EXISTS gate_circuit_breakers ( - gate_id TEXT PRIMARY KEY, - state TEXT NOT NULL DEFAULT 'closed', - failure_streak INTEGER NOT NULL DEFAULT 0, - last_failure_at TEXT DEFAULT NULL, - opened_at TEXT DEFAULT NULL, - half_open_attempts INTEGER NOT NULL DEFAULT 0, - updated_at TEXT NOT NULL DEFAULT '' - ) - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 28, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 29) { - ensureHeadlessRunTables(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 29, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 30) { - ensureSelfFeedbackTables(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 30, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 31) { - ensureUokMessageTables(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 31, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 32) { - ensureTaskCreatedAtColumn(db); - ensureSpecSchemaTables(db); - // Populate spec tables from existing spec columns in runtime tables - populateSpecTablesFromExisting(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 32, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 33) { - ensureColumn( - db, - "milestones", - "sequence", - `ALTER TABLE milestones ADD COLUMN sequence INTEGER DEFAULT 0`, - ); - db.prepare( - "INSERT INTO schema_version 
(version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 33, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 34) { - ensureTaskCreatedAtColumn(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 34, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 35) { - ensureBacklogTables(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 35, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 36) { - migrateCostUsdToMicroUsd(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 36, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 37) { - ensureScheduleTables(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 37, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 38) { - try { - db.exec( - "ALTER TABLE memories ADD COLUMN tags TEXT NOT NULL DEFAULT '[]'", - ); - } catch { - // Column may already exist on fresh DBs - } - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 38, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 39) { - db.exec( - "CREATE INDEX IF NOT EXISTS idx_memory_sources_content_hash ON memory_sources(content_hash)", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_memories_category ON memories(superseded_by, category)", - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 39, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 40) { - db.exec(` - CREATE TABLE IF NOT EXISTS judgments ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - unit_id TEXT NOT NULL, - decision TEXT NOT NULL DEFAULT '', - alternatives_json TEXT NOT NULL DEFAULT '[]', - reasoning TEXT NOT NULL DEFAULT '', - confidence TEXT NOT NULL DEFAULT 'medium', - ts TEXT NOT NULL - ) - `); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_judgments_unit_id ON judgments(unit_id, ts DESC)", - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 40, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 41) { - ensureRetrievalEvidenceTables(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 41, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 42) { - ensureColumn( - db, - "milestones", - "product_research_json", - `ALTER TABLE milestones ADD COLUMN product_research_json TEXT NOT NULL DEFAULT ''`, - ); - ensureColumn( - db, - "milestone_specs", - "product_research_json", - `ALTER TABLE milestone_specs ADD COLUMN product_research_json TEXT DEFAULT ''`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 42, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 43) { - db.exec(` - CREATE TABLE IF NOT EXISTS session_mode_state ( - id INTEGER PRIMARY KEY CHECK (id = 1), - work_mode TEXT NOT NULL DEFAULT 'chat', - run_control TEXT NOT NULL DEFAULT 'manual', - permission_profile TEXT NOT NULL DEFAULT 'restricted', - 
model_mode TEXT NOT NULL DEFAULT 'smart', - surface TEXT NOT NULL DEFAULT 'tui', - updated_at TEXT NOT NULL DEFAULT '' - ) - `); - db.exec(` - INSERT OR IGNORE INTO session_mode_state (id, work_mode, run_control, permission_profile, model_mode, surface, updated_at) - VALUES (1, 'chat', 'manual', 'restricted', 'smart', 'tui', datetime('now')) - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 43, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 44) { - ensureSpecSchemaTables(db); - ensureTaskFrontmatterColumns(db); - db.exec(` - UPDATE tasks - SET task_status = CASE status - WHEN 'complete' THEN 'done' - WHEN 'completed' THEN 'done' - WHEN 'done' THEN 'done' - WHEN 'running' THEN 'running' - WHEN 'in_progress' THEN 'running' - WHEN 'blocked' THEN 'blocked' - WHEN 'failed' THEN 'failed' - WHEN 'cancelled' THEN 'cancelled' - ELSE COALESCE(NULLIF(task_status, ''), 'todo') - END - `); - db.exec(` - UPDATE task_specs - SET risk = COALESCE((SELECT tasks.risk FROM tasks - WHERE tasks.milestone_id = task_specs.milestone_id - AND tasks.slice_id = task_specs.slice_id - AND tasks.id = task_specs.task_id), risk), - mutation_scope = COALESCE((SELECT tasks.mutation_scope FROM tasks - WHERE tasks.milestone_id = task_specs.milestone_id - AND tasks.slice_id = task_specs.slice_id - AND tasks.id = task_specs.task_id), mutation_scope), - verification_type = COALESCE((SELECT tasks.verification_type FROM tasks - WHERE tasks.milestone_id = task_specs.milestone_id - AND tasks.slice_id = task_specs.slice_id - AND tasks.id = task_specs.task_id), verification_type), - plan_approval = COALESCE((SELECT tasks.plan_approval FROM tasks - WHERE tasks.milestone_id = task_specs.milestone_id - AND tasks.slice_id = task_specs.slice_id - AND tasks.id = task_specs.task_id), plan_approval), - estimated_effort = COALESCE((SELECT tasks.estimated_effort FROM tasks - WHERE tasks.milestone_id = task_specs.milestone_id - AND tasks.slice_id = task_specs.slice_id - AND tasks.id = task_specs.task_id), estimated_effort), - dependencies = COALESCE((SELECT tasks.dependencies FROM tasks - WHERE tasks.milestone_id = task_specs.milestone_id - AND tasks.slice_id = task_specs.slice_id - AND tasks.id = task_specs.task_id), dependencies), - blocks_parallel = COALESCE((SELECT tasks.blocks_parallel FROM tasks - WHERE tasks.milestone_id = task_specs.milestone_id - AND tasks.slice_id = task_specs.slice_id - AND tasks.id = task_specs.task_id), blocks_parallel), - requires_user_input = COALESCE((SELECT tasks.requires_user_input FROM tasks - WHERE tasks.milestone_id = task_specs.milestone_id - AND tasks.slice_id = task_specs.slice_id - AND tasks.id = task_specs.task_id), requires_user_input), - auto_retry = COALESCE((SELECT tasks.auto_retry FROM tasks - WHERE tasks.milestone_id = task_specs.milestone_id - AND tasks.slice_id = task_specs.slice_id - AND tasks.id = task_specs.task_id), auto_retry), - max_retries = COALESCE((SELECT tasks.max_retries FROM tasks - WHERE tasks.milestone_id = task_specs.milestone_id - AND tasks.slice_id = task_specs.slice_id - AND tasks.id = task_specs.task_id), max_retries) - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 44, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 45) { - ensureTaskSchedulerTable(db); - db.exec(` - INSERT OR IGNORE INTO task_scheduler ( - milestone_id, slice_id, task_id, status, updated_at - ) 
- SELECT milestone_id, slice_id, id, 'queued', datetime('now') - FROM tasks - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 45, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 46) { - // validation_runs: mirrors droid's validation-contract.md + validation-state.json - // pattern. Each run stores the contract spec inline and its execution state. - db.exec(` - CREATE TABLE IF NOT EXISTS validation_runs ( - run_id TEXT PRIMARY KEY, - milestone_id TEXT NOT NULL, - slice_id TEXT, - task_id TEXT, - contract TEXT NOT NULL DEFAULT '', - status TEXT NOT NULL DEFAULT 'pending', - verdict TEXT NOT NULL DEFAULT '', - rationale TEXT NOT NULL DEFAULT '', - findings TEXT NOT NULL DEFAULT '', - started_at TEXT, - completed_at TEXT, - created_at TEXT NOT NULL, - FOREIGN KEY (milestone_id) REFERENCES milestones(id) - ) - `); - db.exec(` - CREATE INDEX IF NOT EXISTS idx_validation_runs_scope - ON validation_runs(milestone_id, slice_id, task_id) - `); - db.exec(` - CREATE VIEW IF NOT EXISTS latest_validation_state AS - SELECT vr.* - FROM validation_runs vr - WHERE vr.rowid = ( - SELECT MAX(v2.rowid) - FROM validation_runs v2 - WHERE v2.milestone_id = vr.milestone_id - AND v2.slice_id IS vr.slice_id - AND v2.task_id IS vr.task_id - ) - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 46, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 47) { - // Drop unused superseded_by column from validation_runs. - // The column was never written or queried — dead schema from v46. - const cols = db - .prepare("PRAGMA table_info(validation_runs)") - .all() - .map((c) => c.name); - if (cols.includes("superseded_by")) { - db.exec("ALTER TABLE validation_runs DROP COLUMN superseded_by"); - } - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 47, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 48) { - // Session layer: create tables, backfill from existing headless_runs and - // audit_turn_index so historical data is queryable from day one. - // Message text will be NULL for backfilled turns — it was never stored. - ensureSessionTables(db); - // Backfill: one session per headless run. - db.exec(` - INSERT OR IGNORE INTO sessions (session_id, trace_id, mode, cwd, created_at, updated_at) - SELECT run_id, NULL, 'headless', '', created_at, updated_at - FROM headless_runs - `); - // Backfill: one session per distinct trace_id in audit_turn_index. - // Reconstruct created_at/updated_at from the min/max timestamps. - db.exec(` - INSERT OR IGNORE INTO sessions (session_id, trace_id, mode, cwd, created_at, updated_at) - SELECT trace_id, trace_id, 'interactive', - '', MIN(first_ts), MAX(last_ts) - FROM audit_turn_index - GROUP BY trace_id - `); - // Backfill: one turn row per (trace_id, turn_id) in audit_turn_index. - // turn_index derived from row order within trace; message text is NULL. - db.exec(` - INSERT OR IGNORE INTO turns (session_id, turn_index, user_message, assistant_response, ts) - SELECT - trace_id, - ROW_NUMBER() OVER (PARTITION BY trace_id ORDER BY first_ts) - 1, - NULL, NULL, - first_ts - FROM audit_turn_index - `); - // Rebuild FTS index from any turns that have text. - // None from backfill yet, but required so the FTS table is consistent. 
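The latest_validation_state view defined in the v46 step keeps only the newest run per scope, so a status read needs no ORDER BY/LIMIT; a usage sketch (the IDs are placeholders):

const latest = db
  .prepare(`
    SELECT status, verdict, rationale
    FROM latest_validation_state
    WHERE milestone_id = ? AND slice_id IS ? AND task_id IS ?
  `)
  // IS matches the view's NULL-safe comparisons, so passing null for slice_id
  // or task_id finds milestone- or slice-scoped runs as well.
  .get("M01", "S01", "T01");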
- db.exec(`INSERT INTO turns_fts(turns_fts) VALUES ('rebuild')`); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 48, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 49) { - // Add session_snapshots table — checkpoints before irreversible ops. - // Safe to call on fresh DBs too (CREATE TABLE IF NOT EXISTS). - ensureSessionSnapshotTable(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 49, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 50) { - // Add sleeptime_consolidation_queue — decouples memory consolidation - // from the conversation turn so the daemon can drain it asynchronously. - ensureSleeptimeQueueTable(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 50, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 51) { - // Add deploy/smoke/release/rollback tables — closes the vision→production loop. - // deploy_runs tracks each deployment attempt; smoke_results tracks live verification; - // release_records tracks version bumps and publishes; rollback_runs tracks reversions. - ensureDeployTables(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 51, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 52) { - // Add triage_runs/evals/items/skills, runtime_counters, and - // validation_attention_markers tables — migrate JSONL structured state to DB. - ensureTriageTables(db); - ensureRuntimeCounterTable(db); - ensureValidationAttentionMarkersTable(db); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 52, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 53) { - // Add routing_history and routing_feedback tables — migrate file-based - // routing history to DB-first storage. - db.exec(` - CREATE TABLE IF NOT EXISTS routing_history ( - pattern TEXT NOT NULL, - tier TEXT NOT NULL, - success_count INTEGER NOT NULL DEFAULT 0, - fail_count INTEGER NOT NULL DEFAULT 0, - updated_at TEXT NOT NULL, - PRIMARY KEY (pattern, tier) - ); - CREATE TABLE IF NOT EXISTS routing_feedback ( - id INTEGER PRIMARY KEY AUTOINCREMENT, - pattern TEXT NOT NULL, - tier TEXT NOT NULL, - feedback TEXT NOT NULL, - recorded_at TEXT NOT NULL - ); - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 53, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 54) { - // Migrate metrics ledger from .sf/runtime/metrics.json to DB-first - // unit_metrics and project_metrics_meta tables. 
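The routing_history table from the v53 step is shaped for an increment-on-conflict upsert; a sketch of how a caller might record one routing outcome (recordRoutingOutcome is illustrative, not a wrapper this file exports):

function recordRoutingOutcome(db, pattern, tier, succeeded) {
  // One row per (pattern, tier); counters accumulate across recordings.
  db.prepare(`
    INSERT INTO routing_history (pattern, tier, success_count, fail_count, updated_at)
    VALUES (?, ?, ?, ?, ?)
    ON CONFLICT(pattern, tier) DO UPDATE SET
      success_count = success_count + excluded.success_count,
      fail_count    = fail_count + excluded.fail_count,
      updated_at    = excluded.updated_at
  `).run(pattern, tier, succeeded ? 1 : 0, succeeded ? 0 : 1, new Date().toISOString());
}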
- db.exec(` - CREATE TABLE IF NOT EXISTS unit_metrics ( - type TEXT NOT NULL, - id TEXT NOT NULL, - started_at INTEGER NOT NULL, - finished_at INTEGER NOT NULL, - model TEXT NOT NULL, - auto_session_key TEXT, - tokens_input INTEGER NOT NULL DEFAULT 0, - tokens_output INTEGER NOT NULL DEFAULT 0, - tokens_cache_read INTEGER NOT NULL DEFAULT 0, - tokens_cache_write INTEGER NOT NULL DEFAULT 0, - tokens_total INTEGER NOT NULL DEFAULT 0, - cost REAL NOT NULL DEFAULT 0, - tool_calls INTEGER NOT NULL DEFAULT 0, - assistant_messages INTEGER NOT NULL DEFAULT 0, - user_messages INTEGER NOT NULL DEFAULT 0, - api_requests INTEGER NOT NULL DEFAULT 0, - tier TEXT, - model_downgraded INTEGER, - context_window_tokens INTEGER, - truncation_sections INTEGER, - continue_here_fired INTEGER, - prompt_char_count INTEGER, - baseline_char_count INTEGER, - cache_hit_rate INTEGER, - skills TEXT, - PRIMARY KEY (type, id, started_at) - ); - CREATE TABLE IF NOT EXISTS project_metrics_meta ( - key TEXT PRIMARY KEY, - value TEXT NOT NULL - ); - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 54, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 55) { - // Schema v55: composite index for audit_events + task access-pattern views - // Guard: audit_events may not exist in minimal legacy DBs (it will be dropped in v58) - if (tableExists(db, "audit_events")) { - db.exec( - `CREATE INDEX IF NOT EXISTS idx_audit_events_category ON audit_events(category, type, ts DESC)`, - ); - } - db.exec( - `CREATE VIEW IF NOT EXISTS active_tasks AS SELECT * FROM tasks WHERE status NOT IN ('done','complete','completed','cancelled')`, - ); - db.exec(` - CREATE VIEW IF NOT EXISTS v_task_full AS - SELECT t.*, ts.spec_version, ts.verify AS spec_verify, - ts.inputs AS spec_inputs, ts.expected_output AS spec_expected_output - FROM tasks t - LEFT JOIN task_specs ts - ON t.milestone_id = ts.milestone_id - AND t.slice_id = ts.slice_id - AND t.id = ts.task_id - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 55, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 56) { - // Schema v56: move metrics table to dedicated metrics.db — drop from main DB - // to eliminate WAL pressure from high-frequency telemetry writes. - db.exec(`DROP TABLE IF EXISTS metrics`); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 56, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 57) { - // Schema v57: add archived_at to sessions for soft-delete / archive support. 
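The v55 access-pattern views exist so read paths stop hand-joining tasks and task_specs; a usage sketch against v_task_full (the milestone and slice IDs are placeholders):

// Remaining work for one slice, with spec fields already joined in.
const openTasks = db
  .prepare(`
    SELECT id, title, task_status, spec_verify, spec_expected_output
    FROM v_task_full
    WHERE milestone_id = ? AND slice_id = ?
      AND status NOT IN ('done', 'complete', 'completed', 'cancelled')
    ORDER BY sequence
  `)
  .all("M01", "S01");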
- db.exec(`ALTER TABLE sessions ADD COLUMN archived_at TEXT DEFAULT NULL`); - db.exec( - `CREATE INDEX IF NOT EXISTS idx_sessions_archived ON sessions(archived_at) WHERE archived_at IS NOT NULL`, - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 57, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 58) { - // Schema v58: move trace data to JSONL files — drop gate_runs, turn_git_transactions, audit_events - db.exec("DROP TABLE IF EXISTS gate_runs"); - db.exec("DROP TABLE IF EXISTS turn_git_transactions"); - db.exec("DROP TABLE IF EXISTS audit_events"); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 58, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 59) { - // Schema v59: add failure_mode to llm_task_outcomes so the learning system - // can differentiate transient failures (rate_limit) from hard failures - // (quota_exhausted, auth_error) when weighting model demotions. - ensureColumn( - db, - "llm_task_outcomes", - "failure_mode", - "ALTER TABLE llm_task_outcomes ADD COLUMN failure_mode TEXT DEFAULT NULL", - ); - db.exec( - "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_failure_mode ON llm_task_outcomes(model_id, failure_mode, recorded_at DESC)", - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 59, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 60) { - // Schema v60: add frontmatter_version to tasks table for future frontmatter - // schema migrations. Defaults to 1 for all existing rows. - ensureColumn( - db, - "tasks", - "frontmatter_version", - "ALTER TABLE tasks ADD COLUMN frontmatter_version INTEGER NOT NULL DEFAULT 1", - ); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 60, - ":applied_at": new Date().toISOString(), - }); - } - if (currentVersion < 61) { - // Schema v61: intent_chapters — crash-resume context for autonomous units. - // Each chapter records the agent's declared intent when a unit begins - // (chapter_open) and clears it on normal close (chapter_close). On - // crash-resume, the open chapter is surfaced to the prompt so the agent - // knows where it left off without replaying the full transcript. - db.exec(` - CREATE TABLE IF NOT EXISTS intent_chapters ( - id TEXT PRIMARY KEY, - unit_type TEXT NOT NULL, - unit_id TEXT NOT NULL, - milestone_id TEXT, - slice_id TEXT, - task_id TEXT, - intent TEXT NOT NULL, - opened_at TEXT NOT NULL, - closed_at TEXT, - outcome TEXT, - metadata_json TEXT - ); - CREATE INDEX IF NOT EXISTS idx_intent_chapters_unit - ON intent_chapters(unit_type, unit_id); - CREATE INDEX IF NOT EXISTS idx_intent_chapters_open - ON intent_chapters(closed_at, opened_at) - WHERE closed_at IS NULL; - `); - db.prepare( - "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", - ).run({ - ":version": 61, - ":applied_at": new Date().toISOString(), - }); - } - db.exec("COMMIT"); - } catch (err) { - db.exec("ROLLBACK"); - throw err; - } -} -let currentDb = null; -let currentPath = null; -let currentPid = 0; -let _exitHandlerRegistered = false; -let _dbOpenAttempted = false; -/** - * Get the name of the SQLite provider currently loaded (or null if unavailable). 
- */ -export function getDbProvider() { - loadProvider(); - return "node:sqlite"; -} -/** - * Check if the database is currently open and available for queries. - */ -export function isDbAvailable() { - return currentDb !== null; -} -/** - * Returns true if openDatabase() has been called at least once this session. - * Used to distinguish "DB not yet initialized" from "DB genuinely unavailable" - * so that early callers (e.g. before_agent_start context injection) don't - * trigger a false degraded-mode warning. - */ -export function wasDbOpenAttempted() { - return _dbOpenAttempted; -} -/** - * Get the current database adapter, or null if the database is not open. - */ -export function getDatabase() { - return currentDb; -} -/** - * Open the database at the specified path. Returns true if successful. - */ -export function openDatabase(path) { - _dbOpenAttempted = true; - if (currentDb && currentPath !== path) closeDatabase(); - if (currentDb && currentPath === path) return true; - const rawDb = openRawDb(path); - if (!rawDb) return false; - const adapter = createAdapter(rawDb); - const fileBacked = path !== ":memory:"; - try { - initSchema(adapter, fileBacked); - createDatabaseSnapshot(rawDb, path); - performDatabaseMaintenance(rawDb, path); - } catch (err) { - // Corrupt freelist: DDL fails with "malformed" but VACUUM can rebuild. - // Attempt VACUUM recovery before giving up (see #2519). - if ( - fileBacked && - err instanceof Error && - err.message?.includes("malformed") - ) { - try { - adapter.exec("VACUUM"); - initSchema(adapter, fileBacked); - process.stderr.write("sf-db: recovered corrupt database via VACUUM\n"); - } catch (retryErr) { - try { - adapter.close(); - } catch (e) { - logWarning("db", `close after VACUUM failed: ${e.message}`); - } - throw retryErr; - } - } else { - try { - adapter.close(); - } catch (e) { - logWarning("db", `close after VACUUM failed: ${e.message}`); - } - throw err; - } - } - currentDb = adapter; - currentPath = path; - currentPid = process.pid; - if (!_exitHandlerRegistered) { - _exitHandlerRegistered = true; - process.on("exit", () => { - try { - closeDatabase(); - } catch (e) { - logWarning("db", `exit handler close failed: ${e.message}`); - } - }); - } - return true; -} -/** - * Flush the WAL to the main DB file using a PASSIVE checkpoint. - * - * Purpose: safely persist all committed transactions to the main DB file at - * controlled loop boundaries (post-unit finalize). With wal_autocheckpoint=0, - * this is the only way WAL pages are flushed — keeping the checkpoint window - * predictable and crash-safe (no mid-operation checkpoint that an OOM kill - * could interrupt). - * - * PASSIVE is used (not TRUNCATE) so concurrent readers are not blocked. The - * WAL is truncated on close via closeDatabase(). - * - * Consumer: runFinalize() in auto/phases.js after each successful unit. - */ -export function checkpointWal() { - if (!currentDb) return; - try { - currentDb.exec("PRAGMA wal_checkpoint(PASSIVE)"); - } catch (e) { - logWarning( - "db", - `WAL checkpoint failed: ${e instanceof Error ? e.message : String(e)}`, - ); - } -} - -/** - * Close the database connection. 
- */ -export function closeDatabase() { - if (currentDb) { - try { - currentDb.exec("PRAGMA wal_checkpoint(TRUNCATE)"); - } catch (e) { - logWarning("db", `WAL checkpoint failed: ${e.message}`); - } - try { - // Incremental vacuum to reclaim space without blocking - currentDb.exec("PRAGMA incremental_vacuum(64)"); - } catch (e) { - logWarning("db", `incremental vacuum failed: ${e.message}`); - } - try { - currentDb.close(); - } catch (e) { - logWarning("db", `database close failed: ${e.message}`); - } - currentDb = null; - currentPath = null; - currentPid = 0; - _dbOpenAttempted = false; - } -} -/** Run a full VACUUM — call sparingly (e.g. after milestone completion). */ -/** - * Vacuum the database to reclaim disk space and optimize. - */ -export function vacuumDatabase() { - if (!currentDb) return; - try { - currentDb.exec("VACUUM"); - } catch (e) { - logWarning("db", `VACUUM failed: ${e.message}`); - } -} -let _txDepth = 0; -/** - * Execute a callback within a database transaction (BEGIN...COMMIT or ROLLBACK). - */ -export function transaction(fn) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - // Re-entrant: if already inside a transaction, just run fn() without - // starting a new one. SQLite does not support nested BEGIN/COMMIT. - if (_txDepth > 0) { - _txDepth++; - try { - return fn(); - } finally { - _txDepth--; - } - } - _txDepth++; - currentDb.exec("BEGIN"); - try { - const result = fn(); - currentDb.exec("COMMIT"); - return result; - } catch (err) { - currentDb.exec("ROLLBACK"); - throw err; - } finally { - _txDepth--; - } -} -/** - * Wrap a block of reads in a DEFERRED transaction so that all SELECTs observe - * a consistent snapshot of the DB even if a concurrent writer commits between - * them. Use this for multi-query read flows (e.g. tool executors that query - * milestone + slices + counts and want one snapshot). Re-entrant — if already - * inside a transaction, runs fn() without starting a nested one. - */ -/** - * Execute a callback within a read-only database transaction. - */ -export function readTransaction(fn) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - if (_txDepth > 0) { - _txDepth++; - try { - return fn(); - } finally { - _txDepth--; - } - } - _txDepth++; - currentDb.exec("BEGIN DEFERRED"); - try { - const result = fn(); - currentDb.exec("COMMIT"); - return result; - } catch (err) { - try { - currentDb.exec("ROLLBACK"); - } catch (rollbackErr) { - // A failed ROLLBACK after a failed read is a split-brain signal — - // the transaction is in an indeterminate state. Surface it via the - // logger instead of swallowing it. - logError("db", "snapshotState ROLLBACK failed", { - error: rollbackErr.message, - }); - } - throw err; - } finally { - _txDepth--; - } -} -export function insertDecision(d) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT INTO decisions (id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by) - VALUES (:id, :when_context, :scope, :decision, :choice, :rationale, :revisable, :made_by, :superseded_by)`) - .run({ - ":id": d.id, - ":when_context": d.when_context, - ":scope": d.scope, - ":decision": d.decision, - ":choice": d.choice, - ":rationale": d.rationale, - ":revisable": d.revisable, - ":made_by": d.made_by ?? 
"agent", - ":superseded_by": d.superseded_by, - }); -} -export function getDecisionById(id) { - if (!currentDb) return null; - const row = currentDb.prepare("SELECT * FROM decisions WHERE id = ?").get(id); - if (!row) return null; - return { - seq: row["seq"], - id: row["id"], - when_context: row["when_context"], - scope: row["scope"], - decision: row["decision"], - choice: row["choice"], - rationale: row["rationale"], - revisable: row["revisable"], - made_by: row["made_by"] ?? "agent", - superseded_by: row["superseded_by"] ?? null, - }; -} -export function getActiveDecisions() { - if (!currentDb) return []; - const rows = currentDb.prepare("SELECT * FROM active_decisions").all(); - return rows.map((row) => ({ - seq: row["seq"], - id: row["id"], - when_context: row["when_context"], - scope: row["scope"], - decision: row["decision"], - choice: row["choice"], - rationale: row["rationale"], - revisable: row["revisable"], - made_by: row["made_by"] ?? "agent", - superseded_by: null, - })); -} -export function insertRequirement(r) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT INTO requirements (id, class, status, description, why, source, primary_owner, supporting_slices, validation, notes, full_content, superseded_by) - VALUES (:id, :class, :status, :description, :why, :source, :primary_owner, :supporting_slices, :validation, :notes, :full_content, :superseded_by)`) - .run({ - ":id": r.id, - ":class": r.class, - ":status": r.status, - ":description": r.description, - ":why": r.why, - ":source": r.source, - ":primary_owner": r.primary_owner, - ":supporting_slices": r.supporting_slices, - ":validation": r.validation, - ":notes": r.notes, - ":full_content": r.full_content, - ":superseded_by": r.superseded_by, - }); -} -export function getRequirementById(id) { - if (!currentDb) return null; - const row = currentDb - .prepare("SELECT * FROM requirements WHERE id = ?") - .get(id); - if (!row) return null; - return { - id: row["id"], - class: row["class"], - status: row["status"], - description: row["description"], - why: row["why"], - source: row["source"], - primary_owner: row["primary_owner"], - supporting_slices: row["supporting_slices"], - validation: row["validation"], - notes: row["notes"], - full_content: row["full_content"], - superseded_by: row["superseded_by"] ?? null, - }; -} -export function getActiveRequirements() { - if (!currentDb) return []; - const rows = currentDb.prepare("SELECT * FROM active_requirements").all(); - return rows.map((row) => ({ - id: row["id"], - class: row["class"], - status: row["status"], - description: row["description"], - why: row["why"], - source: row["source"], - primary_owner: row["primary_owner"], - supporting_slices: row["supporting_slices"], - validation: row["validation"], - notes: row["notes"], - full_content: row["full_content"], - superseded_by: null, - })); -} -export function getDbOwnerPid() { - return currentPid; -} -export function getDbPath() { - return currentPath; -} - -/** - * Load persisted session mode state from DB. - * - * Purpose: restore mode state across session restarts. - * - * Consumer: AutoSession initialization. - */ -export function loadSessionModeState() { - if (!currentDb) return null; - try { - const row = currentDb - .prepare("SELECT * FROM session_mode_state WHERE id = 1") - .get(); - if (!row) return null; - return { - workMode: row["work_mode"] ?? "chat", - runControl: row["run_control"] ?? "manual", - permissionProfile: row["permission_profile"] ?? 
"restricted", - modelMode: row["model_mode"] ?? "smart", - surface: row["surface"] ?? "tui", - updatedAt: row["updated_at"] ?? null, - }; - } catch { - return null; - } -} - -/** - * Persist the current session mode into the project database. - * - * Purpose: keep work mode, run control, permission profile, and model mode - * stable across reload/resume without letting command handlers write SQL. - * - * Consumer: AutoSession.setMode() after validated mode transitions. - */ -export function saveSessionModeState(mode) { - if (!currentDb) return false; - currentDb - .prepare(` - INSERT INTO session_mode_state (id, work_mode, run_control, permission_profile, model_mode, surface, updated_at) - VALUES (1, :workMode, :runControl, :permissionProfile, :modelMode, :surface, :updatedAt) - ON CONFLICT(id) DO UPDATE SET - work_mode = excluded.work_mode, - run_control = excluded.run_control, - permission_profile = excluded.permission_profile, - model_mode = excluded.model_mode, - surface = excluded.surface, - updated_at = excluded.updated_at - `) - .run({ - ":workMode": mode.workMode, - ":runControl": mode.runControl, - ":permissionProfile": mode.permissionProfile, - ":modelMode": mode.modelMode, - ":surface": mode.surface ?? "tui", - ":updatedAt": mode.updatedAt ?? new Date().toISOString(), - }); - return true; -} - -export function _getAdapter() { - return currentDb; -} -export function _resetProvider() { - loadAttempted = false; -} -export function upsertDecision(d) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - // Use ON CONFLICT DO UPDATE instead of INSERT OR REPLACE to preserve the - // seq column. INSERT OR REPLACE deletes then reinserts, resetting seq and - // corrupting decision ordering in DECISIONS.md after reconcile replay. - currentDb - .prepare(`INSERT INTO decisions (id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by) - VALUES (:id, :when_context, :scope, :decision, :choice, :rationale, :revisable, :made_by, :superseded_by) - ON CONFLICT(id) DO UPDATE SET - when_context = excluded.when_context, - scope = excluded.scope, - decision = excluded.decision, - choice = excluded.choice, - rationale = excluded.rationale, - revisable = excluded.revisable, - made_by = excluded.made_by, - superseded_by = excluded.superseded_by`) - .run({ - ":id": d.id, - ":when_context": d.when_context, - ":scope": d.scope, - ":decision": d.decision, - ":choice": d.choice, - ":rationale": d.rationale, - ":revisable": d.revisable, - ":made_by": d.made_by ?? "agent", - ":superseded_by": d.superseded_by ?? null, - }); -} -export function upsertRequirement(r) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT OR REPLACE INTO requirements (id, class, status, description, why, source, primary_owner, supporting_slices, validation, notes, full_content, superseded_by) - VALUES (:id, :class, :status, :description, :why, :source, :primary_owner, :supporting_slices, :validation, :notes, :full_content, :superseded_by)`) - .run({ - ":id": r.id, - ":class": r.class, - ":status": r.status, - ":description": r.description, - ":why": r.why, - ":source": r.source, - ":primary_owner": r.primary_owner, - ":supporting_slices": r.supporting_slices, - ":validation": r.validation, - ":notes": r.notes, - ":full_content": r.full_content, - ":superseded_by": r.superseded_by ?? 
null, - }); -} -export function clearArtifacts() { - if (!currentDb) return; - try { - currentDb.exec("DELETE FROM artifacts"); - } catch (e) { - logWarning("db", `clearArtifacts failed: ${e.message}`); - } -} -export function insertArtifact(a) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT OR REPLACE INTO artifacts (path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at) - VALUES (:path, :artifact_type, :milestone_id, :slice_id, :task_id, :full_content, :imported_at)`) - .run({ - ":path": a.path, - ":artifact_type": a.artifact_type, - ":milestone_id": a.milestone_id, - ":slice_id": a.slice_id, - ":task_id": a.task_id, - ":full_content": a.full_content, - ":imported_at": new Date().toISOString(), - }); -} -export function insertMilestone(m) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT OR IGNORE INTO milestones ( - id, title, status, depends_on, created_at, - vision, success_criteria, key_risks, proof_strategy, - verification_contract, verification_integration, verification_operational, verification_uat, - definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json, sequence - ) VALUES ( - :id, :title, :status, :depends_on, :created_at, - :vision, :success_criteria, :key_risks, :proof_strategy, - :verification_contract, :verification_integration, :verification_operational, :verification_uat, - :definition_of_done, :requirement_coverage, :boundary_map_markdown, :vision_meeting_json, :product_research_json, :sequence - )`) - .run({ - ":id": m.id, - ":title": m.title ?? "", - // Default to "queued" — never auto-create milestones as "active" (#3380). - // Callers that need "active" must pass it explicitly. - ":status": m.status ?? "queued", - ":depends_on": JSON.stringify(m.depends_on ?? []), - ":created_at": new Date().toISOString(), - ":vision": m.planning?.vision ?? "", - ":success_criteria": JSON.stringify(m.planning?.successCriteria ?? []), - ":key_risks": JSON.stringify(m.planning?.keyRisks ?? []), - ":proof_strategy": JSON.stringify(m.planning?.proofStrategy ?? []), - ":verification_contract": m.planning?.verificationContract ?? "", - ":verification_integration": m.planning?.verificationIntegration ?? "", - ":verification_operational": m.planning?.verificationOperational ?? "", - ":verification_uat": m.planning?.verificationUat ?? "", - ":definition_of_done": JSON.stringify(m.planning?.definitionOfDone ?? []), - ":requirement_coverage": m.planning?.requirementCoverage ?? "", - ":boundary_map_markdown": m.planning?.boundaryMapMarkdown ?? "", - ":vision_meeting_json": m.planning?.visionMeeting - ? JSON.stringify(m.planning.visionMeeting) - : "", - ":product_research_json": m.planning?.productResearch - ? JSON.stringify(m.planning.productResearch) - : "", - ":sequence": m.sequence ?? 0, - }); - if (hasPlanningPayload(m.planning)) { - insertMilestoneSpecIfAbsent(m.id, m.planning ?? {}); - } -} -function insertMilestoneSpecIfAbsent(milestoneId, planning = {}) { - if (!hasPlanningPayload(planning)) return; - const existing = currentDb - .prepare("SELECT * FROM milestone_specs WHERE id = ?") - .get(milestoneId); - if (existing && !isEmptyMilestoneSpec(existing)) return; - const params = { - ":id": milestoneId, - ":vision": planning.vision ?? "", - ":success_criteria": JSON.stringify(planning.successCriteria ?? []), - ":key_risks": JSON.stringify(planning.keyRisks ?? 
[]), - ":proof_strategy": JSON.stringify(planning.proofStrategy ?? []), - ":verification_contract": planning.verificationContract ?? "", - ":verification_integration": planning.verificationIntegration ?? "", - ":verification_operational": planning.verificationOperational ?? "", - ":verification_uat": planning.verificationUat ?? "", - ":definition_of_done": JSON.stringify(planning.definitionOfDone ?? []), - ":requirement_coverage": planning.requirementCoverage ?? "", - ":boundary_map_markdown": planning.boundaryMapMarkdown ?? "", - ":vision_meeting_json": planning.visionMeeting - ? JSON.stringify(planning.visionMeeting) - : "", - ":product_research_json": planning.productResearch - ? JSON.stringify(planning.productResearch) - : "", - ":created_at": new Date().toISOString(), - }; - if (existing) { - currentDb - .prepare(`UPDATE milestone_specs SET - vision = :vision, - success_criteria = :success_criteria, - key_risks = :key_risks, - proof_strategy = :proof_strategy, - verification_contract = :verification_contract, - verification_integration = :verification_integration, - verification_operational = :verification_operational, - verification_uat = :verification_uat, - definition_of_done = :definition_of_done, - requirement_coverage = :requirement_coverage, - boundary_map_markdown = :boundary_map_markdown, - vision_meeting_json = :vision_meeting_json, - product_research_json = :product_research_json - WHERE id = :id`) - .run(params); - return; - } - currentDb - .prepare(`INSERT OR IGNORE INTO milestone_specs ( - id, vision, success_criteria, key_risks, proof_strategy, - verification_contract, verification_integration, verification_operational, verification_uat, - definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json, - spec_version, created_at - ) VALUES ( - :id, :vision, :success_criteria, :key_risks, :proof_strategy, - :verification_contract, :verification_integration, :verification_operational, :verification_uat, - :definition_of_done, :requirement_coverage, :boundary_map_markdown, :vision_meeting_json, :product_research_json, - 1, :created_at - )`) - .run(params); -} -export function upsertMilestonePlanning(milestoneId, planning) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - insertMilestoneSpecIfAbsent(milestoneId, planning); - currentDb - .prepare(`UPDATE milestones SET - title = COALESCE(NULLIF(:title, ''), title), - status = COALESCE(NULLIF(:status, ''), status), - vision = COALESCE(:vision, vision), - success_criteria = COALESCE(:success_criteria, success_criteria), - key_risks = COALESCE(:key_risks, key_risks), - proof_strategy = COALESCE(:proof_strategy, proof_strategy), - verification_contract = COALESCE(:verification_contract, verification_contract), - verification_integration = COALESCE(:verification_integration, verification_integration), - verification_operational = COALESCE(:verification_operational, verification_operational), - verification_uat = COALESCE(:verification_uat, verification_uat), - definition_of_done = COALESCE(:definition_of_done, definition_of_done), - requirement_coverage = COALESCE(:requirement_coverage, requirement_coverage), - boundary_map_markdown = COALESCE(:boundary_map_markdown, boundary_map_markdown), - vision_meeting_json = COALESCE(:vision_meeting_json, vision_meeting_json), - product_research_json = COALESCE(:product_research_json, product_research_json) - WHERE id = :id`) - .run({ - ":id": milestoneId, - ":title": planning.title ?? 
"", - ":status": planning.status ?? "", - ":vision": planning.vision ?? null, - ":success_criteria": planning.successCriteria - ? JSON.stringify(planning.successCriteria) - : null, - ":key_risks": planning.keyRisks - ? JSON.stringify(planning.keyRisks) - : null, - ":proof_strategy": planning.proofStrategy - ? JSON.stringify(planning.proofStrategy) - : null, - ":verification_contract": planning.verificationContract ?? null, - ":verification_integration": planning.verificationIntegration ?? null, - ":verification_operational": planning.verificationOperational ?? null, - ":verification_uat": planning.verificationUat ?? null, - ":definition_of_done": planning.definitionOfDone - ? JSON.stringify(planning.definitionOfDone) - : null, - ":requirement_coverage": planning.requirementCoverage ?? null, - ":boundary_map_markdown": planning.boundaryMapMarkdown ?? null, - ":vision_meeting_json": planning.visionMeeting - ? JSON.stringify(planning.visionMeeting) - : null, - ":product_research_json": planning.productResearch - ? JSON.stringify(planning.productResearch) - : null, - }); -} -export function insertSlice(s) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT INTO slices ( - milestone_id, id, title, status, risk, depends, demo, created_at, - goal, success_criteria, proof_level, integration_closure, observability_impact, - adversarial_partner, adversarial_combatant, adversarial_architect, planning_meeting_json, sequence, - is_sketch, sketch_scope - ) VALUES ( - :milestone_id, :id, :title, :status, :risk, :depends, :demo, :created_at, - :goal, :success_criteria, :proof_level, :integration_closure, :observability_impact, - :adversarial_partner, :adversarial_combatant, :adversarial_architect, :planning_meeting_json, :sequence, - :is_sketch, :sketch_scope - ) - ON CONFLICT (milestone_id, id) DO UPDATE SET - title = CASE WHEN :raw_title IS NOT NULL THEN excluded.title ELSE slices.title END, - status = CASE WHEN slices.status IN ('complete', 'done') THEN slices.status ELSE excluded.status END, - risk = CASE WHEN :raw_risk IS NOT NULL THEN excluded.risk ELSE slices.risk END, - depends = excluded.depends, - demo = CASE WHEN :raw_demo IS NOT NULL THEN excluded.demo ELSE slices.demo END, - goal = CASE WHEN :raw_goal IS NOT NULL THEN excluded.goal ELSE slices.goal END, - success_criteria = CASE WHEN :raw_success_criteria IS NOT NULL THEN excluded.success_criteria ELSE slices.success_criteria END, - proof_level = CASE WHEN :raw_proof_level IS NOT NULL THEN excluded.proof_level ELSE slices.proof_level END, - integration_closure = CASE WHEN :raw_integration_closure IS NOT NULL THEN excluded.integration_closure ELSE slices.integration_closure END, - observability_impact = CASE WHEN :raw_observability_impact IS NOT NULL THEN excluded.observability_impact ELSE slices.observability_impact END, - adversarial_partner = CASE WHEN :raw_adversarial_partner IS NOT NULL THEN excluded.adversarial_partner ELSE slices.adversarial_partner END, - adversarial_combatant = CASE WHEN :raw_adversarial_combatant IS NOT NULL THEN excluded.adversarial_combatant ELSE slices.adversarial_combatant END, - adversarial_architect = CASE WHEN :raw_adversarial_architect IS NOT NULL THEN excluded.adversarial_architect ELSE slices.adversarial_architect END, - planning_meeting_json = CASE WHEN :raw_planning_meeting_json IS NOT NULL THEN excluded.planning_meeting_json ELSE slices.planning_meeting_json END, - sequence = CASE WHEN :raw_sequence IS NOT NULL THEN excluded.sequence ELSE 
slices.sequence END, - is_sketch = CASE WHEN :raw_is_sketch IS NOT NULL THEN excluded.is_sketch ELSE slices.is_sketch END, - sketch_scope = CASE WHEN :raw_sketch_scope IS NOT NULL THEN excluded.sketch_scope ELSE slices.sketch_scope END`) - .run({ - ":milestone_id": s.milestoneId, - ":id": s.id, - ":title": s.title ?? "", - ":status": s.status ?? "pending", - ":risk": s.risk ?? "medium", - ":depends": JSON.stringify(s.depends ?? []), - ":demo": s.demo ?? "", - ":created_at": new Date().toISOString(), - ":goal": s.planning?.goal ?? "", - ":success_criteria": s.planning?.successCriteria ?? "", - ":proof_level": s.planning?.proofLevel ?? "", - ":integration_closure": s.planning?.integrationClosure ?? "", - ":observability_impact": s.planning?.observabilityImpact ?? "", - ":adversarial_partner": s.planning?.adversarialReview?.partner ?? "", - ":adversarial_combatant": s.planning?.adversarialReview?.combatant ?? "", - ":adversarial_architect": s.planning?.adversarialReview?.architect ?? "", - ":planning_meeting_json": s.planning?.planningMeeting - ? JSON.stringify(s.planning.planningMeeting) - : "", - ":sequence": s.sequence ?? 0, - ":is_sketch": s.isSketch === true ? 1 : 0, - ":sketch_scope": s.sketchScope ?? "", - // Raw sentinel params: NULL when caller omitted the field, used in ON CONFLICT guards - ":raw_title": s.title ?? null, - ":raw_risk": s.risk ?? null, - ":raw_demo": s.demo ?? null, - ":raw_goal": s.planning?.goal ?? null, - ":raw_success_criteria": s.planning?.successCriteria ?? null, - ":raw_proof_level": s.planning?.proofLevel ?? null, - ":raw_integration_closure": s.planning?.integrationClosure ?? null, - ":raw_observability_impact": s.planning?.observabilityImpact ?? null, - ":raw_adversarial_partner": - s.planning?.adversarialReview?.partner ?? null, - ":raw_adversarial_combatant": - s.planning?.adversarialReview?.combatant ?? null, - ":raw_adversarial_architect": - s.planning?.adversarialReview?.architect ?? null, - ":raw_planning_meeting_json": s.planning?.planningMeeting - ? JSON.stringify(s.planning.planningMeeting) - : null, - ":raw_sequence": s.sequence ?? null, - ":raw_is_sketch": s.isSketch === undefined ? null : s.isSketch ? 1 : 0, - ":raw_sketch_scope": s.sketchScope === undefined ? null : s.sketchScope, - }); - insertSliceSpecIfAbsent(s.milestoneId, s.id, s.planning ?? {}); -} -function insertSliceSpecIfAbsent(milestoneId, sliceId, planning = {}) { - currentDb - .prepare(`INSERT OR IGNORE INTO slice_specs ( - milestone_id, slice_id, goal, success_criteria, proof_level, - integration_closure, observability_impact, - adversarial_partner, adversarial_combatant, adversarial_architect, - planning_meeting_json, spec_version, created_at - ) VALUES ( - :milestone_id, :slice_id, :goal, :success_criteria, :proof_level, - :integration_closure, :observability_impact, - :adversarial_partner, :adversarial_combatant, :adversarial_architect, - :planning_meeting_json, 1, :created_at - )`) - .run({ - ":milestone_id": milestoneId, - ":slice_id": sliceId, - ":goal": planning.goal ?? "", - ":success_criteria": planning.successCriteria ?? "", - ":proof_level": planning.proofLevel ?? "", - ":integration_closure": planning.integrationClosure ?? "", - ":observability_impact": planning.observabilityImpact ?? "", - ":adversarial_partner": planning.adversarialReview?.partner ?? "", - ":adversarial_combatant": planning.adversarialReview?.combatant ?? "", - ":adversarial_architect": planning.adversarialReview?.architect ?? "", - ":planning_meeting_json": planning.planningMeeting - ? 
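// ── Usage sketch of the :raw_* sentinel pattern (ids are made up) ──
// The raw params are NULL exactly when the caller omitted a field, so a
// re-import that omits a field never clobbers the stored value, while an
// explicitly provided value (even an empty string) wins on conflict.
import { insertSlice, getSlice } from "./sf-db.js";

insertSlice({
  milestoneId: "M-1",
  id: "S-1",
  title: "Wire up login",
  risk: "high",
  planning: { goal: "User can sign in" },
});

// Second write omits `risk` and `planning.goal`, so :raw_risk / :raw_goal are
// NULL and the ON CONFLICT guards keep "high" and the original goal.
insertSlice({ milestoneId: "M-1", id: "S-1", title: "Wire up login (v2)" });

const slice = getSlice("M-1", "S-1");
// slice.title === "Wire up login (v2)", slice.risk === "high", slice.goal preserved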
JSON.stringify(planning.planningMeeting) - : "", - ":created_at": new Date().toISOString(), - }); -} -/** - * SF ADR-011: clear the is_sketch flag after refine-slice fills in the full plan. - * Idempotent — safe to call on already-refined slices. - */ -export function clearSliceSketch(milestoneId, sliceId) { - setSliceSketchFlag(milestoneId, sliceId, false); -} -/** - * SF ADR-011: generalized sketch-flag setter — flip true or false. - * Idempotent. Use this instead of clearSliceSketch when you also need to - * mark a slice as a sketch (e.g., a re-plan flow that wants to revert to - * sketch-then-refine). - */ -export function setSliceSketchFlag(milestoneId, sliceId, isSketch) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `UPDATE slices SET is_sketch = :is_sketch WHERE milestone_id = :mid AND id = :sid`, - ) - .run({ - ":is_sketch": isSketch ? 1 : 0, - ":mid": milestoneId, - ":sid": sliceId, - }); -} -/** - * SF ADR-011 auto-heal: reconcile stale is_sketch=1 rows whose PLAN file already - * exists on disk. The caller passes a predicate that uses the canonical path - * resolver so path logic stays in one place. Safe to call repeatedly — only - * flips rows that meet the predicate. - */ -export function autoHealSketchFlags(milestoneId, hasPlanFile) { - if (!currentDb) return; - const rows = currentDb - .prepare( - `SELECT id FROM slices WHERE milestone_id = :mid AND is_sketch = 1`, - ) - .all({ ":mid": milestoneId }); - for (const row of rows) { - if (hasPlanFile(row.id)) { - setSliceSketchFlag(milestoneId, row.id, false); - } - } -} -/** - * SF ADR-011 P2: list tasks across a milestone that have an - * escalation artifact path. By default returns only ACTIVE escalations - * (pending OR awaiting_review); pass includeResolved=true to also return - * resolved-but-still-recorded entries (audit trail). - * - * Used by `/escalate list` to enumerate cross-slice escalations. - */ -export function listEscalationArtifacts(milestoneId, includeResolved = false) { - if (!currentDb) return []; - const filter = includeResolved - ? "escalation_artifact_path IS NOT NULL" - : "(escalation_pending = 1 OR escalation_awaiting_review = 1) AND escalation_artifact_path IS NOT NULL"; - const rows = currentDb - .prepare( - `SELECT * FROM tasks WHERE milestone_id = :mid AND ${filter} ORDER BY slice_id, sequence, id`, - ) - .all({ ":mid": milestoneId }); - return rows.map(rowToTask); -} -export function upsertSlicePlanning(milestoneId, sliceId, planning) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - insertSliceSpecIfAbsent(milestoneId, sliceId, planning); - currentDb - .prepare(`UPDATE slices SET - goal = COALESCE(:goal, goal), - success_criteria = COALESCE(:success_criteria, success_criteria), - proof_level = COALESCE(:proof_level, proof_level), - integration_closure = COALESCE(:integration_closure, integration_closure), - observability_impact = COALESCE(:observability_impact, observability_impact), - adversarial_partner = COALESCE(:adversarial_partner, adversarial_partner), - adversarial_combatant = COALESCE(:adversarial_combatant, adversarial_combatant), - adversarial_architect = COALESCE(:adversarial_architect, adversarial_architect), - planning_meeting_json = COALESCE(:planning_meeting_json, planning_meeting_json) - WHERE milestone_id = :milestone_id AND id = :id`) - .run({ - ":milestone_id": milestoneId, - ":id": sliceId, - ":goal": planning.goal ?? null, - ":success_criteria": planning.successCriteria ?? 
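// ── Caller-side sketch for autoHealSketchFlags (path shape is illustrative) ──
// The DB layer only flips is_sketch; the predicate decides whether a PLAN file
// already exists on disk. The directory layout and file name below are an
// assumption — real callers pass the canonical path resolver instead.
import { existsSync } from "node:fs";
import { join } from "node:path";
import { autoHealSketchFlags } from "./sf-db.js";

function healSketches(basePath, milestoneId) {
  autoHealSketchFlags(milestoneId, (sliceId) =>
    existsSync(
      join(basePath, ".sf", "milestones", milestoneId, "slices", sliceId, `${sliceId}-PLAN.md`),
    ),
  );
}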
null, - ":proof_level": planning.proofLevel ?? null, - ":integration_closure": planning.integrationClosure ?? null, - ":observability_impact": planning.observabilityImpact ?? null, - ":adversarial_partner": planning.adversarialReview?.partner ?? null, - ":adversarial_combatant": planning.adversarialReview?.combatant ?? null, - ":adversarial_architect": planning.adversarialReview?.architect ?? null, - ":planning_meeting_json": planning.planningMeeting - ? JSON.stringify(planning.planningMeeting) - : null, - }); -} -export function insertTask(t) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT INTO tasks ( - milestone_id, slice_id, id, title, status, task_status, one_liner, narrative, - verification_result, verification_status, duration, completed_at, blocker_discovered, - deviations, known_issues, key_files, key_decisions, full_summary_md, - description, estimate, files, verify, inputs, expected_output, observability_impact, sequence - ) VALUES ( - :milestone_id, :slice_id, :id, :title, :status, :task_status, :one_liner, :narrative, - :verification_result, :verification_status, :duration, :completed_at, :blocker_discovered, - :deviations, :known_issues, :key_files, :key_decisions, :full_summary_md, - :description, :estimate, :files, :verify, :inputs, :expected_output, :observability_impact, :sequence - ) - ON CONFLICT(milestone_id, slice_id, id) DO UPDATE SET - title = CASE WHEN NULLIF(:title, '') IS NOT NULL THEN :title ELSE tasks.title END, - status = :status, - task_status = :task_status, - one_liner = :one_liner, - narrative = :narrative, - verification_result = :verification_result, - verification_status = :verification_status, - duration = :duration, - completed_at = :completed_at, - blocker_discovered = :blocker_discovered, - deviations = :deviations, - known_issues = :known_issues, - key_files = :key_files, - key_decisions = :key_decisions, - full_summary_md = :full_summary_md, - description = CASE WHEN NULLIF(:description, '') IS NOT NULL THEN :description ELSE tasks.description END, - estimate = CASE WHEN NULLIF(:estimate, '') IS NOT NULL THEN :estimate ELSE tasks.estimate END, - files = CASE WHEN NULLIF(:files, '[]') IS NOT NULL THEN :files ELSE tasks.files END, - verify = CASE WHEN NULLIF(:verify, '') IS NOT NULL THEN :verify ELSE tasks.verify END, - inputs = CASE WHEN NULLIF(:inputs, '[]') IS NOT NULL THEN :inputs ELSE tasks.inputs END, - expected_output = CASE WHEN NULLIF(:expected_output, '[]') IS NOT NULL THEN :expected_output ELSE tasks.expected_output END, - observability_impact = CASE WHEN NULLIF(:observability_impact, '') IS NOT NULL THEN :observability_impact ELSE tasks.observability_impact END, - sequence = :sequence`) - .run({ - ":milestone_id": t.milestoneId, - ":slice_id": t.sliceId, - ":id": t.id, - ":title": t.title ?? "", - ":status": t.status ?? "pending", - ":task_status": normalizeTaskStatus(t.taskStatus ?? t.status) ?? "todo", - ":one_liner": t.oneLiner ?? "", - ":narrative": t.narrative ?? "", - ":verification_result": t.verificationResult ?? "", - ":verification_status": t.verificationStatus ?? "", - ":duration": t.duration ?? "", - ":completed_at": - t.status === "done" || t.status === "complete" - ? new Date().toISOString() - : null, - ":blocker_discovered": t.blockerDiscovered ? 1 : 0, - ":deviations": t.deviations ?? "", - ":known_issues": t.knownIssues ?? "", - ":key_files": JSON.stringify(t.keyFiles ?? []), - ":key_decisions": JSON.stringify(t.keyDecisions ?? 
[]), - ":full_summary_md": t.fullSummaryMd ?? "", - ":description": t.planning?.description ?? "", - ":estimate": t.planning?.estimate ?? "", - ":files": JSON.stringify(t.planning?.files ?? []), - ":verify": t.planning?.verify ?? "", - ":inputs": JSON.stringify(t.planning?.inputs ?? []), - ":expected_output": JSON.stringify(t.planning?.expectedOutput ?? []), - ":observability_impact": t.planning?.observabilityImpact ?? "", - ":sequence": t.sequence ?? 0, - }); - if (hasTaskSpecIntent(t.planning)) { - insertTaskSpecIfAbsent(t.milestoneId, t.sliceId, t.id, t.planning ?? {}); - } - insertTaskSchedulerIfAbsent(t.milestoneId, t.sliceId, t.id); -} -function hasTaskSpecIntent(planning = {}) { - if (!planning || typeof planning !== "object") return false; - if (typeof planning.verify === "string" && planning.verify.trim()) - return true; - if (Array.isArray(planning.inputs) && planning.inputs.length > 0) return true; - if ( - Array.isArray(planning.expectedOutput) && - planning.expectedOutput.length > 0 - ) { - return true; - } - for (const key of [ - "risk", - "mutationScope", - "mutation_scope", - "verification", - "verificationType", - "verification_type", - "planApproval", - "plan_approval", - "estimatedEffort", - "estimated_effort", - "dependencies", - "blocksParallel", - "blocks_parallel", - "requiresUserInput", - "requires_user_input", - "autoRetry", - "auto_retry", - "maxRetries", - "max_retries", - ]) { - if (planning[key] !== undefined) return true; - } - return false; -} -function insertTaskSpecIfAbsent(milestoneId, sliceId, taskId, planning = {}) { - if (!hasTaskSpecIntent(planning)) return; - const { normalized: frontmatter, errors } = - taskFrontmatterFromRecord(planning); - if (errors?.length) - logWarning( - "sf-db:insertTaskSpec", - `frontmatter validation errors for ${milestoneId}/${sliceId}/${taskId}: ${errors.join(", ")}`, - ); - currentDb - .prepare(`INSERT OR IGNORE INTO task_specs ( - milestone_id, slice_id, task_id, verify, inputs, expected_output, - risk, mutation_scope, verification_type, plan_approval, estimated_effort, - dependencies, blocks_parallel, requires_user_input, auto_retry, max_retries, - spec_version, created_at - ) VALUES ( - :milestone_id, :slice_id, :task_id, :verify, :inputs, :expected_output, - :risk, :mutation_scope, :verification_type, :plan_approval, :estimated_effort, - :dependencies, :blocks_parallel, :requires_user_input, :auto_retry, :max_retries, - 1, :created_at - )`) - .run({ - ":milestone_id": milestoneId, - ":slice_id": sliceId, - ":task_id": taskId, - ":verify": planning.verify ?? "", - ":inputs": JSON.stringify(planning.inputs ?? []), - ":expected_output": JSON.stringify(planning.expectedOutput ?? []), - ":risk": frontmatter.risk, - ":mutation_scope": frontmatter.mutationScope, - ":verification_type": frontmatter.verification, - ":plan_approval": frontmatter.planApproval, - ":estimated_effort": frontmatter.estimatedEffort, - ":dependencies": JSON.stringify(frontmatter.dependencies), - ":blocks_parallel": frontmatter.blocksParallel ? 1 : 0, - ":requires_user_input": frontmatter.requiresUserInput ? 1 : 0, - ":auto_retry": frontmatter.autoRetry ? 1 : 0, - ":max_retries": frontmatter.maxRetries, - ":created_at": new Date().toISOString(), - }); -} -function insertTaskSchedulerIfAbsent(milestoneId, sliceId, taskId) { - upsertTaskSchedulerStatus(milestoneId, sliceId, taskId, "queued", { - onlyIfAbsent: true, - }); -} -/** - * Upsert a task scheduler row without changing the task lifecycle row. 
- * - * Purpose: keep due/claimed/dispatched/consumed scheduling separate from - * task_status so automation level and timing do not overwrite work progress. - * - * Consumer: task scheduling/dispatch surfaces and task planning row creation. - */ -export function upsertTaskSchedulerStatus( - milestoneId, - sliceId, - taskId, - status = "queued", - { onlyIfAbsent = false } = {}, -) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const schedulerStatus = normalizeSchedulerStatus(status) ?? "queued"; - const sql = onlyIfAbsent - ? `INSERT OR IGNORE INTO task_scheduler ( - milestone_id, slice_id, task_id, status, updated_at - ) VALUES ( - :milestone_id, :slice_id, :task_id, :status, :updated_at - )` - : `INSERT INTO task_scheduler ( - milestone_id, slice_id, task_id, status, updated_at - ) VALUES ( - :milestone_id, :slice_id, :task_id, :status, :updated_at - ) - ON CONFLICT(milestone_id, slice_id, task_id) DO UPDATE SET - status = excluded.status, - updated_at = excluded.updated_at`; - currentDb.prepare(sql).run({ - ":milestone_id": milestoneId, - ":slice_id": sliceId, - ":task_id": taskId, - ":status": schedulerStatus, - ":updated_at": new Date().toISOString(), - }); -} -export function updateTaskStatus( - milestoneId, - sliceId, - taskId, - status, - completedAt, -) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const taskStatus = normalizeTaskStatus(status) ?? "todo"; - currentDb - .prepare(`UPDATE tasks SET - status = :status, - completed_at = :completed_at, - task_status = :task_status - WHERE milestone_id = :milestone_id AND slice_id = :slice_id AND id = :id`) - .run({ - ":status": status, - ":completed_at": completedAt ?? null, - ":task_status": taskStatus, - ":milestone_id": milestoneId, - ":slice_id": sliceId, - ":id": taskId, - }); -} -/** SF ADR-011 P2: set pause-on-escalation state on a task. The two flags are - * mutually exclusive — pending=1 forces awaiting_review=0. */ -export function setTaskEscalationPending( - milestoneId, - sliceId, - taskId, - artifactPath, -) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`UPDATE tasks - SET escalation_pending = 1, - escalation_awaiting_review = 0, - escalation_artifact_path = :path - WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`) - .run({ - ":path": artifactPath, - ":mid": milestoneId, - ":sid": sliceId, - ":tid": taskId, - }); -} -/** SF ADR-011 P2: continueWithDefault=true marker — artifact exists but no pause. - * Mutually exclusive with escalation_pending. */ -export function setTaskEscalationAwaitingReview( - milestoneId, - sliceId, - taskId, - artifactPath, -) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`UPDATE tasks - SET escalation_awaiting_review = 1, - escalation_pending = 0, - escalation_artifact_path = :path - WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`) - .run({ - ":path": artifactPath, - ":mid": milestoneId, - ":sid": sliceId, - ":tid": taskId, - }); -} -/** SF ADR-011 P2: clear both escalation flags (called when an escalation is - * resolved or its artifact is removed). Leaves escalation_artifact_path so - * the resolution audit trail survives. 
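// ── Usage sketch of the escalation flags (artifact path and ids are made up) ──
// The two flags are mutually exclusive by construction: each setter zeroes the
// other, and clearing keeps escalation_artifact_path as the audit trail.
import {
  setTaskEscalationPending,
  setTaskEscalationAwaitingReview,
  clearTaskEscalationFlags,
} from "./sf-db.js";

const artifact = ".sf/escalations/ESC-001.md"; // hypothetical path

// Pause the run on this task: pending=1, awaiting_review=0.
setTaskEscalationPending("M-1", "S-1", "T-2", artifact);

// Later, switch to continue-with-default: awaiting_review=1, pending=0.
setTaskEscalationAwaitingReview("M-1", "S-1", "T-2", artifact);

// Resolution clears both flags but keeps the artifact path on the row.
clearTaskEscalationFlags("M-1", "S-1", "T-2");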
*/ -export function clearTaskEscalationFlags(milestoneId, sliceId, taskId) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`UPDATE tasks - SET escalation_pending = 0, - escalation_awaiting_review = 0 - WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`) - .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); -} -/** SF ADR-011 P2 carry-forward: find a task in this slice that has a resolved - * escalation override that has NOT yet been injected into a downstream - * prompt. Returns the first match by sequence (lowest first), or null when - * no carry-forward is pending. - * - * The match criterion: escalation_artifact_path IS NOT NULL AND - * escalation_pending=0 AND escalation_awaiting_review=0 AND - * escalation_override_applied=0. The artifact's respondedAt is checked by - * the caller (claimOverrideForInjection in escalation.ts) — keeping artifact - * schema knowledge out of the DB layer. */ -export function findUnappliedEscalationOverride(milestoneId, sliceId) { - if (!currentDb) return null; - const row = currentDb - .prepare(`SELECT id, escalation_artifact_path - FROM tasks - WHERE milestone_id = :mid - AND slice_id = :sid - AND escalation_artifact_path IS NOT NULL - AND escalation_pending = 0 - AND escalation_awaiting_review = 0 - AND escalation_override_applied = 0 - ORDER BY sequence ASC, id ASC - LIMIT 1`) - .get({ ":mid": milestoneId, ":sid": sliceId }); - if (!row || !row.escalation_artifact_path) return null; - return { taskId: row.id, artifactPath: row.escalation_artifact_path }; -} -/** SF ADR-011 P2 carry-forward: atomically claim the override for injection. - * Returns true when this caller successfully flipped 0→1 (race winner) or - * false when another caller claimed it first (race loser). Use this to - * guarantee the override is injected exactly once. */ -export function claimEscalationOverride(milestoneId, sliceId, taskId) { - if (!currentDb) return false; - const result = currentDb - .prepare(`UPDATE tasks - SET escalation_override_applied = 1 - WHERE milestone_id = :mid - AND slice_id = :sid - AND id = :tid - AND escalation_override_applied = 0`) - .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); - return (result?.changes ?? 0) > 0; -} -export function setTaskBlockerDiscovered( - milestoneId, - sliceId, - taskId, - discovered, -) { - if (!currentDb) return; - currentDb - .prepare( - `UPDATE tasks SET blocker_discovered = :discovered WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`, - ) - .run({ - ":discovered": discovered ? 
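// ── Usage sketch of exactly-once override injection ──
// Find the unapplied override, then claim it with the atomic 0→1 flip; only
// the race winner injects it downstream. injectOverrideIntoPrompt(...) is a
// hypothetical caller-provided step, not part of this module.
import { findUnappliedEscalationOverride, claimEscalationOverride } from "./sf-db.js";

function maybeCarryForwardOverride(milestoneId, sliceId, injectOverrideIntoPrompt) {
  const pending = findUnappliedEscalationOverride(milestoneId, sliceId);
  if (!pending) return false;
  // Another worker may race us here; only the first claim returns true.
  if (!claimEscalationOverride(milestoneId, sliceId, pending.taskId)) return false;
  injectOverrideIntoPrompt(pending.artifactPath);
  return true;
}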
1 : 0, - ":mid": milestoneId, - ":sid": sliceId, - ":tid": taskId, - }); -} -export function upsertTaskPlanning(milestoneId, sliceId, taskId, planning) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - insertTaskSpecIfAbsent(milestoneId, sliceId, taskId, planning); - const { normalized: frontmatter, errors: fmErrors } = - taskFrontmatterFromRecord(planning); - if (fmErrors?.length) - logWarning( - "sf-db:upsertTaskPlanning", - `frontmatter validation errors for ${milestoneId}/${sliceId}/${taskId}: ${fmErrors.join(", ")}`, - ); - const hasTaskStatus = - planning.taskStatus !== undefined || - planning.task_status !== undefined || - planning.status !== undefined; - currentDb - .prepare(`UPDATE tasks SET - title = COALESCE(:title, title), - description = COALESCE(:description, description), - estimate = COALESCE(:estimate, estimate), - files = COALESCE(:files, files), - verify = COALESCE(:verify, verify), - inputs = COALESCE(:inputs, inputs), - expected_output = COALESCE(:expected_output, expected_output), - observability_impact = COALESCE(:observability_impact, observability_impact), - full_plan_md = COALESCE(:full_plan_md, full_plan_md), - risk = :risk, - mutation_scope = :mutation_scope, - verification_type = :verification_type, - plan_approval = :plan_approval, - task_status = CASE WHEN :has_task_status = 1 THEN :task_status ELSE task_status END, - estimated_effort = :estimated_effort, - dependencies = :dependencies, - blocks_parallel = :blocks_parallel, - requires_user_input = :requires_user_input, - auto_retry = :auto_retry, - max_retries = :max_retries, - frontmatter_version = :frontmatter_version - WHERE milestone_id = :milestone_id AND slice_id = :slice_id AND id = :id`) - .run({ - ":milestone_id": milestoneId, - ":slice_id": sliceId, - ":id": taskId, - ":title": planning.title ?? null, - ":description": planning.description ?? null, - ":estimate": planning.estimate ?? null, - ":files": planning.files ? JSON.stringify(planning.files) : null, - ":verify": planning.verify ?? null, - ":inputs": planning.inputs ? JSON.stringify(planning.inputs) : null, - ":expected_output": planning.expectedOutput - ? JSON.stringify(planning.expectedOutput) - : null, - ":observability_impact": planning.observabilityImpact ?? null, - ":full_plan_md": planning.fullPlanMd ?? null, - ":risk": frontmatter.risk, - ":mutation_scope": frontmatter.mutationScope, - ":verification_type": frontmatter.verification, - ":plan_approval": frontmatter.planApproval, - ":task_status": frontmatter.taskStatus, - ":has_task_status": hasTaskStatus ? 1 : 0, - ":estimated_effort": frontmatter.estimatedEffort, - ":dependencies": JSON.stringify(frontmatter.dependencies), - ":blocks_parallel": frontmatter.blocksParallel ? 1 : 0, - ":requires_user_input": frontmatter.requiresUserInput ? 1 : 0, - ":auto_retry": frontmatter.autoRetry ? 
1 : 0, - ":max_retries": frontmatter.maxRetries, - ":frontmatter_version": frontmatter.frontmatterVersion, - }); - if ( - planning.schedulerStatus !== undefined || - planning.scheduler_status !== undefined - ) { - upsertTaskSchedulerStatus( - milestoneId, - sliceId, - taskId, - frontmatter.schedulerStatus, - ); - } else { - insertTaskSchedulerIfAbsent(milestoneId, sliceId, taskId); - } -} -function parsePlanningMeeting(raw) { - if (typeof raw !== "string" || raw.trim() === "") return null; - try { - return JSON.parse(raw); - } catch { - return null; - } -} -function rowToSlice(row) { - return { - milestone_id: row["milestone_id"], - id: row["id"], - title: row["title"], - status: row["status"], - risk: row["risk"], - depends: safeParseJsonArray(row["depends"]), - demo: row["demo"] ?? "", - created_at: row["created_at"], - completed_at: row["completed_at"] ?? null, - full_summary_md: row["full_summary_md"] ?? "", - full_uat_md: row["full_uat_md"] ?? "", - goal: row["goal"] ?? "", - success_criteria: row["success_criteria"] ?? "", - proof_level: row["proof_level"] ?? "", - integration_closure: row["integration_closure"] ?? "", - observability_impact: row["observability_impact"] ?? "", - adversarial_partner: row["adversarial_partner"] ?? "", - adversarial_combatant: row["adversarial_combatant"] ?? "", - adversarial_architect: row["adversarial_architect"] ?? "", - planning_meeting: parsePlanningMeeting(row["planning_meeting_json"]), - sequence: row["sequence"] ?? 0, - replan_triggered_at: row["replan_triggered_at"] ?? null, - sketch_scope: row["sketch_scope"] ?? "", - is_sketch: row["is_sketch"] ?? 0, - }; -} -export function getSlice(milestoneId, sliceId) { - if (!currentDb) return null; - const row = currentDb - .prepare("SELECT * FROM slices WHERE milestone_id = :mid AND id = :sid") - .get({ ":mid": milestoneId, ":sid": sliceId }); - if (!row) return null; - return rowToSlice(row); -} -export function updateSliceStatus(milestoneId, sliceId, status, completedAt) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`UPDATE slices SET status = :status, completed_at = :completed_at - WHERE milestone_id = :milestone_id AND id = :id`) - .run({ - ":status": status, - ":completed_at": completedAt ?? null, - ":milestone_id": milestoneId, - ":id": sliceId, - }); -} -/** - * Store the UAT verdict for a slice. Called when an ASSESSMENT or UAT_RESULT - * file is written so the DB is the canonical source for verdict checks. - */ -export function setSliceUatVerdict(milestoneId, sliceId, verdict) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `UPDATE slices SET uat_verdict = :verdict WHERE milestone_id = :mid AND id = :sid`, - ) - .run({ ":mid": milestoneId, ":sid": sliceId, ":verdict": verdict }); -} -/** - * Returns the stored UAT verdict for a slice, or null if not yet recorded. - */ -export function getSliceUatVerdict(milestoneId, sliceId) { - if (!currentDb) return null; - const row = currentDb - .prepare( - `SELECT uat_verdict FROM slices WHERE milestone_id = :mid AND id = :sid`, - ) - .get({ ":mid": milestoneId, ":sid": sliceId }); - return row?.uat_verdict ?? null; -} -/** - * Scan existing ASSESSMENT/UAT_RESULT files on disk and populate uat_verdict - * for slices that have no verdict recorded in the DB yet. 
- * - * Purpose: one-time migration path so that repos with pre-existing verdict - * files work without file fallbacks in checkNeedsRunUat — the DB becomes the - * sole source of truth immediately after open. - * - * Consumer: ensureDbOpen (dynamic-tools.js) after openDatabase succeeds. - */ -export function backfillUatVerdicts(basePath) { - if (!currentDb) return; - // Find all slices that have no verdict yet - const rows = currentDb - .prepare(`SELECT milestone_id, id FROM slices WHERE uat_verdict IS NULL`) - .all(); - if (!rows.length) return; - // Extract verdict from content — inline to avoid cross-module import at db layer - function parseVerdictFromContent(content) { - const fmMatch = content.match(/^---\n([\s\S]*?)\n---/); - if (fmMatch) { - const m = fmMatch[1].match(/verdict:\s*([\w-]+)/i); - if (m) { - let v = m[1].toLowerCase(); - if (v === "passed") v = "pass"; - return v; - } - return null; - } - const bodyMatch = content.match( - /\*\*Verdict:?\*\*\s*(?:✅\s*)?(\w[\w-]*)/i, - ); - if (bodyMatch) { - let v = bodyMatch[1].toLowerCase(); - if (v === "passed") v = "pass"; - return v; - } - return null; - } - const stmt = currentDb.prepare( - `UPDATE slices SET uat_verdict = :verdict WHERE milestone_id = :mid AND id = :sid`, - ); - for (const row of rows) { - const mid = row["milestone_id"]; - const sid = row["id"]; - const sliceDir = join(basePath, ".sf", "milestones", mid, "slices", sid); - const candidates = [ - join(sliceDir, `${sid}-ASSESSMENT.md`), - join(sliceDir, `${sid}-UAT_RESULT.md`), - ]; - for (const candidatePath of candidates) { - if (!existsSync(candidatePath)) continue; - try { - const content = readFileSync(candidatePath, "utf8"); - const verdict = parseVerdictFromContent(content); - if (verdict) { - stmt.run({ ":mid": mid, ":sid": sid, ":verdict": verdict }); - break; - } - } catch { - // Skip unreadable files - } - } - } -} -export function setTaskSummaryMd(milestoneId, sliceId, taskId, md) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `UPDATE tasks SET full_summary_md = :md WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`, - ) - .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId, ":md": md }); -} -export function setSliceSummaryMd(milestoneId, sliceId, summaryMd, uatMd) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `UPDATE slices SET full_summary_md = :summary_md, full_uat_md = :uat_md WHERE milestone_id = :mid AND id = :sid`, - ) - .run({ - ":mid": milestoneId, - ":sid": sliceId, - ":summary_md": summaryMd, - ":uat_md": uatMd, - }); -} -function safeParseJsonArray(raw, fallback = []) { - if (typeof raw !== "string" || raw.trim() === "") return fallback; - try { - const parsed = JSON.parse(raw); - return Array.isArray(parsed) ? parsed : fallback; - } catch { - return fallback; - } -} -function parseTaskArrayColumn(raw) { - if (typeof raw !== "string" || raw.trim() === "") return []; - try { - const parsed = JSON.parse(raw); - if (Array.isArray(parsed)) return parsed.map((value) => String(value)); - if (parsed === null || parsed === undefined || parsed === "") return []; - return [String(parsed)]; - } catch { - // Older/corrupt rows may contain comma-separated strings instead of JSON. 
- return raw - .split(",") - .map((value) => value.trim()) - .filter(Boolean); - } -} -function rowToTask(row) { - const parseTaskArray = (value) => { - if (Array.isArray(value)) { - return value.filter((entry) => typeof entry === "string"); - } - if (typeof value !== "string") return []; - const trimmed = value.trim(); - if (!trimmed) return []; - try { - const parsed = JSON.parse(trimmed); - if (Array.isArray(parsed)) { - return parsed.filter((entry) => typeof entry === "string"); - } - if (typeof parsed === "string" && parsed.trim()) { - return [parsed.trim()]; - } - } catch { - // Older/corrupt DB rows may contain raw comma-separated paths instead of JSON arrays. - } - return trimmed - .split(",") - .map((entry) => entry.trim()) - .filter(Boolean); - }; - return withTaskFrontmatter({ - milestone_id: row["milestone_id"], - slice_id: row["slice_id"], - id: row["id"], - title: row["title"], - status: row["status"], - one_liner: row["one_liner"], - narrative: row["narrative"], - verification_result: row["verification_result"], - duration: row["duration"], - completed_at: row["completed_at"] ?? null, - blocker_discovered: row["blocker_discovered"] === 1, - deviations: row["deviations"], - known_issues: row["known_issues"], - key_files: parseTaskArrayColumn(row["key_files"]), - key_decisions: parseTaskArrayColumn(row["key_decisions"]), - full_summary_md: row["full_summary_md"], - description: row["description"] ?? "", - estimate: row["estimate"] ?? "", - files: parseTaskArray(row["files"]), - verify: row["verify"] ?? "", - inputs: parseTaskArray(row["inputs"]), - expected_output: parseTaskArray(row["expected_output"]), - observability_impact: row["observability_impact"] ?? "", - full_plan_md: row["full_plan_md"] ?? "", - sequence: row["sequence"] ?? 0, - verification_status: row["verification_status"] ?? "", - risk: row["risk"] ?? "low", - mutation_scope: row["mutation_scope"] ?? "isolated", - verification_type: row["verification_type"] ?? "self-check", - plan_approval: row["plan_approval"] ?? "not-required", - task_status: row["task_status"] ?? row["status"] ?? "todo", - scheduler_status: row["scheduler_status"] ?? "queued", - estimated_effort: row["estimated_effort"] ?? null, - dependencies: parseTaskArray(row["dependencies"]), - blocks_parallel: row["blocks_parallel"] ?? 0, - requires_user_input: row["requires_user_input"] ?? 0, - auto_retry: row["auto_retry"] ?? 1, - max_retries: row["max_retries"] ?? 2, - escalation_pending: row["escalation_pending"] ?? 0, - escalation_awaiting_review: row["escalation_awaiting_review"] ?? 0, - escalation_override_applied: row["escalation_override_applied"] ?? 0, - escalation_artifact_path: row["escalation_artifact_path"] ?? 
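// ── Illustrative restatement of the array-column normalization above ──
// JSON arrays parse as-is, while legacy/corrupt comma-separated strings fall
// back to split-and-trim. This helper is not exported; it only mirrors the
// behaviour of the private parsers for clarity.
function parseArrayColumnExample(raw) {
  if (typeof raw !== "string" || raw.trim() === "") return [];
  try {
    const parsed = JSON.parse(raw);
    if (Array.isArray(parsed)) return parsed.map(String);
    if (parsed === null || parsed === "") return [];
    return [String(parsed)];
  } catch {
    return raw.split(",").map((v) => v.trim()).filter(Boolean);
  }
}

parseArrayColumnExample('["src/a.js","src/b.js"]'); // → ["src/a.js", "src/b.js"]
parseArrayColumnExample("src/a.js, src/b.js");      // → ["src/a.js", "src/b.js"]
parseArrayColumnExample("");                         // → []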
null, - }); -} -export function getTask(milestoneId, sliceId, taskId) { - if (!currentDb) return null; - const row = currentDb - .prepare( - `SELECT t.*, ts.status AS scheduler_status - FROM tasks t - LEFT JOIN task_scheduler ts - ON t.milestone_id = ts.milestone_id - AND t.slice_id = ts.slice_id - AND t.id = ts.task_id - WHERE t.milestone_id = :mid AND t.slice_id = :sid AND t.id = :tid`, - ) - .get({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); - if (!row) return null; - return rowToTask(row); -} -export function getSliceTasks(milestoneId, sliceId) { - if (!currentDb) return []; - const rows = currentDb - .prepare( - `SELECT t.*, ts.status AS scheduler_status - FROM tasks t - LEFT JOIN task_scheduler ts - ON t.milestone_id = ts.milestone_id - AND t.slice_id = ts.slice_id - AND t.id = ts.task_id - WHERE t.milestone_id = :mid AND t.slice_id = :sid - ORDER BY t.sequence, t.id`, - ) - .all({ ":mid": milestoneId, ":sid": sliceId }); - return rows.map(rowToTask); -} -export function insertVerificationEvidence(e) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT OR IGNORE INTO verification_evidence (task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at) - VALUES (:task_id, :slice_id, :milestone_id, :command, :exit_code, :verdict, :duration_ms, :created_at)`) - .run({ - ":task_id": e.taskId, - ":slice_id": e.sliceId, - ":milestone_id": e.milestoneId, - ":command": e.command, - ":exit_code": e.exitCode, - ":verdict": e.verdict, - ":duration_ms": e.durationMs, - ":created_at": new Date().toISOString(), - }); -} -export function getVerificationEvidence(milestoneId, sliceId, taskId) { - if (!currentDb) return []; - const rows = currentDb - .prepare( - "SELECT * FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid AND task_id = :tid ORDER BY id", - ) - .all({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); - return rows; -} -function rowToSelfFeedback(row) { - try { - const parsed = JSON.parse(row["full_json"]); - return { - ...parsed, - resolvedAt: row["resolved_at"] ?? parsed.resolvedAt, - resolvedReason: row["resolved_reason"] ?? parsed.resolvedReason, - resolvedBySfVersion: - row["resolved_by_sf_version"] ?? parsed.resolvedBySfVersion, - resolvedEvidence: row["resolved_evidence_json"] - ? JSON.parse(row["resolved_evidence_json"]) - : parsed.resolvedEvidence, - resolvedCriteriaMet: row["resolved_criteria_json"] - ? JSON.parse(row["resolved_criteria_json"]) - : parsed.resolvedCriteriaMet, - }; - } catch { - return { - id: row["id"], - ts: row["ts"], - kind: row["kind"], - severity: row["severity"], - blocking: row["blocking"] === 1, - repoIdentity: row["repo_identity"], - sfVersion: row["sf_version"], - basePath: row["base_path"], - occurredIn: { - unitType: row["unit_type"] ?? undefined, - milestone: row["milestone_id"] ?? undefined, - slice: row["slice_id"] ?? undefined, - task: row["task_id"] ?? undefined, - }, - summary: row["summary"], - evidence: row["evidence"], - suggestedFix: row["suggested_fix"], - resolvedAt: row["resolved_at"] ?? undefined, - resolvedReason: row["resolved_reason"] ?? undefined, - resolvedBySfVersion: row["resolved_by_sf_version"] ?? undefined, - resolvedEvidence: row["resolved_evidence_json"] - ? JSON.parse(row["resolved_evidence_json"]) - : undefined, - resolvedCriteriaMet: row["resolved_criteria_json"] - ? 
JSON.parse(row["resolved_criteria_json"]) - : undefined, - }; - } -} -export function insertSelfFeedbackEntry(entry) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const occurred = entry.occurredIn ?? {}; - currentDb - .prepare(`INSERT INTO self_feedback ( - id, ts, kind, severity, blocking, repo_identity, sf_version, base_path, - unit_type, milestone_id, slice_id, task_id, summary, evidence, suggested_fix, full_json, - resolved_at, resolved_reason, resolved_by_sf_version, resolved_evidence_json, resolved_criteria_json - ) VALUES ( - :id, :ts, :kind, :severity, :blocking, :repo_identity, :sf_version, :base_path, - :unit_type, :milestone_id, :slice_id, :task_id, :summary, :evidence, :suggested_fix, :full_json, - :resolved_at, :resolved_reason, :resolved_by_sf_version, :resolved_evidence_json, :resolved_criteria_json - ) - ON CONFLICT(id) DO NOTHING`) - .run({ - ":id": entry.id, - ":ts": entry.ts, - ":kind": entry.kind, - ":severity": entry.severity, - ":blocking": entry.blocking ? 1 : 0, - ":repo_identity": entry.repoIdentity ?? "", - ":sf_version": entry.sfVersion ?? "", - ":base_path": entry.basePath ?? "", - ":unit_type": occurred.unitType ?? null, - ":milestone_id": occurred.milestone ?? null, - ":slice_id": occurred.slice ?? null, - ":task_id": occurred.task ?? null, - ":summary": entry.summary ?? "", - ":evidence": entry.evidence ?? "", - ":suggested_fix": entry.suggestedFix ?? "", - ":full_json": JSON.stringify(entry), - ":resolved_at": entry.resolvedAt ?? null, - ":resolved_reason": entry.resolvedReason ?? null, - ":resolved_by_sf_version": entry.resolvedBySfVersion ?? null, - ":resolved_evidence_json": entry.resolvedEvidence - ? JSON.stringify(entry.resolvedEvidence) - : null, - ":resolved_criteria_json": entry.resolvedCriteriaMet - ? JSON.stringify(entry.resolvedCriteriaMet) - : null, - }); -} -export function listSelfFeedbackEntries() { - if (!currentDb) return []; - const rows = currentDb - .prepare("SELECT * FROM self_feedback ORDER BY ts ASC, id ASC") - .all(); - return rows.map(rowToSelfFeedback); -} -export function resolveSelfFeedbackEntry(entryId, resolution) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const existing = currentDb - .prepare("SELECT * FROM self_feedback WHERE id = :id") - .get({ ":id": entryId }); - if (!existing || existing["resolved_at"]) return false; - const resolvedAt = resolution.resolvedAt ?? new Date().toISOString(); - const entry = { - ...rowToSelfFeedback(existing), - resolvedAt, - resolvedReason: resolution.reason, - resolvedBySfVersion: resolution.resolvedBySfVersion ?? "", - resolvedEvidence: resolution.evidence, - }; - if (resolution.criteriaMet) - entry.resolvedCriteriaMet = resolution.criteriaMet; - const result = currentDb - .prepare(`UPDATE self_feedback SET - full_json = :full_json, - resolved_at = :resolved_at, - resolved_reason = :resolved_reason, - resolved_by_sf_version = :resolved_by_sf_version, - resolved_evidence_json = :resolved_evidence_json, - resolved_criteria_json = :resolved_criteria_json - WHERE id = :id AND resolved_at IS NULL`) - .run({ - ":id": entryId, - ":full_json": JSON.stringify(entry), - ":resolved_at": resolvedAt, - ":resolved_reason": resolution.reason ?? "", - ":resolved_by_sf_version": resolution.resolvedBySfVersion ?? "", - ":resolved_evidence_json": resolution.evidence - ? JSON.stringify(resolution.evidence) - : null, - ":resolved_criteria_json": resolution.criteriaMet - ? 
JSON.stringify(resolution.criteriaMet) - : null, - }); - return result.changes > 0; -} -function parseVisionMeeting(raw) { - if (typeof raw !== "string" || raw.trim().length === 0) return null; - try { - return JSON.parse(raw); - } catch { - return null; - } -} -function parseProductResearch(raw) { - if (typeof raw !== "string" || raw.trim().length === 0) return null; - try { - return JSON.parse(raw); - } catch { - return null; - } -} -function rowToMilestone(row) { - return { - id: row["id"], - title: row["title"], - status: row["status"], - depends_on: safeParseJsonArray(row["depends_on"]), - created_at: row["created_at"], - completed_at: row["completed_at"] ?? null, - vision: row["vision"] ?? "", - success_criteria: safeParseJsonArray(row["success_criteria"]), - key_risks: safeParseJsonArray(row["key_risks"]), - proof_strategy: safeParseJsonArray(row["proof_strategy"]), - verification_contract: row["verification_contract"] ?? "", - verification_integration: row["verification_integration"] ?? "", - verification_operational: row["verification_operational"] ?? "", - verification_uat: row["verification_uat"] ?? "", - definition_of_done: safeParseJsonArray(row["definition_of_done"]), - requirement_coverage: row["requirement_coverage"] ?? "", - boundary_map_markdown: row["boundary_map_markdown"] ?? "", - vision_meeting: parseVisionMeeting(row["vision_meeting_json"]), - product_research: parseProductResearch(row["product_research_json"]), - sequence: row["sequence"] ?? 0, - }; -} -function rowToArtifact(row) { - return { - path: row["path"], - artifact_type: row["artifact_type"], - milestone_id: row["milestone_id"] ?? null, - slice_id: row["slice_id"] ?? null, - task_id: row["task_id"] ?? null, - full_content: row["full_content"], - imported_at: row["imported_at"], - }; -} -export function getAllMilestones() { - if (!currentDb) return []; - const rows = currentDb - .prepare( - "SELECT * FROM milestones ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id", - ) - .all(); - return rows.map(rowToMilestone); -} -export function getMilestone(id) { - if (!currentDb) return null; - const row = currentDb - .prepare("SELECT * FROM milestones WHERE id = :id") - .get({ ":id": id }); - if (!row) return null; - return rowToMilestone(row); -} -function rowToBacklogItem(row) { - return { - id: row["id"], - title: row["title"], - status: row["status"], - note: row["note"] ?? "", - source: row["source"] ?? "", - triageRunId: row["triage_run_id"] ?? null, - sequence: row["sequence"] ?? 0, - createdAt: row["created_at"], - updatedAt: row["updated_at"], - promotedAt: row["promoted_at"] ?? null, - }; -} -export function listBacklogItems() { - if (!currentDb) return []; - return currentDb - .prepare( - "SELECT * FROM backlog_items ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id", - ) - .all() - .map(rowToBacklogItem); -} -export function nextBacklogItemId() { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const row = currentDb - .prepare( - "SELECT id FROM backlog_items WHERE id LIKE '999.%' ORDER BY CAST(substr(id, 5) AS INTEGER) DESC LIMIT 1", - ) - .get(); - const next = row?.id ? Number.parseInt(String(row.id).slice(4), 10) + 1 : 1; - return `999.${Number.isFinite(next) ? next : 1}`; -} -export function addBacklogItem({ - id, - title, - note = "", - source = "manual", - triageRunId = null, - status = "pending", -}) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const itemId = id ?? 
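// ── Usage sketch of backlog id allocation (titles and notes are made up) ──
// Backlog ids live in the 999.N range: nextBacklogItemId() scans existing
// '999.%' ids and returns the next free one, and addBacklogItem() falls back
// to it when the caller passes no id.
import { addBacklogItem, listBacklogItems, updateBacklogItemStatus } from "./sf-db.js";

const backlogId = addBacklogItem({ title: "Investigate flaky worktree test", source: "triage" });
// e.g. "999.1" on an empty backlog, "999.7" if 999.6 is the highest existing id

updateBacklogItemStatus(backlogId, "promoted", "Promoted into a milestone");
console.log(listBacklogItems().find((item) => item.id === backlogId)?.status); // "promoted"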
nextBacklogItemId(); - const now = new Date().toISOString(); - const sequenceRow = currentDb - .prepare( - "SELECT COALESCE(MAX(sequence), 0) + 1 AS sequence FROM backlog_items", - ) - .get(); - currentDb - .prepare(`INSERT INTO backlog_items ( - id, title, status, note, source, triage_run_id, sequence, created_at, updated_at, promoted_at - ) VALUES ( - :id, :title, :status, :note, :source, :triage_run_id, :sequence, :created_at, :updated_at, :promoted_at - ) - ON CONFLICT(id) DO UPDATE SET - title = excluded.title, - status = excluded.status, - note = excluded.note, - source = excluded.source, - triage_run_id = excluded.triage_run_id, - updated_at = excluded.updated_at, - promoted_at = excluded.promoted_at`) - .run({ - ":id": itemId, - ":title": title, - ":status": status, - ":note": note, - ":source": source, - ":triage_run_id": triageRunId, - ":sequence": sequenceRow?.sequence ?? 1, - ":created_at": now, - ":updated_at": now, - ":promoted_at": status === "promoted" ? now : null, - }); - return itemId; -} -export function updateBacklogItemStatus(id, status, note = "") { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const now = new Date().toISOString(); - const result = currentDb - .prepare(`UPDATE backlog_items - SET status = :status, - note = :note, - updated_at = :updated_at, - promoted_at = CASE WHEN :status = 'promoted' THEN :updated_at ELSE promoted_at END - WHERE id = :id`) - .run({ - ":id": id, - ":status": status, - ":note": note, - ":updated_at": now, - }); - return (result?.changes ?? 0) > 0; -} -export function removeBacklogItem(id) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const result = currentDb - .prepare("DELETE FROM backlog_items WHERE id = :id") - .run({ ":id": id }); - return (result?.changes ?? 0) > 0; -} -/** - * Update a milestone's status in the database. - * Used by park/unpark to keep the DB in sync with the filesystem marker. - * See: https://github.com/singularity-forge/sf-run/issues/2694 - */ -export function updateMilestoneStatus(milestoneId, status, completedAt) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `UPDATE milestones SET status = :status, completed_at = :completed_at WHERE id = :id`, - ) - .run({ - ":status": status, - ":completed_at": completedAt ?? null, - ":id": milestoneId, - }); -} -/** - * Persist explicit milestone execution order in the structured runtime DB. - * - * Purpose: make roadmap priority/order queryable and schema-owned instead of - * relying on `.sf/QUEUE-ORDER.json` as a peer source of truth. - * - * Consumer: queue-order.js when `/queue` or rethink reorders milestones. 
- */ -export function updateMilestoneQueueOrder(order) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - transaction(() => { - const stmt = currentDb.prepare( - "UPDATE milestones SET sequence = :sequence WHERE id = :id", - ); - for (let i = 0; i < order.length; i++) { - stmt.run({ ":sequence": i + 1, ":id": order[i] }); - } - }); -} -export function getActiveMilestoneFromDb() { - if (!currentDb) return null; - const row = currentDb - .prepare( - "SELECT * FROM milestones WHERE status NOT IN ('complete', 'parked') ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id LIMIT 1", - ) - .get(); - if (!row) return null; - return rowToMilestone(row); -} -export function getActiveSliceFromDb(milestoneId) { - if (!currentDb) return null; - // Find the first non-complete slice whose dependencies are all satisfied. - // Primary: uses the slice_dependencies junction table (kept in sync by syncSliceDependencies). - // Fallback: for slices with no junction rows, check the `depends` JSON column directly - // to handle legacy data or rows that were written before syncSliceDependencies ran. - const candidates = currentDb - .prepare(`SELECT s.* FROM slices s - WHERE s.milestone_id = :mid - AND s.status NOT IN ('complete', 'done', 'skipped') - AND NOT EXISTS ( - SELECT 1 FROM slice_dependencies d - WHERE d.milestone_id = :mid - AND d.slice_id = s.id - AND d.depends_on_slice_id NOT IN ( - SELECT id FROM slices WHERE milestone_id = :mid AND status IN ('complete', 'done', 'skipped') - ) - ) - ORDER BY s.sequence, s.id`) - .all({ ":mid": milestoneId }); - if (candidates.length === 0) return null; - // Collect completed slice IDs for JSON-dep fallback check. - const completedIds = new Set( - currentDb - .prepare( - "SELECT id FROM slices WHERE milestone_id = :mid AND status IN ('complete', 'done', 'skipped')", - ) - .all({ ":mid": milestoneId }) - .map((r) => r["id"]), - ); - for (const candidate of candidates) { - const hasSyncedDeps = - (currentDb - .prepare( - "SELECT COUNT(*) as c FROM slice_dependencies WHERE milestone_id = :mid AND slice_id = :sid", - ) - .get({ ":mid": milestoneId, ":sid": candidate["id"] })?.c ?? 0) > 0; - if (hasSyncedDeps) { - // Junction table is authoritative and candidate already passed the NOT EXISTS check. - return rowToSlice(candidate); - } - // No junction rows for this slice — fall back to JSON depends column. - const jsonDeps = safeParseJsonArray(candidate["depends"]); - if (jsonDeps.length === 0 || jsonDeps.every((d) => completedIds.has(d))) { - return rowToSlice(candidate); - } - // JSON deps not yet satisfied — continue to next candidate. 
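// A worked example of the selection logic above, with hypothetical slices:
// A is complete, B has depends ["A"], C has depends ["B"], and B/C are pending.
// With synced junction rows, C is filtered out by the NOT EXISTS clause (its
// dependency B is not complete) and B is returned. With no junction rows at
// all, both B and C reach this fallback loop, B's JSON deps are satisfied
// first, and B is still returned, so legacy rows resolve the same way.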
- } - return null; -} -export function getActiveTaskFromDb(milestoneId, sliceId) { - if (!currentDb) return null; - const row = currentDb - .prepare( - "SELECT * FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND status NOT IN ('complete', 'done') ORDER BY sequence, id LIMIT 1", - ) - .get({ ":mid": milestoneId, ":sid": sliceId }); - if (!row) return null; - return rowToTask(row); -} -export function getMilestoneSlices(milestoneId) { - if (!currentDb) return []; - const rows = currentDb - .prepare( - "SELECT * FROM slices WHERE milestone_id = :mid ORDER BY sequence, id", - ) - .all({ ":mid": milestoneId }); - return rows.map(rowToSlice); -} -export function getArtifact(path) { - if (!currentDb) return null; - const row = currentDb - .prepare("SELECT * FROM artifacts WHERE path = :path") - .get({ ":path": path }); - if (!row) return null; - return rowToArtifact(row); -} -// ─── Lightweight Query Variants (hot-path optimized) ───────────────────── -/** Fast milestone status check — avoids deserializing JSON planning fields. */ -export function getActiveMilestoneIdFromDb() { - if (!currentDb) return null; - const row = currentDb - .prepare( - "SELECT id, status FROM milestones WHERE status NOT IN ('complete', 'parked') ORDER BY id LIMIT 1", - ) - .get(); - if (!row) return null; - return { id: row["id"], status: row["status"] }; -} -/** Fast slice status check — avoids deserializing JSON depends/planning fields. */ -export function getSliceStatusSummary(milestoneId) { - if (!currentDb) return []; - return currentDb - .prepare( - "SELECT id, status FROM slices WHERE milestone_id = :mid ORDER BY sequence, id", - ) - .all({ ":mid": milestoneId }) - .map((r) => ({ id: r["id"], status: r["status"] })); -} -/** Fast task status check — avoids deserializing JSON arrays and large text fields. */ -export function getActiveTaskIdFromDb(milestoneId, sliceId) { - if (!currentDb) return null; - const row = currentDb - .prepare( - "SELECT id, status, title FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND status NOT IN ('complete', 'done') ORDER BY sequence, id LIMIT 1", - ) - .get({ ":mid": milestoneId, ":sid": sliceId }); - if (!row) return null; - return { - id: row["id"], - status: row["status"], - title: row["title"], - }; -} -/** Count tasks by status for a slice — useful for progress reporting without full row load. */ -export function getSliceTaskCounts(milestoneId, sliceId) { - if (!currentDb) return { total: 0, done: 0, pending: 0 }; - const row = currentDb - .prepare(`SELECT - COUNT(*) as total, - SUM(CASE WHEN status IN ('complete', 'done') THEN 1 ELSE 0 END) as done, - SUM(CASE WHEN status NOT IN ('complete', 'done') THEN 1 ELSE 0 END) as pending - FROM tasks WHERE milestone_id = :mid AND slice_id = :sid`) - .get({ ":mid": milestoneId, ":sid": sliceId }); - if (!row) return { total: 0, done: 0, pending: 0 }; - return { - total: row["total"] ?? 0, - done: row["done"] ?? 0, - pending: row["pending"] ?? 0, - }; -} -// ─── Slice Dependencies (junction table) ───────────────────────────────── -/** Sync the slice_dependencies junction table from a slice's JSON depends array. 
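// A minimal usage sketch of the junction-table sync described above, assuming
// imports from ./sf-db.js; the milestone and slice ids are hypothetical.
import { syncSliceDependencies, getDependentSlices } from "./sf-db.js";

// Replace S03's dependency rows with S01 and S02, then ask which slices
// depend on S01 (returns ["S03"] after the sync above).
syncSliceDependencies("003", "S03", ["S01", "S02"]);
const dependents = getDependentSlices("003", "S01");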
*/ -export function syncSliceDependencies(milestoneId, sliceId, depends) { - if (!currentDb) return; - currentDb - .prepare( - "DELETE FROM slice_dependencies WHERE milestone_id = :mid AND slice_id = :sid", - ) - .run({ ":mid": milestoneId, ":sid": sliceId }); - for (const dep of depends) { - currentDb - .prepare( - "INSERT OR IGNORE INTO slice_dependencies (milestone_id, slice_id, depends_on_slice_id) VALUES (:mid, :sid, :dep)", - ) - .run({ ":mid": milestoneId, ":sid": sliceId, ":dep": dep }); - } -} -/** Get all slices that depend on a given slice. */ -export function getDependentSlices(milestoneId, sliceId) { - if (!currentDb) return []; - return currentDb - .prepare( - "SELECT slice_id FROM slice_dependencies WHERE milestone_id = :mid AND depends_on_slice_id = :sid", - ) - .all({ ":mid": milestoneId, ":sid": sliceId }) - .map((r) => r["slice_id"]); -} -// ─── Worktree DB Helpers ────────────────────────────────────────────────── -export function copyWorktreeDb(srcDbPath, destDbPath) { - try { - if (!existsSync(srcDbPath)) return false; - const destDir = dirname(destDbPath); - mkdirSync(destDir, { recursive: true }); - copyFileSync(srcDbPath, destDbPath); - return true; - } catch (err) { - logError("db", "failed to copy DB to worktree", { - error: err.message, - }); - return false; - } -} -export function reconcileWorktreeDb(mainDbPath, worktreeDbPath) { - const zero = { - decisions: 0, - requirements: 0, - artifacts: 0, - milestones: 0, - slices: 0, - tasks: 0, - memories: 0, - verification_evidence: 0, - conflicts: [], - }; - if (!existsSync(worktreeDbPath)) return zero; - // Guard: bail when both paths resolve to the same physical file. - // ATTACHing a WAL-mode DB to itself corrupts the WAL (#2823). - try { - if (realpathSync(mainDbPath) === realpathSync(worktreeDbPath)) return zero; - } catch (e) { - logWarning("db", `realpathSync failed: ${e.message}`); - } - // Sanitize path: reject any characters that could break ATTACH syntax. - // ATTACH DATABASE doesn't support parameterized paths in all providers, - // so we use strict allowlist validation instead. - if (/['";\x00]/.test(worktreeDbPath)) { - logError( - "db", - "worktree DB reconciliation failed: path contains unsafe characters", - ); - return zero; - } - if (!currentDb) { - const opened = openDatabase(mainDbPath); - if (!opened) { - logError("db", "worktree DB reconciliation failed: cannot open main DB"); - return zero; - } - } - const adapter = currentDb; - const conflicts = []; - try { - adapter.exec(`ATTACH DATABASE '${worktreeDbPath}' AS wt`); - try { - const wtInfo = adapter.prepare("PRAGMA wt.table_info('decisions')").all(); - const hasMadeBy = wtInfo.some((col) => col["name"] === "made_by"); - const wtMilestoneInfo = adapter - .prepare("PRAGMA wt.table_info('milestones')") - .all(); - const hasProductResearch = wtMilestoneInfo.some( - (col) => col["name"] === "product_research_json", - ); - const decConf = adapter - .prepare( - `SELECT m.id FROM decisions m INNER JOIN wt.decisions w ON m.id = w.id WHERE m.decision != w.decision OR m.choice != w.choice OR m.rationale != w.rationale OR ${hasMadeBy ? 
"m.made_by != w.made_by" : "'agent' != 'agent'"} OR m.superseded_by IS NOT w.superseded_by`, - ) - .all(); - for (const row of decConf) - conflicts.push(`decision ${row["id"]}: modified in both`); - const reqConf = adapter - .prepare( - `SELECT m.id FROM requirements m INNER JOIN wt.requirements w ON m.id = w.id WHERE m.description != w.description OR m.status != w.status OR m.notes != w.notes OR m.superseded_by IS NOT w.superseded_by`, - ) - .all(); - for (const row of reqConf) - conflicts.push(`requirement ${row["id"]}: modified in both`); - const merged = { - decisions: 0, - requirements: 0, - artifacts: 0, - milestones: 0, - slices: 0, - tasks: 0, - memories: 0, - verification_evidence: 0, - }; - function countChanges(result) { - return typeof result === "object" && result !== null - ? (result.changes ?? 0) - : 0; - } - adapter.exec("BEGIN"); - try { - merged.decisions = countChanges( - adapter - .prepare(` - INSERT OR REPLACE INTO decisions ( - id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by - ) - SELECT id, when_context, scope, decision, choice, rationale, revisable, ${hasMadeBy ? "made_by" : "'agent'"}, superseded_by FROM wt.decisions - `) - .run(), - ); - merged.requirements = countChanges( - adapter - .prepare(` - INSERT OR REPLACE INTO requirements ( - id, class, status, description, why, source, primary_owner, - supporting_slices, validation, notes, full_content, superseded_by - ) - SELECT id, class, status, description, why, source, primary_owner, - supporting_slices, validation, notes, full_content, superseded_by - FROM wt.requirements - `) - .run(), - ); - merged.artifacts = countChanges( - adapter - .prepare(` - INSERT OR REPLACE INTO artifacts ( - path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at - ) - SELECT path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at - FROM wt.artifacts - `) - .run(), - ); - // Merge milestones — worktree may have updated status/planning fields - merged.milestones = countChanges( - adapter - .prepare(` - INSERT OR REPLACE INTO milestones ( - id, title, status, depends_on, created_at, completed_at, - vision, success_criteria, key_risks, proof_strategy, - verification_contract, verification_integration, verification_operational, verification_uat, - definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json - ) - SELECT id, title, status, depends_on, created_at, completed_at, - vision, success_criteria, key_risks, proof_strategy, - verification_contract, verification_integration, verification_operational, verification_uat, - definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, ${hasProductResearch ? "product_research_json" : "''"} - FROM wt.milestones - `) - .run(), - ); - // Merge slices — preserve worktree progress but never downgrade completed status (#2558). - // Uses INSERT OR REPLACE with a subquery that picks the best status — if the main DB - // already has a completed slice, keep that status even if the worktree copy is stale. 
- merged.slices = countChanges( - adapter - .prepare(` - INSERT OR REPLACE INTO slices ( - milestone_id, id, title, status, risk, depends, demo, created_at, completed_at, - full_summary_md, full_uat_md, goal, success_criteria, proof_level, - integration_closure, observability_impact, adversarial_partner, adversarial_combatant, - adversarial_architect, planning_meeting_json, sequence, replan_triggered_at - ) - SELECT w.milestone_id, w.id, w.title, - CASE - WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') - THEN m.status ELSE w.status - END, - w.risk, w.depends, w.demo, w.created_at, - CASE - WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') - THEN m.completed_at ELSE w.completed_at - END, - w.full_summary_md, w.full_uat_md, w.goal, w.success_criteria, w.proof_level, - w.integration_closure, w.observability_impact, w.adversarial_partner, w.adversarial_combatant, - w.adversarial_architect, w.planning_meeting_json, w.sequence, w.replan_triggered_at - FROM wt.slices w - LEFT JOIN slices m ON m.milestone_id = w.milestone_id AND m.id = w.id - `) - .run(), - ); - // Merge tasks — preserve execution results, never downgrade completed status (#2558) - merged.tasks = countChanges( - adapter - .prepare(` - INSERT OR REPLACE INTO tasks ( - milestone_id, slice_id, id, title, status, one_liner, narrative, - verification_result, duration, completed_at, blocker_discovered, - deviations, known_issues, key_files, key_decisions, full_summary_md, - description, estimate, files, verify, inputs, expected_output, - observability_impact, full_plan_md, sequence - ) - SELECT w.milestone_id, w.slice_id, w.id, w.title, - CASE - WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') - THEN m.status ELSE w.status - END, - w.one_liner, w.narrative, - w.verification_result, w.duration, - CASE - WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') - THEN m.completed_at ELSE w.completed_at - END, - w.blocker_discovered, - w.deviations, w.known_issues, w.key_files, w.key_decisions, w.full_summary_md, - w.description, w.estimate, w.files, w.verify, w.inputs, w.expected_output, - w.observability_impact, w.full_plan_md, w.sequence - FROM wt.tasks w - LEFT JOIN tasks m ON m.milestone_id = w.milestone_id AND m.slice_id = w.slice_id AND m.id = w.id - `) - .run(), - ); - // Merge memories — keep worktree-learned insights - merged.memories = countChanges( - adapter - .prepare(` - INSERT OR REPLACE INTO memories ( - seq, id, category, content, confidence, source_unit_type, source_unit_id, - created_at, updated_at, superseded_by, hit_count - ) - SELECT seq, id, category, content, confidence, source_unit_type, source_unit_id, - created_at, updated_at, superseded_by, hit_count - FROM wt.memories - `) - .run(), - ); - // Merge verification evidence — append-only, use INSERT OR IGNORE to avoid duplicates - merged.verification_evidence = countChanges( - adapter - .prepare(` - INSERT OR IGNORE INTO verification_evidence ( - task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at - ) - SELECT task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at - FROM wt.verification_evidence - `) - .run(), - ); - adapter.exec("COMMIT"); - } catch (txErr) { - try { - adapter.exec("ROLLBACK"); - } catch (e) { - logWarning("db", `rollback failed: ${e.message}`); - } - throw txErr; - } - return { ...merged, conflicts }; - } finally { - try { - adapter.exec("DETACH DATABASE wt"); - } catch (e) 
{ - logWarning("db", `detach worktree DB failed: ${e.message}`); - } - } - } catch (err) { - logError("db", "worktree DB reconciliation failed", { - error: err.message, - }); - return { ...zero, conflicts }; - } -} -// ─── Replan & Assessment Helpers ────────────────────────────────────────── -export function insertReplanHistory(entry) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - // INSERT OR REPLACE: idempotent on (milestone_id, slice_id, task_id) via schema v11 unique index. - // Retrying the same replan silently updates summary instead of accumulating duplicate rows. - currentDb - .prepare(`INSERT OR REPLACE INTO replan_history (milestone_id, slice_id, task_id, summary, previous_artifact_path, replacement_artifact_path, created_at) - VALUES (:milestone_id, :slice_id, :task_id, :summary, :previous_artifact_path, :replacement_artifact_path, :created_at)`) - .run({ - ":milestone_id": entry.milestoneId, - ":slice_id": entry.sliceId ?? null, - ":task_id": entry.taskId ?? null, - ":summary": entry.summary, - ":previous_artifact_path": entry.previousArtifactPath ?? null, - ":replacement_artifact_path": entry.replacementArtifactPath ?? null, - ":created_at": new Date().toISOString(), - }); -} -export function insertAssessment(entry) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT OR REPLACE INTO assessments (path, milestone_id, slice_id, task_id, status, scope, full_content, created_at) - VALUES (:path, :milestone_id, :slice_id, :task_id, :status, :scope, :full_content, :created_at)`) - .run({ - ":path": entry.path, - ":milestone_id": entry.milestoneId, - ":slice_id": entry.sliceId ?? null, - ":task_id": entry.taskId ?? null, - ":status": entry.status, - ":scope": entry.scope, - ":full_content": entry.fullContent, - ":created_at": new Date().toISOString(), - }); -} -export function deleteAssessmentByScope(milestoneId, scope) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `DELETE FROM assessments WHERE milestone_id = :mid AND scope = :scope`, - ) - .run({ ":mid": milestoneId, ":scope": scope }); -} -export function deleteVerificationEvidence(milestoneId, sliceId, taskId) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `DELETE FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid AND task_id = :tid`, - ) - .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); -} -export function deleteTask(milestoneId, sliceId, taskId) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - transaction(() => { - // Must delete verification_evidence first (FK constraint) - currentDb - .prepare( - `DELETE FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid AND task_id = :tid`, - ) - .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); - currentDb - .prepare( - `DELETE FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`, - ) - .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); - }); -} -export function deleteSlice(milestoneId, sliceId) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - transaction(() => { - // Cascade-style manual deletion: evidence → tasks → dependencies → slice - currentDb - .prepare( - `DELETE FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid`, - ) - .run({ ":mid": milestoneId, ":sid": sliceId }); - 
currentDb - .prepare( - `DELETE FROM tasks WHERE milestone_id = :mid AND slice_id = :sid`, - ) - .run({ ":mid": milestoneId, ":sid": sliceId }); - currentDb - .prepare( - `DELETE FROM slice_dependencies WHERE milestone_id = :mid AND slice_id = :sid`, - ) - .run({ ":mid": milestoneId, ":sid": sliceId }); - currentDb - .prepare( - `DELETE FROM slice_dependencies WHERE milestone_id = :mid AND depends_on_slice_id = :sid`, - ) - .run({ ":mid": milestoneId, ":sid": sliceId }); - currentDb - .prepare(`DELETE FROM slices WHERE milestone_id = :mid AND id = :sid`) - .run({ ":mid": milestoneId, ":sid": sliceId }); - }); -} -export function deleteMilestone(milestoneId) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - transaction(() => { - currentDb - .prepare(`DELETE FROM verification_evidence WHERE milestone_id = :mid`) - .run({ ":mid": milestoneId }); - currentDb - .prepare(`DELETE FROM quality_gates WHERE milestone_id = :mid`) - .run({ ":mid": milestoneId }); - currentDb - .prepare(`DELETE FROM tasks WHERE milestone_id = :mid`) - .run({ ":mid": milestoneId }); - currentDb - .prepare(`DELETE FROM slice_dependencies WHERE milestone_id = :mid`) - .run({ ":mid": milestoneId }); - currentDb - .prepare(`DELETE FROM slices WHERE milestone_id = :mid`) - .run({ ":mid": milestoneId }); - currentDb - .prepare(`DELETE FROM replan_history WHERE milestone_id = :mid`) - .run({ ":mid": milestoneId }); - currentDb - .prepare(`DELETE FROM assessments WHERE milestone_id = :mid`) - .run({ ":mid": milestoneId }); - currentDb - .prepare(`DELETE FROM artifacts WHERE milestone_id = :mid`) - .run({ ":mid": milestoneId }); - currentDb - .prepare(`DELETE FROM milestones WHERE id = :mid`) - .run({ ":mid": milestoneId }); - }); -} -export function updateSliceFields(milestoneId, sliceId, fields) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`UPDATE slices SET - title = COALESCE(:title, title), - risk = COALESCE(:risk, risk), - depends = COALESCE(:depends, depends), - demo = COALESCE(:demo, demo) - WHERE milestone_id = :milestone_id AND id = :id`) - .run({ - ":milestone_id": milestoneId, - ":id": sliceId, - ":title": fields.title ?? null, - ":risk": fields.risk ?? null, - ":depends": fields.depends ? JSON.stringify(fields.depends) : null, - ":demo": fields.demo ?? null, - }); -} -export function getReplanHistory(milestoneId, sliceId) { - if (!currentDb) return []; - if (sliceId) { - return currentDb - .prepare( - `SELECT * FROM replan_history WHERE milestone_id = :mid AND slice_id = :sid ORDER BY created_at DESC`, - ) - .all({ ":mid": milestoneId, ":sid": sliceId }); - } - return currentDb - .prepare( - `SELECT * FROM replan_history WHERE milestone_id = :mid ORDER BY created_at DESC`, - ) - .all({ ":mid": milestoneId }); -} -export function getAssessment(path) { - if (!currentDb) return null; - const row = currentDb - .prepare(`SELECT * FROM assessments WHERE path = :path`) - .get({ ":path": path }); - return row ?? null; -} -export function getAssessmentByScope(milestoneId, scope) { - if (!currentDb) return null; - const row = currentDb - .prepare( - `SELECT * FROM assessments - WHERE milestone_id = :mid AND scope = :scope - ORDER BY created_at DESC - LIMIT 1`, - ) - .get({ ":mid": milestoneId, ":scope": scope }); - return row ?? 
null; -} -export function getMilestoneValidationAssessment(milestoneId) { - return getAssessmentByScope(milestoneId, "milestone-validation"); -} -// ─── Quality Gates ─────────────────────────────────────────────────────── -function rowToGate(row) { - return { - milestone_id: row["milestone_id"], - slice_id: row["slice_id"], - gate_id: row["gate_id"], - scope: row["scope"], - task_id: row["task_id"] ?? "", - status: row["status"], - verdict: row["verdict"] || "", - rationale: row["rationale"] || "", - findings: row["findings"] || "", - evaluated_at: row["evaluated_at"] ?? null, - }; -} -export function insertGateRow(g) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT OR IGNORE INTO quality_gates (milestone_id, slice_id, gate_id, scope, task_id, status) - VALUES (:mid, :sid, :gid, :scope, :tid, :status)`) - .run({ - ":mid": g.milestoneId, - ":sid": g.sliceId, - ":gid": g.gateId, - ":scope": g.scope, - ":tid": g.taskId ?? "", - ":status": g.status ?? "pending", - }); -} -export function saveGateResult(g) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`UPDATE quality_gates - SET status = 'complete', verdict = :verdict, rationale = :rationale, - findings = :findings, evaluated_at = :evaluated_at - WHERE milestone_id = :mid AND slice_id = :sid AND gate_id = :gid - AND task_id = :tid`) - .run({ - ":mid": g.milestoneId, - ":sid": g.sliceId, - ":gid": g.gateId, - ":tid": g.taskId ?? "", - ":verdict": g.verdict, - ":rationale": g.rationale, - ":findings": g.findings, - ":evaluated_at": new Date().toISOString(), - }); - const outcome = - g.verdict === "pass" - ? "pass" - : g.verdict === "omitted" - ? "manual-attention" - : "fail"; - insertGateRun({ - traceId: `quality-gate:${g.milestoneId}:${g.sliceId}`, - turnId: `gate:${g.gateId}:${g.taskId ?? "slice"}`, - gateId: g.gateId, - gateType: "quality-gate", - milestoneId: g.milestoneId, - sliceId: g.sliceId, - taskId: g.taskId ?? undefined, - outcome, - failureClass: - outcome === "fail" - ? "verification" - : outcome === "manual-attention" - ? "manual-attention" - : "none", - rationale: g.rationale, - findings: g.findings, - attempt: 1, - maxAttempts: 1, - retryable: false, - evaluatedAt: new Date().toISOString(), - }); -} -export function getPendingGates(milestoneId, sliceId, scope) { - if (!currentDb) return []; - const sql = scope - ? `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND scope = :scope AND status = 'pending'` - : `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND status = 'pending'`; - const params = { - ":mid": milestoneId, - ":sid": sliceId, - }; - if (scope) params[":scope"] = scope; - return currentDb.prepare(sql).all(params).map(rowToGate); -} -export function getGateResults(milestoneId, sliceId, scope) { - if (!currentDb) return []; - const sql = scope - ? 
`SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND scope = :scope` - : `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid`; - const params = { - ":mid": milestoneId, - ":sid": sliceId, - }; - if (scope) params[":scope"] = scope; - return currentDb.prepare(sql).all(params).map(rowToGate); -} -export function markAllGatesOmitted(milestoneId, sliceId) { - if (!currentDb) return; - currentDb - .prepare(`UPDATE quality_gates SET status = 'omitted', verdict = 'omitted', evaluated_at = :now - WHERE milestone_id = :mid AND slice_id = :sid AND status = 'pending'`) - .run({ - ":mid": milestoneId, - ":sid": sliceId, - ":now": new Date().toISOString(), - }); -} -export function getPendingSliceGateCount(milestoneId, sliceId) { - if (!currentDb) return 0; - const row = currentDb - .prepare(`SELECT COUNT(*) as cnt FROM quality_gates - WHERE milestone_id = :mid AND slice_id = :sid AND scope = 'slice' AND status = 'pending'`) - .get({ ":mid": milestoneId, ":sid": sliceId }); - return row ? row["cnt"] : 0; -} -/** - * Return pending gate rows owned by a specific workflow turn. - * - * Unlike `getPendingGates(..., scope)`, this filters by the registry's - * `ownerTurn` metadata so callers can distinguish Q3/Q4 (owned by - * gate-evaluate) from Q8 (owned by complete-slice) even though both are - * scope:"slice". Pass `taskId` to narrow task-scoped results to one task. - */ -export function getPendingGatesForTurn(milestoneId, sliceId, turn, taskId) { - if (!currentDb) return []; - const ids = getGateIdsForTurn(turn); - if (ids.size === 0) return []; - const idList = [...ids]; - const placeholders = idList.map((_, i) => `:gid${i}`).join(","); - const params = { - ":mid": milestoneId, - ":sid": sliceId, - }; - idList.forEach((id, i) => { - params[`:gid${i}`] = id; - }); - let sql = `SELECT * FROM quality_gates - WHERE milestone_id = :mid AND slice_id = :sid - AND status = 'pending' - AND gate_id IN (${placeholders})`; - if (taskId !== undefined) { - sql += ` AND task_id = :tid`; - params[":tid"] = taskId; - } - return currentDb.prepare(sql).all(params).map(rowToGate); -} -/** - * Count pending gates for a turn. Convenience wrapper used by state - * derivation to decide whether a phase transition should pause. - */ -export function getPendingGateCountForTurn(milestoneId, sliceId, turn) { - return getPendingGatesForTurn(milestoneId, sliceId, turn).length; -} -/** @deprecated Gate runs are now written to JSONL trace files via appendTraceEvent(). This is a no-op kept for import compatibility. */ -export function insertGateRun(_entry) { - // no-op: gate runs now written to JSONL trace files -} -/** @deprecated Turn git transactions are now written to JSONL audit events. This is a no-op kept for import compatibility. */ -export function upsertTurnGitTransaction(_entry) { - // no-op: turn git transactions now written to JSONL audit events -} -export function recordUokRunStart(entry) { - if (!currentDb) return; - const now = entry.startedAt ?? 
new Date().toISOString(); - currentDb - .prepare(`INSERT INTO uok_runs ( - run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at - ) VALUES ( - :run_id, :session_id, :path, 'started', :started_at, NULL, NULL, :flags_json, :updated_at - ) - ON CONFLICT(run_id) DO UPDATE SET - session_id = excluded.session_id, - path = excluded.path, - status = 'started', - started_at = excluded.started_at, - ended_at = NULL, - error = NULL, - flags_json = excluded.flags_json, - updated_at = excluded.updated_at`) - .run({ - ":run_id": entry.runId, - ":session_id": entry.sessionId ?? null, - ":path": entry.path ?? "", - ":started_at": now, - ":flags_json": JSON.stringify(entry.flags ?? {}), - ":updated_at": now, - }); -} -const MAX_ERROR_STORED_BYTES = 4096; -function capErrorForStorage(error, runId) { - if (!error || error.length <= MAX_ERROR_STORED_BYTES) return error; - try { - const errDir = join(dirname(currentPath), "runtime", "errors"); - mkdirSync(errDir, { recursive: true }); - writeFileSync(join(errDir, `${runId}.txt`), error, "utf-8"); - } catch { - // non-fatal — best-effort spill - } - const head = error.slice(0, 2048); - const tail = error.slice(-2048); - const dropped = error.length - MAX_ERROR_STORED_BYTES; - return `${head}\n\n[...${dropped} chars truncated — full error in .sf/runtime/errors/${runId}.txt]\n\n${tail}`; -} -export function recordUokRunExit(entry) { - if (!currentDb) return; - const now = entry.endedAt ?? new Date().toISOString(); - currentDb - .prepare(`INSERT INTO uok_runs ( - run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at - ) VALUES ( - :run_id, :session_id, :path, :status, :started_at, :ended_at, :error, :flags_json, :updated_at - ) - ON CONFLICT(run_id) DO UPDATE SET - session_id = COALESCE(excluded.session_id, uok_runs.session_id), - path = CASE WHEN excluded.path = '' THEN uok_runs.path ELSE excluded.path END, - status = excluded.status, - ended_at = excluded.ended_at, - error = excluded.error, - flags_json = CASE WHEN excluded.flags_json = '{}' THEN uok_runs.flags_json ELSE excluded.flags_json END, - updated_at = excluded.updated_at`) - .run({ - ":run_id": entry.runId, - ":session_id": entry.sessionId ?? null, - ":path": entry.path ?? "", - ":status": entry.status ?? "ok", - ":started_at": entry.startedAt ?? now, - ":ended_at": now, - ":error": entry.error - ? capErrorForStorage(entry.error, entry.runId) - : null, - ":flags_json": JSON.stringify(entry.flags ?? {}), - ":updated_at": now, - }); -} -export function getUokRuns(limit = 500) { - if (!currentDb) return []; - return currentDb - .prepare( - `SELECT run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at - FROM uok_runs - ORDER BY started_at DESC - LIMIT :limit`, - ) - .all({ ":limit": limit }) - .map((row) => ({ - runId: row.run_id, - sessionId: row.session_id, - path: row.path, - status: row.status, - startedAt: row.started_at, - endedAt: row.ended_at, - error: row.error, - flags: (() => { - try { - return JSON.parse(row.flags_json || "{}"); - } catch { - return {}; - } - })(), - updatedAt: row.updated_at, - })); -} -/** @deprecated Audit events are now written exclusively to JSONL files via emitUokAuditEvent(). This is a no-op kept for import compatibility. 
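// A minimal usage sketch of the UOK run ledger above, assuming imports from
// ./sf-db.js; the run id, path, and flags are hypothetical. Error text larger
// than 4096 bytes is truncated by capErrorForStorage and spilled to
// .sf/runtime/errors/<run_id>.txt, as its truncation notice describes.
import { recordUokRunStart, recordUokRunExit, getUokRuns } from "./sf-db.js";

const runId = "run-0001";
recordUokRunStart({ runId, sessionId: "sess-1", path: "003/S01", flags: { resume: false } });
// ...unit of knowledge executes...
recordUokRunExit({ runId, status: "error", error: "stack trace text", flags: { resume: false } });
const recentRuns = getUokRuns(10);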
*/ -export function insertAuditEvent(_entry) { - // no-op: audit events now written exclusively to JSONL files -} -// ─── Single-writer bypass wrappers ─────────────────────────────────────── -// These wrappers exist so modules outside this file never need to call -// `_getAdapter()` for writes. Each one is a byte-equivalent replacement for -// a raw prepare/run previously issued from another module. Keep them -// minimal and direct — they exist to hold SQL text in one place, not to -// add new behavior. -/** Delete a decision row by id. Used by db-writer.ts rollback on disk-write failure. */ -export function deleteDecisionById(id) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb.prepare("DELETE FROM decisions WHERE id = :id").run({ ":id": id }); -} -/** Delete a requirement row by id. Used by db-writer.ts rollback on disk-write failure. */ -export function deleteRequirementById(id) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare("DELETE FROM requirements WHERE id = :id") - .run({ ":id": id }); -} -/** Delete an artifact row by path. Used by db-writer.ts rollback on disk-write failure. */ -export function deleteArtifactByPath(path) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare("DELETE FROM artifacts WHERE path = :path") - .run({ ":path": path }); -} -/** - * Drop all rows from tasks/slices/milestones in dependency order inside a - * transaction. Used by `sf recover` to rebuild engine state from markdown. - */ -export function clearEngineHierarchy() { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - transaction(() => { - currentDb.exec("DELETE FROM tasks"); - currentDb.exec("DELETE FROM slices"); - currentDb.exec("DELETE FROM milestones"); - }); -} -/** - * INSERT OR IGNORE a slice during event replay (workflow-reconcile.ts). - * Strict insert-or-ignore semantics are required here to avoid the - * `insertSlice` ON CONFLICT path that could downgrade an already-completed - * slice back to 'pending'. - */ -export function insertOrIgnoreSlice(args) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT OR IGNORE INTO slices (milestone_id, id, title, status, created_at) - VALUES (:mid, :sid, :title, 'pending', :ts)`) - .run({ - ":mid": args.milestoneId, - ":sid": args.sliceId, - ":title": args.title, - ":ts": args.createdAt, - }); -} -/** - * INSERT OR IGNORE a task during event replay (workflow-reconcile.ts). - * Same rationale as `insertOrIgnoreSlice`. - */ -export function insertOrIgnoreTask(args) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT OR IGNORE INTO tasks (milestone_id, slice_id, id, title, status, created_at) - VALUES (:mid, :sid, :tid, :title, 'pending', :ts)`) - .run({ - ":mid": args.milestoneId, - ":sid": args.sliceId, - ":tid": args.taskId, - ":title": args.title, - ":ts": args.createdAt, - }); -} -/** - * Stamp the `replan_triggered_at` column on a slice. Used by triage-resolution - * when a user capture requests a replan so the dispatcher can detect the - * trigger via DB in addition to the on-disk REPLAN-TRIGGER.md marker. 
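// A minimal replay sketch for the insert-or-ignore wrappers above, assuming
// imports from ./sf-db.js; ids and titles are hypothetical. Because both
// statements are INSERT OR IGNORE, replaying an event for a slice or task
// that already exists leaves its current status untouched.
import { insertOrIgnoreSlice, insertOrIgnoreTask } from "./sf-db.js";

const ts = new Date().toISOString();
insertOrIgnoreSlice({ milestoneId: "003", sliceId: "S01", title: "Auth slice", createdAt: ts });
insertOrIgnoreTask({ milestoneId: "003", sliceId: "S01", taskId: "T01", title: "Add login route", createdAt: ts });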
- */ -export function setSliceReplanTriggeredAt(milestoneId, sliceId, ts) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - "UPDATE slices SET replan_triggered_at = :ts WHERE milestone_id = :mid AND id = :sid", - ) - .run({ ":ts": ts, ":mid": milestoneId, ":sid": sliceId }); -} -function boolToInt(value) { - if (value === null || value === undefined) return null; - return value ? 1 : 0; -} -export function insertLlmTaskOutcome(input) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - try { - currentDb - .prepare(`INSERT INTO llm_task_outcomes ( - model_id, - provider, - unit_type, - unit_id, - succeeded, - retries, - escalated, - verification_passed, - blocker_discovered, - duration_ms, - tokens_total, - cost_usd, - failure_mode, - recorded_at - ) VALUES ( - :model_id, - :provider, - :unit_type, - :unit_id, - :succeeded, - :retries, - :escalated, - :verification_passed, - :blocker_discovered, - :duration_ms, - :tokens_total, - :cost_usd, - :failure_mode, - :recorded_at - ) - ON CONFLICT(unit_type, unit_id, recorded_at) DO UPDATE SET - model_id = excluded.model_id, - provider = excluded.provider, - succeeded = excluded.succeeded, - retries = excluded.retries, - escalated = excluded.escalated, - verification_passed = excluded.verification_passed, - blocker_discovered = excluded.blocker_discovered, - duration_ms = excluded.duration_ms, - tokens_total = excluded.tokens_total, - cost_usd = excluded.cost_usd, - failure_mode = excluded.failure_mode`) - .run({ - ":model_id": input.modelId, - ":provider": input.provider, - ":unit_type": input.unitType, - ":unit_id": input.unitId, - ":succeeded": boolToInt(input.succeeded), - ":retries": input.retries ?? 0, - ":escalated": boolToInt(input.escalated ?? false), - ":verification_passed": boolToInt(input.verification_passed ?? null), - ":blocker_discovered": boolToInt(input.blocker_discovered ?? false), - ":duration_ms": input.duration_ms ?? null, - ":tokens_total": input.tokens_total ?? null, - ":cost_usd": input.cost_usd ?? null, - ":failure_mode": input.failure_mode ?? null, - ":recorded_at": input.recorded_at ?? Date.now(), - }); - return true; - } catch { - return false; - } -} - -/** - * Query LLM task outcomes for a specific unit. - * - * Purpose: enable outcome-learning and cost-guard gates to inspect - * historical performance of a unit type + id combination. - * - * Consumer: uok/outcome-learning-gate.js, uok/cost-guard-gate.js. - */ -export function getLlmTaskOutcomesByUnit(unitType, unitId, limit = 20) { - if (!currentDb) return []; - try { - return currentDb - .prepare( - `SELECT - model_id, - provider, - unit_type, - unit_id, - succeeded, - retries, - escalated, - verification_passed, - blocker_discovered, - duration_ms, - tokens_total, - cost_usd, - recorded_at - FROM llm_task_outcomes - WHERE unit_type = :unit_type - AND unit_id = :unit_id - ORDER BY recorded_at DESC - LIMIT :limit`, - ) - .all({ - ":unit_type": unitType, - ":unit_id": unitId, - ":limit": limit, - }); - } catch { - return []; - } -} -/** - * Query LLM task outcomes for a specific model. - * - * Purpose: enable cost-guard to detect models with high failure rates - * or excessive cumulative spend. - * - * Consumer: uok/cost-guard-gate.js. 
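// A minimal usage sketch of the outcome-learning wrappers above, assuming
// imports from ./sf-db.js; the model id, unit id, and numbers are hypothetical.
import { insertLlmTaskOutcome, getLlmTaskOutcomeStats } from "./sf-db.js";

insertLlmTaskOutcome({
  modelId: "model-a",
  provider: "example-provider",
  unitType: "task",
  unitId: "003/S01/T01",
  succeeded: true,
  retries: 1,
  duration_ms: 42_000,
  tokens_total: 18_500,
  cost_usd: 0.12,
});

// A cost-guard style caller could then inspect the rolling 24h window:
const stats = getLlmTaskOutcomeStats("model-a", 24);
const failureRate = stats.total > 0 ? stats.failed / stats.total : 0;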
- */ -export function getLlmTaskOutcomesByModel(modelId, limit = 50) { - if (!currentDb) return []; - try { - return currentDb - .prepare( - `SELECT - model_id, - provider, - unit_type, - unit_id, - succeeded, - retries, - escalated, - verification_passed, - blocker_discovered, - duration_ms, - tokens_total, - cost_usd, - recorded_at - FROM llm_task_outcomes - WHERE model_id = :model_id - ORDER BY recorded_at DESC - LIMIT :limit`, - ) - .all({ - ":model_id": modelId, - ":limit": limit, - }); - } catch { - return []; - } -} -/** - * Query recent LLM task outcomes across all units. - * - * Purpose: provide a rolling window of outcomes for system-wide - * health and spend analysis. - * - * Consumer: uok/diagnostic-synthesis.js, uok/cost-guard-gate.js. - */ -export function getRecentLlmTaskOutcomes(hours = 24, limit = 100) { - if (!currentDb) return []; - const cutoff = Date.now() - hours * 60 * 60 * 1000; - try { - return currentDb - .prepare( - `SELECT - model_id, - provider, - unit_type, - unit_id, - succeeded, - retries, - escalated, - verification_passed, - blocker_discovered, - duration_ms, - tokens_total, - cost_usd, - recorded_at - FROM llm_task_outcomes - WHERE recorded_at >= :cutoff - ORDER BY recorded_at DESC - LIMIT :limit`, - ) - .all({ - ":cutoff": cutoff, - ":limit": limit, - }); - } catch { - return []; - } -} -/** - * Aggregate LLM task outcome statistics for a model over a time window. - * - * Returns { total, succeeded, failed, totalCostUsd, totalTokens, avgDurationMs }. - * - * Consumer: uok/cost-guard-gate.js, uok/outcome-learning-gate.js. - */ -export function getLlmTaskOutcomeStats(modelId, windowHours = 24) { - if (!currentDb) { - return { - total: 0, - succeeded: 0, - failed: 0, - totalCostUsd: 0, - totalTokens: 0, - avgDurationMs: 0, - }; - } - const cutoff = Date.now() - windowHours * 60 * 60 * 1000; - try { - const row = currentDb - .prepare( - `SELECT - COUNT(*) AS total, - COALESCE(SUM(CASE WHEN succeeded = 1 THEN 1 ELSE 0 END), 0) AS succeeded, - COALESCE(SUM(CASE WHEN succeeded = 0 THEN 1 ELSE 0 END), 0) AS failed, - COALESCE(SUM(cost_usd), 0) AS totalCostUsd, - COALESCE(SUM(tokens_total), 0) AS totalTokens, - COALESCE(AVG(duration_ms), 0) AS avgDurationMs - FROM llm_task_outcomes - WHERE model_id = :model_id - AND recorded_at >= :cutoff`, - ) - .get({ ":model_id": modelId, ":cutoff": cutoff }); - if (!row) { - return { - total: 0, - succeeded: 0, - failed: 0, - totalCostUsd: 0, - totalTokens: 0, - avgDurationMs: 0, - }; - } - return { - total: row.total ?? 0, - succeeded: row.succeeded ?? 0, - failed: row.failed ?? 0, - totalCostUsd: row.totalCostUsd ?? 0, - totalTokens: row.totalTokens ?? 0, - avgDurationMs: row.avgDurationMs ?? 0, - }; - } catch { - return { - total: 0, - succeeded: 0, - failed: 0, - totalCostUsd: 0, - totalTokens: 0, - avgDurationMs: 0, - }; - } -} -/** - * Aggregate gate run statistics for a specific gate over a time window. - * - * Returns { total, pass, fail, retry, manualAttention, lastEvaluatedAt }. - * - * Consumer: uok/diagnostic-synthesis.js, uok/gate-runner.js health checks. - */ -export function getGateRunStats(gateId, windowHours = 24) { - try { - const basePath = - currentPath && currentPath !== ":memory:" - ? 
dirname(dirname(currentPath)) - : process.cwd(); - const events = readTraceEvents(basePath, "gate_run", windowHours).filter( - (e) => e.gateId === gateId, - ); - const stats = { - total: events.length, - pass: 0, - fail: 0, - retry: 0, - manualAttention: 0, - lastEvaluatedAt: null, - }; - for (const e of events) { - if (e.outcome === "pass") stats.pass++; - else if (e.outcome === "fail") stats.fail++; - else if (e.outcome === "retry") stats.retry++; - else if (e.outcome === "manual-attention") stats.manualAttention++; - if ( - !stats.lastEvaluatedAt || - (e.evaluatedAt ?? e.ts) > stats.lastEvaluatedAt - ) - stats.lastEvaluatedAt = e.evaluatedAt ?? e.ts; - } - return stats; - } catch { - return { - total: 0, - pass: 0, - fail: 0, - retry: 0, - manualAttention: 0, - lastEvaluatedAt: null, - }; - } -} - -/** - * Read the circuit breaker state for a specific gate. - * - * Returns { gateId, state, failureStreak, lastFailureAt, openedAt, halfOpenAttempts, updatedAt }. - * If no record exists, returns a default closed state. - * - * Consumer: uok/gate-runner.js before executing a gate. - */ -export function getGateCircuitBreaker(gateId) { - if (!currentDb) { - return { - gateId, - state: "closed", - failureStreak: 0, - lastFailureAt: null, - openedAt: null, - halfOpenAttempts: 0, - updatedAt: null, - }; - } - try { - const row = currentDb - .prepare( - `SELECT gate_id, state, failure_streak, last_failure_at, opened_at, half_open_attempts, updated_at - FROM gate_circuit_breakers - WHERE gate_id = :gate_id`, - ) - .get({ ":gate_id": gateId }); - if (!row) { - return { - gateId, - state: "closed", - failureStreak: 0, - lastFailureAt: null, - openedAt: null, - halfOpenAttempts: 0, - updatedAt: null, - }; - } - return { - gateId: row.gate_id, - state: row.state, - failureStreak: row.failure_streak ?? 0, - lastFailureAt: row.last_failure_at ?? null, - openedAt: row.opened_at ?? null, - halfOpenAttempts: row.half_open_attempts ?? 0, - updatedAt: row.updated_at ?? null, - }; - } catch { - return { - gateId, - state: "closed", - failureStreak: 0, - lastFailureAt: null, - openedAt: null, - halfOpenAttempts: 0, - updatedAt: null, - }; - } -} -/** - * Update the circuit breaker state for a specific gate. - * - * Consumer: uok/gate-runner.js after executing a gate. - */ -export function updateGateCircuitBreaker(gateId, updates) { - if (!currentDb) return; - currentDb - .prepare( - `INSERT INTO gate_circuit_breakers ( - gate_id, state, failure_streak, last_failure_at, opened_at, half_open_attempts, updated_at - ) VALUES ( - :gate_id, :state, :failure_streak, :last_failure_at, :opened_at, :half_open_attempts, :updated_at - ) - ON CONFLICT(gate_id) DO UPDATE SET - state = excluded.state, - failure_streak = excluded.failure_streak, - last_failure_at = COALESCE(excluded.last_failure_at, gate_circuit_breakers.last_failure_at), - opened_at = COALESCE(excluded.opened_at, gate_circuit_breakers.opened_at), - half_open_attempts = excluded.half_open_attempts, - updated_at = excluded.updated_at`, - ) - .run({ - ":gate_id": gateId, - ":state": updates.state ?? "closed", - ":failure_streak": updates.failureStreak ?? 0, - ":last_failure_at": updates.lastFailureAt ?? null, - ":opened_at": updates.openedAt ?? null, - ":half_open_attempts": updates.halfOpenAttempts ?? 0, - ":updated_at": new Date().toISOString(), - }); - return { total: 0, avgMs: 0, p50Ms: 0, p95Ms: 0, maxMs: 0 }; -} -export function getGateLatencyStats(gateId, windowHours = 24) { - try { - const basePath = - currentPath && currentPath !== ":memory:" - ? 
dirname(dirname(currentPath)) - : process.cwd(); - const durations = readTraceEvents(basePath, "gate_run", windowHours) - .filter((e) => e.gateId === gateId && typeof e.durationMs === "number") - .map((e) => e.durationMs) - .sort((a, b) => a - b); - if (durations.length === 0) - return { - p50: null, - p95: null, - count: 0, - total: 0, - avgMs: 0, - p50Ms: 0, - p95Ms: 0, - maxMs: 0, - }; - const p50Ms = durations[Math.floor(durations.length * 0.5)] ?? 0; - const p95Ms = durations[Math.floor(durations.length * 0.95)] ?? 0; - const maxMs = durations[durations.length - 1] ?? 0; - const avgMs = Math.round( - durations.reduce((s, v) => s + v, 0) / durations.length, - ); - return { - p50: p50Ms, - p95: p95Ms, - count: durations.length, - total: durations.length, - avgMs, - p50Ms, - p95Ms, - maxMs, - }; - } catch { - return { - p50: null, - p95: null, - count: 0, - total: 0, - avgMs: 0, - p50Ms: 0, - p95Ms: 0, - maxMs: 0, - }; - } -} -export function getDistinctGateIds() { - try { - const basePath = - currentPath && currentPath !== ":memory:" - ? dirname(dirname(currentPath)) - : process.cwd(); - const events = readTraceEvents(basePath, "gate_run", 24 * 30); // 30 days - return [...new Set(events.map((e) => e.gateId).filter(Boolean))]; - } catch { - return []; - } -} -export function insertUokMessage(msg) { - if (!currentDb) return; - currentDb - .prepare( - `INSERT OR IGNORE INTO uok_messages (id, from_agent, to_agent, body, metadata_json, sent_at, delivered_at) - VALUES (:id, :from_agent, :to_agent, :body, :metadata_json, :sent_at, :delivered_at)`, - ) - .run({ - ":id": msg.id, - ":from_agent": msg.from, - ":to_agent": msg.to, - ":body": msg.body ?? "", - ":metadata_json": JSON.stringify(msg.metadata ?? {}), - ":sent_at": msg.sentAt, - ":delivered_at": msg.deliveredAt ?? 
null, - }); -} -export function getUokMessagesForAgent( - agentId, - limit = 1000, - unreadOnly = false, -) { - if (!currentDb) return []; - try { - let sql = `SELECT m.id, m.from_agent AS "from", m.to_agent AS "to", m.body, m.metadata_json AS metadataJson, m.sent_at AS sentAt, m.delivered_at AS deliveredAt, - CASE WHEN r.agent_id IS NOT NULL THEN 1 ELSE 0 END AS read - FROM uok_messages m - LEFT JOIN uok_message_reads r ON r.message_id = m.id AND r.agent_id = :agent_id - WHERE m.to_agent = :agent_id`; - if (unreadOnly) { - sql += " AND r.agent_id IS NULL"; - } - sql += " ORDER BY m.sent_at ASC LIMIT :limit"; - const rows = currentDb.prepare(sql).all({ - ":agent_id": agentId, - ":limit": Math.max(1, Math.min(10_000, Number(limit) || 1000)), - }); - return rows.map((r) => ({ - id: r.id, - from: r.from, - to: r.to, - body: r.body, - metadata: parseJsonObject(r.metadataJson, {}), - sentAt: r.sentAt, - deliveredAt: r.deliveredAt, - read: !!r.read, - })); - } catch { - return []; - } -} -export function getUokConversation(agentA, agentB, limit = 1000) { - if (!currentDb) return []; - try { - const rows = currentDb - .prepare( - `SELECT id, from_agent AS "from", to_agent AS "to", body, metadata_json AS metadataJson, sent_at AS sentAt, delivered_at AS deliveredAt - FROM uok_messages - WHERE (from_agent = :a AND to_agent = :b) OR (from_agent = :b AND to_agent = :a) - ORDER BY sent_at DESC - LIMIT :limit`, - ) - .all({ ":a": agentA, ":b": agentB, ":limit": limit }); - return rows.map((r) => ({ - id: r.id, - from: r.from, - to: r.to, - body: r.body, - metadata: parseJsonObject(r.metadataJson, {}), - sentAt: r.sentAt, - deliveredAt: r.deliveredAt, - })); - } catch { - return []; - } -} -export function markUokMessageRead(messageId, agentId) { - if (!currentDb) return false; - try { - currentDb - .prepare( - `INSERT OR IGNORE INTO uok_message_reads (message_id, agent_id, read_at) VALUES (:message_id, :agent_id, :read_at)`, - ) - .run({ - ":message_id": messageId, - ":agent_id": agentId, - ":read_at": new Date().toISOString(), - }); - return true; - } catch { - return false; - } -} -export function getUokMessageUnreadCount(agentId) { - if (!currentDb) return 0; - try { - const row = currentDb - .prepare( - `SELECT COUNT(*) AS cnt FROM uok_messages m - WHERE m.to_agent = :agent_id - AND NOT EXISTS ( - SELECT 1 FROM uok_message_reads r - WHERE r.message_id = m.id AND r.agent_id = :agent_id - )`, - ) - .get({ ":agent_id": agentId }); - return row?.cnt ?? 0; - } catch { - return 0; - } -} -export function compactUokMessages(retentionDays) { - if (!currentDb) return { before: 0, after: 0 }; - try { - const cutoff = new Date( - Date.now() - retentionDays * 24 * 60 * 60 * 1000, - ).toISOString(); - const beforeRow = currentDb - .prepare("SELECT COUNT(*) AS cnt FROM uok_messages") - .get(); - currentDb - .prepare("DELETE FROM uok_messages WHERE sent_at < :cutoff") - .run({ ":cutoff": cutoff }); - const afterRow = currentDb - .prepare("SELECT COUNT(*) AS cnt FROM uok_messages") - .get(); - return { before: beforeRow?.cnt ?? 0, after: afterRow?.cnt ?? 
0 }; - } catch { - return { before: 0, after: 0 }; - } -} -export function getUokMessageReadIds(agentId) { - if (!currentDb) return []; - try { - const rows = currentDb - .prepare( - "SELECT message_id FROM uok_message_reads WHERE agent_id = :agent_id", - ) - .all({ ":agent_id": agentId }); - return rows.map((r) => r.message_id); - } catch { - return []; - } -} -export function getUokMessageBusMetrics() { - if (!currentDb) { - return { - totalMessages: 0, - totalUnread: 0, - uniqueAgents: 0, - uniqueConversations: 0, - }; - } - try { - const totalRow = currentDb - .prepare("SELECT COUNT(*) AS cnt FROM uok_messages") - .get(); - const unreadRow = currentDb - .prepare( - `SELECT COUNT(*) AS cnt FROM uok_messages m - WHERE NOT EXISTS ( - SELECT 1 FROM uok_message_reads r - WHERE r.message_id = m.id - AND r.agent_id = m.to_agent - )`, - ) - .get(); - const agentsRow = currentDb - .prepare(`SELECT COUNT(DISTINCT to_agent) AS cnt FROM uok_messages`) - .get(); - const convRow = currentDb - .prepare( - `SELECT COUNT(DISTINCT from_agent || ':' || to_agent) AS cnt FROM uok_messages`, - ) - .get(); - return { - totalMessages: totalRow?.cnt ?? 0, - totalUnread: unreadRow?.cnt ?? 0, - uniqueAgents: agentsRow?.cnt ?? 0, - uniqueConversations: convRow?.cnt ?? 0, - }; - } catch { - return { - totalMessages: 0, - totalUnread: 0, - uniqueAgents: 0, - uniqueConversations: 0, - }; - } -} -function normalizeScheduleScope(scope) { - return scope === "global" ? "global" : "project"; -} -function scheduleEntryFromRow(row) { - if (!row) return null; - const full = parseJsonObject(row.full_json, {}); - return { - ...full, - schemaVersion: row.schema_version ?? full.schemaVersion ?? 1, - id: row.id, - kind: row.kind, - status: row.status, - due_at: row.due_at, - created_at: row.created_at, - snoozed_at: row.snoozed_at ?? full.snoozed_at, - payload: parseJsonObject(row.payload_json, full.payload ?? {}), - created_by: row.created_by, - autonomous_dispatch: !!row.autonomous_dispatch, - }; -} -/** - * Append a schedule entry to the DB-backed schedule ledger. - * - * Purpose: keep time-bound reminders in structured SQLite state so status, - * due-date, and scope queries are schema-owned instead of JSONL-owned. - * - * Consumer: schedule-store.js for /schedule and launch/auto due-item checks. - */ -export function insertScheduleEntry(scope, entry, importedFrom = null) { - if (!currentDb) return; - const normalizedScope = normalizeScheduleScope(scope); - const schemaVersion = entry.schemaVersion ?? 1; - const full = { schemaVersion, ...entry }; - currentDb - .prepare( - `INSERT INTO schedule_entries ( - scope, id, schema_version, kind, status, due_at, created_at, - snoozed_at, payload_json, created_by, autonomous_dispatch, full_json, - imported_from - ) VALUES ( - :scope, :id, :schema_version, :kind, :status, :due_at, :created_at, - :snoozed_at, :payload_json, :created_by, :autonomous_dispatch, :full_json, - :imported_from - )`, - ) - .run({ - ":scope": normalizedScope, - ":id": entry.id, - ":schema_version": schemaVersion, - ":kind": entry.kind ?? "reminder", - ":status": entry.status ?? "pending", - ":due_at": entry.due_at ?? "", - ":created_at": entry.created_at ?? "", - ":snoozed_at": entry.snoozed_at ?? null, - ":payload_json": JSON.stringify(entry.payload ?? {}), - ":created_by": entry.created_by ?? "user", - ":autonomous_dispatch": entry.autonomous_dispatch ? 1 : 0, - ":full_json": JSON.stringify(full), - ":imported_from": importedFrom, - }); -} -/** - * Return latest schedule entries per id for a scope. 
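// A minimal usage sketch of the schedule ledger wrappers, assuming imports
// from ./sf-db.js; the entry id, due date, and payload are hypothetical.
import { insertScheduleEntry, getScheduleEntries } from "./sf-db.js";

insertScheduleEntry("project", {
  id: "sched-0001",
  kind: "reminder",
  status: "pending",
  due_at: "2026-01-15T09:00:00Z",
  created_at: new Date().toISOString(),
  payload: { note: "Re-check parked milestone 004" },
  created_by: "user",
  autonomous_dispatch: false,
});

// Because the ledger is append-only, reads return only the latest row per id:
const pending = getScheduleEntries("project").filter((e) => e.status === "pending");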
- * - * Purpose: preserve append-ledger semantics while serving queries from SQLite. - * - * Consumer: schedule-store.js readEntries/findDue/findUpcoming. - */ -export function getScheduleEntries(scope) { - if (!currentDb) return []; - const normalizedScope = normalizeScheduleScope(scope); - try { - const rows = currentDb - .prepare( - `SELECT s.* - FROM schedule_entries s - JOIN ( - SELECT id, MAX(seq) AS max_seq - FROM schedule_entries - WHERE scope = :scope - GROUP BY id - ) latest ON latest.id = s.id AND latest.max_seq = s.seq - WHERE s.scope = :scope - ORDER BY s.due_at ASC, s.created_at ASC, s.seq ASC`, - ) - .all({ ":scope": normalizedScope }); - return rows.map(scheduleEntryFromRow).filter(Boolean); - } catch { - return []; - } -} -export function countScheduleEntries(scope) { - if (!currentDb) return 0; - const normalizedScope = normalizeScheduleScope(scope); - try { - const row = currentDb - .prepare( - "SELECT COUNT(*) AS cnt FROM schedule_entries WHERE scope = :scope", - ) - .get({ ":scope": normalizedScope }); - return row?.cnt ?? 0; - } catch { - return 0; - } -} -function asStringOrNull(value) { - return typeof value === "string" && value.length > 0 ? value : null; -} -/** - * Persist a repository profile snapshot and update current file observations. - * - * Purpose: make harness evolution's read-only repo facts queryable across - * sessions while preserving first-seen timestamps for untracked observations. - * - * Consumer: `/harness profile` and future pre-plan profile snapshots. - */ -export function recordRepoProfile(profile) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - transaction(() => { - currentDb - .prepare(`INSERT OR REPLACE INTO repo_profiles ( - profile_id, project_hash, project_root, head, branch, remote_hash, - dirty, profile_json, created_at - ) VALUES ( - :profile_id, :project_hash, :project_root, :head, :branch, :remote_hash, - :dirty, :profile_json, :created_at - )`) - .run({ - ":profile_id": profile.profileId, - ":project_hash": profile.projectHash, - ":project_root": profile.projectRoot, - ":head": profile.git.head, - ":branch": profile.git.branch, - ":remote_hash": profile.git.remoteHash, - ":dirty": profile.git.dirty ? 
1 : 0, - ":profile_json": JSON.stringify(profile), - ":created_at": profile.createdAt, - }); - const stmt = currentDb.prepare(`INSERT INTO repo_file_observations ( - path, latest_profile_id, git_status, ownership, language, size_bytes, - content_hash, summary, first_seen_at, last_seen_at, adopted_at, - adoption_unit_id - ) VALUES ( - :path, :latest_profile_id, :git_status, :ownership, :language, :size_bytes, - :content_hash, :summary, :first_seen_at, :last_seen_at, :adopted_at, - :adoption_unit_id - ) - ON CONFLICT(path) DO UPDATE SET - latest_profile_id = excluded.latest_profile_id, - git_status = excluded.git_status, - ownership = CASE - WHEN repo_file_observations.ownership = 'sf_generated' - THEN repo_file_observations.ownership - WHEN repo_file_observations.ownership = 'candidate_harness' - THEN repo_file_observations.ownership - ELSE excluded.ownership - END, - language = excluded.language, - size_bytes = excluded.size_bytes, - content_hash = excluded.content_hash, - summary = excluded.summary, - first_seen_at = repo_file_observations.first_seen_at, - last_seen_at = excluded.last_seen_at, - adopted_at = COALESCE(repo_file_observations.adopted_at, excluded.adopted_at), - adoption_unit_id = COALESCE(repo_file_observations.adoption_unit_id, excluded.adoption_unit_id)`); - for (const file of profile.git.changedFiles) { - stmt.run({ - ":path": file.path, - ":latest_profile_id": profile.profileId, - ":git_status": file.gitStatus, - ":ownership": file.ownership, - ":language": file.language, - ":size_bytes": file.sizeBytes, - ":content_hash": file.contentHash, - ":summary": file.summary, - ":first_seen_at": file.firstSeenAt, - ":last_seen_at": file.lastSeenAt, - ":adopted_at": file.adoptedAt, - ":adoption_unit_id": file.adoptionUnitId, - }); - } - }); -} -/** - * Return the most recently recorded repository profile. - * - * Purpose: let harness planning and diagnostics inspect the latest factual - * repo snapshot without re-running the profiler. - * - * Consumer: harness status commands and future plan-phase coverage checks. - */ -export function getLatestRepoProfile() { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const row = currentDb - .prepare(`SELECT profile_id, project_hash, project_root, head, branch, remote_hash, - dirty, profile_json, created_at - FROM repo_profiles - ORDER BY created_at DESC, profile_id DESC - LIMIT 1`) - .get(); - if (!row) return null; - return { - profileId: row["profile_id"], - projectHash: row["project_hash"], - projectRoot: row["project_root"], - head: asStringOrNull(row["head"]), - branch: asStringOrNull(row["branch"]), - remoteHash: asStringOrNull(row["remote_hash"]), - dirty: row["dirty"] === 1, - profileJson: row["profile_json"] ?? "{}", - createdAt: row["created_at"], - }; -} -/** - * Return the current file observations accumulated by repo profiling. - * - * Purpose: keep untracked and modified file awareness queryable without - * treating those paths as SF-owned artifacts. - * - * Consumer: harness planning, diagnostics, and future drift detection. 
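// A minimal usage sketch of the repo-profile wrappers above, assuming imports
// from ./sf-db.js; the profile shape shows only the fields the writes read
// (profileId, projectHash, projectRoot, git.*, git.changedFiles[]), and every
// value is hypothetical.
import { recordRepoProfile, getRepoFileObservations } from "./sf-db.js";

const seenAt = new Date().toISOString();
recordRepoProfile({
  profileId: "prof-0001",
  projectHash: "abc123",
  projectRoot: "/work/repo",
  createdAt: seenAt,
  git: {
    head: "deadbeef",
    branch: "main",
    remoteHash: "cafe1234",
    dirty: true,
    changedFiles: [
      {
        path: "scripts/build.sh",
        gitStatus: "untracked",
        ownership: "unknown",
        language: "shell",
        sizeBytes: 512,
        contentHash: "h:1f2e3d",
        summary: "ad-hoc build helper",
        firstSeenAt: seenAt,
        lastSeenAt: seenAt,
        adoptedAt: null,
        adoptionUnitId: null,
      },
    ],
  },
});

// Later reads keep first_seen_at and any sf_generated/candidate_harness
// ownership recorded by earlier observations of the same path:
const untracked = getRepoFileObservations().filter((f) => f.gitStatus === "untracked");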
- */ -export function getRepoFileObservations() { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - return currentDb - .prepare(`SELECT path, latest_profile_id, git_status, ownership, language, - size_bytes, content_hash, summary, first_seen_at, last_seen_at, - adopted_at, adoption_unit_id - FROM repo_file_observations - ORDER BY path ASC`) - .all() - .map((row) => ({ - path: row["path"], - latestProfileId: row["latest_profile_id"], - gitStatus: row["git_status"], - ownership: row["ownership"], - language: asStringOrNull(row["language"]), - sizeBytes: row["size_bytes"] ?? 0, - contentHash: asStringOrNull(row["content_hash"]), - summary: asStringOrNull(row["summary"]), - firstSeenAt: row["first_seen_at"], - lastSeenAt: row["last_seen_at"], - adoptedAt: asStringOrNull(row["adopted_at"]), - adoptionUnitId: asStringOrNull(row["adoption_unit_id"]), - })); -} -function intBool(value) { - return value ? 1 : 0; -} -function parseJsonObject(raw, fallback = {}) { - try { - return JSON.parse(raw); - } catch { - return fallback; - } -} -function solverEvalRunFromRow(row) { - return { - runId: row["run_id"], - suiteSource: row["suite_source"], - casesCount: row["cases_count"] ?? 0, - summary: parseJsonObject(row["summary_json"], {}), - reportPath: row["report_path"], - resultsPath: row["results_path"], - dbRecorded: row["db_recorded"] === 1, - createdAt: row["created_at"], - updatedAt: row["updated_at"], - }; -} -function solverEvalCaseFromRow(row) { - return { - runId: row["run_id"], - caseId: row["case_id"], - title: row["title"], - mode: row["mode"], - passed: row["passed"] === 1, - falseComplete: row["false_complete"] === 1, - durationMs: row["duration_ms"], - commandStatus: row["command_status"], - solverOutcome: asStringOrNull(row["solver_outcome"]), - pddComplete: - row["pdd_complete"] === null || row["pdd_complete"] === undefined - ? null - : row["pdd_complete"] === 1, - result: parseJsonObject(row["result_json"], {}), - createdAt: row["created_at"], - }; -} -function headlessRunFromRow(row) { - return { - runId: row["run_id"], - command: row["command"], - status: row["status"], - exitCode: row["exit_code"], - timedOut: row["timed_out"] === 1, - interrupted: row["interrupted"] === 1, - restartCount: row["restart_count"] ?? 0, - maxRestarts: row["max_restarts"] ?? 0, - durationMs: row["duration_ms"] ?? 0, - totalEvents: row["total_events"] ?? 0, - toolCalls: row["tool_calls"] ?? 0, - solverEvalRunId: asStringOrNull(row["solver_eval_run_id"]), - solverEvalReportPath: asStringOrNull(row["solver_eval_report_path"]), - details: parseJsonObject(row["details_json"], {}), - createdAt: row["created_at"], - updatedAt: row["updated_at"], - }; -} -/** - * Persist an autonomous solver eval run and its per-mode case results. - * - * Purpose: make solver-loop benchmark evidence queryable by SF commands, - * harness flows, UOK, and future memory retention instead of treating ignored - * `.sf/evals` JSON/JSONL evidence files as project state. - * - * Consumer: `/solver-eval` after each run completes. 
- */ -export function recordSolverEvalRun(report) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const now = new Date().toISOString(); - transaction(() => { - currentDb - .prepare(`INSERT INTO solver_eval_runs ( - run_id, suite_source, cases_count, summary_json, report_path, - results_path, db_recorded, created_at, updated_at - ) VALUES ( - :run_id, :suite_source, :cases_count, :summary_json, :report_path, - :results_path, 1, :created_at, :updated_at - ) - ON CONFLICT(run_id) DO UPDATE SET - suite_source = excluded.suite_source, - cases_count = excluded.cases_count, - summary_json = excluded.summary_json, - report_path = excluded.report_path, - results_path = excluded.results_path, - db_recorded = 1, - updated_at = excluded.updated_at`) - .run({ - ":run_id": report.runId, - ":suite_source": report.suiteSource ?? "", - ":cases_count": report.summary?.cases ?? report.results?.length ?? 0, - ":summary_json": JSON.stringify(report.summary ?? {}), - ":report_path": report.reportPath ?? "", - ":results_path": report.resultsPath ?? "", - ":created_at": report.createdAt ?? now, - ":updated_at": now, - }); - const stmt = currentDb.prepare(`INSERT INTO solver_eval_case_results ( - run_id, case_id, title, mode, passed, false_complete, duration_ms, - command_status, solver_outcome, pdd_complete, result_json, created_at - ) VALUES ( - :run_id, :case_id, :title, :mode, :passed, :false_complete, :duration_ms, - :command_status, :solver_outcome, :pdd_complete, :result_json, :created_at - ) - ON CONFLICT(run_id, case_id, mode) DO UPDATE SET - title = excluded.title, - passed = excluded.passed, - false_complete = excluded.false_complete, - duration_ms = excluded.duration_ms, - command_status = excluded.command_status, - solver_outcome = excluded.solver_outcome, - pdd_complete = excluded.pdd_complete, - result_json = excluded.result_json, - created_at = excluded.created_at`); - for (const result of report.results ?? []) { - stmt.run({ - ":run_id": report.runId, - ":case_id": result.caseId, - ":title": result.title ?? "", - ":mode": result.mode, - ":passed": intBool(result.passed), - ":false_complete": intBool(result.falseComplete), - ":duration_ms": result.command?.durationMs ?? null, - ":command_status": result.command?.status ?? null, - ":solver_outcome": result.solverSignals?.outcome ?? null, - ":pdd_complete": - result.solverSignals?.pddComplete === undefined - ? null - : intBool(result.solverSignals.pddComplete), - ":result_json": JSON.stringify(result), - ":created_at": report.createdAt ?? now, - }); - } - }); -} -/** - * List recent autonomous solver eval runs. - * - * Purpose: let operators inspect benchmark history without scraping generated - * report files. - * - * Consumer: `/solver-eval history`. - */ -export function listSolverEvalRuns(limit = 10) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - return currentDb - .prepare(`SELECT run_id, suite_source, cases_count, summary_json, - report_path, results_path, db_recorded, created_at, updated_at - FROM solver_eval_runs - ORDER BY created_at DESC, run_id DESC - LIMIT :limit`) - .all({ ":limit": Math.max(1, Math.min(100, Number(limit) || 10)) }) - .map(solverEvalRunFromRow); -} -/** - * Read one autonomous solver eval run by id. - * - * Purpose: support `/solver-eval show ` and future evidence - * promotion without parsing JSON artifacts. - * - * Consumer: solver eval command handlers. 
- */ -export function getSolverEvalRun(runId) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const row = currentDb - .prepare(`SELECT run_id, suite_source, cases_count, summary_json, - report_path, results_path, db_recorded, created_at, updated_at - FROM solver_eval_runs - WHERE run_id = :run_id`) - .get({ ":run_id": runId }); - return row ? solverEvalRunFromRow(row) : null; -} -/** - * Read per-case results for one autonomous solver eval run. - * - * Purpose: show raw-vs-SF comparisons from DB evidence. - * - * Consumer: `/solver-eval show `. - */ -export function getSolverEvalCaseResults(runId) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - return currentDb - .prepare(`SELECT run_id, case_id, title, mode, passed, false_complete, - duration_ms, command_status, solver_outcome, pdd_complete, - result_json, created_at - FROM solver_eval_case_results - WHERE run_id = :run_id - ORDER BY case_id ASC, mode ASC`) - .all({ ":run_id": runId }) - .map(solverEvalCaseFromRow); -} -/** - * Persist one headless session outcome. - * - * Purpose: make headless lifecycle evidence queryable from `sf.db` so timeout, - * restart, and operator-bounded run behavior does not live only in stderr or - * generated JSON artifacts. - * - * Consumer: headless.ts after every session exits. - */ -export function recordHeadlessRun(entry) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const now = new Date().toISOString(); - currentDb - .prepare(`INSERT INTO headless_runs ( - run_id, command, status, exit_code, timed_out, interrupted, - restart_count, max_restarts, duration_ms, total_events, tool_calls, - solver_eval_run_id, solver_eval_report_path, details_json, - created_at, updated_at - ) VALUES ( - :run_id, :command, :status, :exit_code, :timed_out, :interrupted, - :restart_count, :max_restarts, :duration_ms, :total_events, :tool_calls, - :solver_eval_run_id, :solver_eval_report_path, :details_json, - :created_at, :updated_at - ) - ON CONFLICT(run_id) DO UPDATE SET - command = excluded.command, - status = excluded.status, - exit_code = excluded.exit_code, - timed_out = excluded.timed_out, - interrupted = excluded.interrupted, - restart_count = excluded.restart_count, - max_restarts = excluded.max_restarts, - duration_ms = excluded.duration_ms, - total_events = excluded.total_events, - tool_calls = excluded.tool_calls, - solver_eval_run_id = excluded.solver_eval_run_id, - solver_eval_report_path = excluded.solver_eval_report_path, - details_json = excluded.details_json, - updated_at = excluded.updated_at`) - .run({ - ":run_id": entry.runId, - ":command": entry.command ?? "", - ":status": entry.status ?? "", - ":exit_code": Number(entry.exitCode ?? 0), - ":timed_out": intBool(entry.timedOut), - ":interrupted": intBool(entry.interrupted), - ":restart_count": Number(entry.restartCount ?? 0), - ":max_restarts": Number(entry.maxRestarts ?? 0), - ":duration_ms": Number(entry.durationMs ?? 0), - ":total_events": Number(entry.totalEvents ?? 0), - ":tool_calls": Number(entry.toolCalls ?? 0), - ":solver_eval_run_id": entry.solverEvalRunId ?? null, - ":solver_eval_report_path": entry.solverEvalReportPath ?? null, - ":details_json": JSON.stringify(entry.details ?? {}), - ":created_at": entry.createdAt ?? now, - ":updated_at": now, - }); -} -/** - * List recent headless session outcomes. - * - * Purpose: support status/doctor/query surfaces that need durable headless - * lifecycle evidence without parsing stderr logs. 
- * - * Consumer: tests now; headless query and doctor follow-on surfaces later. - */ -export function listHeadlessRuns(limit = 20) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - return currentDb - .prepare(`SELECT run_id, command, status, exit_code, timed_out, - interrupted, restart_count, max_restarts, duration_ms, - total_events, tool_calls, solver_eval_run_id, - solver_eval_report_path, details_json, created_at, updated_at - FROM headless_runs - ORDER BY created_at DESC, run_id DESC - LIMIT :limit`) - .all({ ":limit": Math.max(1, Math.min(100, Number(limit) || 20)) }) - .map(headlessRunFromRow); -} -/** - * Upsert a session row. Creates on first call; updates updated_at, branch, - * repo, and summary on subsequent calls. Safe to call on every session_start - * and again when context becomes available (e.g. after git detection). - * - * Purpose: establish the session entity that all turns, file-touches, and - * refs hang off — the missing structural layer for cross-session learning. - * - * Consumer: session-recorder.js on session_start and session_shutdown hooks. - */ -export function upsertSession(entry) { - if (!currentDb) return; - const now = new Date().toISOString(); - currentDb - .prepare(`INSERT INTO sessions - (session_id, trace_id, mode, cwd, repo, branch, summary, summary_count, created_at, updated_at) - VALUES (:session_id, :trace_id, :mode, :cwd, :repo, :branch, :summary, 0, :now, :now) - ON CONFLICT(session_id) DO UPDATE SET - trace_id = COALESCE(excluded.trace_id, sessions.trace_id), - repo = COALESCE(excluded.repo, sessions.repo), - branch = COALESCE(excluded.branch, sessions.branch), - summary = COALESCE(excluded.summary, sessions.summary), - summary_count = CASE WHEN excluded.summary IS NOT NULL - THEN sessions.summary_count + 1 - ELSE sessions.summary_count END, - updated_at = excluded.updated_at`) - .run({ - ":session_id": entry.sessionId, - ":trace_id": entry.traceId ?? null, - ":mode": entry.mode ?? "interactive", - ":cwd": entry.cwd ?? "", - ":repo": entry.repo ?? null, - ":branch": entry.branch ?? null, - ":summary": entry.summary ?? null, - ":now": now, - }); -} - -/** - * Mark a session as archived. Archived sessions are hidden from default - * session listings but retained for search and audit. - * - * Purpose: soft-delete sessions without losing their turn history or refs. - * Consumer: /sf sessions --archive , autonomous cleanup. - */ -export function archiveSession(sessionId) { - if (!currentDb) return; - currentDb - .prepare( - `UPDATE sessions SET archived_at = :now, updated_at = :now WHERE session_id = :session_id`, - ) - .run({ ":session_id": sessionId, ":now": new Date().toISOString() }); -} - -/** - * Restore an archived session to active status. - * - * Purpose: undo an accidental archive without data loss. - * Consumer: /sf sessions --unarchive . - */ -export function unarchiveSession(sessionId) { - if (!currentDb) return; - currentDb - .prepare( - `UPDATE sessions SET archived_at = NULL, updated_at = :now WHERE session_id = :session_id`, - ) - .run({ ":session_id": sessionId, ":now": new Date().toISOString() }); -} - -/** - * Insert a turn row for a session. Returns the new turn's integer id so the - * caller can link subsequent file-touches and refs to it. - * - * Purpose: record every user↔assistant exchange so turn text is searchable - * via turns_fts and promotable into the memory pipeline. 
- * - * Consumer: session-recorder.js on before_agent_start (user_message) and - * agent_end (assistant_response patch). - */ -export function insertSessionTurn(entry) { - if (!currentDb) return null; - const result = currentDb - .prepare(`INSERT INTO turns - (session_id, turn_index, user_message, assistant_response, ts) - VALUES (:session_id, :turn_index, :user_message, :assistant_response, :ts) - ON CONFLICT(session_id, turn_index) DO UPDATE SET - user_message = COALESCE(excluded.user_message, turns.user_message), - assistant_response = COALESCE(excluded.assistant_response, turns.assistant_response)`) - .run({ - ":session_id": entry.sessionId, - ":turn_index": entry.turnIndex, - ":user_message": entry.userMessage ?? null, - ":assistant_response": entry.assistantResponse ?? null, - ":ts": entry.ts ?? new Date().toISOString(), - }); - return result.lastInsertRowid ?? null; -} -/** - * Patch the assistant_response on an existing turn row. Called from agent_end - * after the model finishes so the full response is stored alongside the prompt. - * - * Purpose: complete the turn record so both halves of the exchange are - * searchable and promotable as a unit. - * - * Consumer: session-recorder.js on agent_end. - */ -export function patchTurnResponse(sessionId, turnIndex, assistantResponse) { - if (!currentDb) return; - currentDb - .prepare(`UPDATE turns SET assistant_response = :resp - WHERE session_id = :sid AND turn_index = :idx AND assistant_response IS NULL`) - .run({ - ":resp": assistantResponse, - ":sid": sessionId, - ":idx": turnIndex, - }); -} -/** - * Record that a file path was touched in a session. UNIQUE(session_id, path) - * means repeated touches in one session are collapsed to a single row — - * only first_seen_at and tool_name (of the first touch) are retained. - * - * Purpose: enable "which files did I touch last session?" and cross-session - * file-history queries without storing a full audit log per touch. - * - * Consumer: session-recorder.js on tool_call for write-class tools. - */ -export function recordSessionFileTouch(entry) { - if (!currentDb) return; - currentDb - .prepare(`INSERT OR IGNORE INTO session_file_touches - (session_id, path, tool_name, turn_id, first_seen_at) - VALUES (:session_id, :path, :tool_name, :turn_id, :first_seen_at)`) - .run({ - ":session_id": entry.sessionId, - ":path": entry.path, - ":tool_name": entry.toolName ?? null, - ":turn_id": entry.turnId ?? null, - ":first_seen_at": entry.firstSeenAt ?? new Date().toISOString(), - }); -} -/** - * Record a PR / issue / commit / branch ref mentioned in a session. Idempotent - * via UNIQUE(session_id, ref_type, ref_value). - * - * Purpose: make sessions queryable by the work items they touched so - * "what session created PR #42?" is a single indexed lookup. - * - * Consumer: session-recorder.js when refs are detected in turn text. - */ -export function recordSessionRef(entry) { - if (!currentDb) return; - currentDb - .prepare(`INSERT OR IGNORE INTO session_refs - (session_id, ref_type, ref_value, turn_id, created_at) - VALUES (:session_id, :ref_type, :ref_value, :turn_id, :created_at)`) - .run({ - ":session_id": entry.sessionId, - ":ref_type": entry.refType, - ":ref_value": entry.refValue, - ":turn_id": entry.turnId ?? null, - ":created_at": entry.createdAt ?? new Date().toISOString(), - }); -} -/** - * Full-text search across turns via the FTS5 turns_fts virtual table. - * Returns matching turns with their session metadata ordered by relevance. 
- * - * Purpose: power cross-session keyword recall — "what did I ask about auth?", - * "find sessions where I worked on retry handling". - * - * Consumer: sf memory search, context-injection, and /session search command. - */ -export function searchSessionTurns(query, limit = 20) { - if (!currentDb) return []; - return currentDb - .prepare(`SELECT t.id, t.session_id, t.turn_index, t.ts, - t.user_message, t.assistant_response, - s.mode, s.cwd, s.repo, s.branch - FROM turns_fts - JOIN turns t ON turns_fts.rowid = t.id - JOIN sessions s ON t.session_id = s.session_id - WHERE turns_fts MATCH :query - ORDER BY rank - LIMIT :limit`) - .all({ ":query": query, ":limit": Math.max(1, Math.min(100, limit)) }); -} -/** - * List recent sessions with their turn count and last-touched file count. - * Useful for /session list and for memory-pipeline ingestion sweeps. - * - * Consumer: trajectory-command, memory-ingest, doctor checks. - */ -export function listRecentSessions(limit = 20) { - if (!currentDb) return []; - return currentDb - .prepare(`SELECT s.session_id, s.mode, s.cwd, s.repo, s.branch, - s.summary, s.created_at, s.updated_at, - COUNT(DISTINCT t.id) AS turn_count, - COUNT(DISTINCT f.id) AS file_count - FROM sessions s - LEFT JOIN turns t ON t.session_id = s.session_id - LEFT JOIN session_file_touches f ON f.session_id = s.session_id - GROUP BY s.session_id - ORDER BY s.updated_at DESC - LIMIT :limit`) - .all({ ":limit": Math.max(1, Math.min(100, limit)) }); -} -/** - * Record a snapshot checkpoint before an irreversible operation. Idempotent - * within a session: the snapshot_index is auto-incremented from the current - * max so callers can create multiple checkpoints per session without - * coordination. - * - * Purpose: give session_snapshots a first-class row so recovery paths and - * irreversible-ops gates can reference the stash ref and label without - * parsing free-text. - * - * Consumer: irreversible-ops safety gate (session_before_compact, future - * verify steps that call git stash before destructive actions). - * - * @param {{ sessionId: string, gitStashRef?: string|null, label?: string|null, ts?: string }} args - * @returns {number} The row id of the inserted snapshot (or 0 on failure). - */ -export function insertSessionSnapshot(args) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const nextIndex = (() => { - const row = currentDb - .prepare( - "SELECT COALESCE(MAX(snapshot_index), -1) + 1 AS nxt FROM session_snapshots WHERE session_id = :sid", - ) - .get({ ":sid": args.sessionId }); - return row ? Number(row["nxt"]) : 0; - })(); - currentDb - .prepare(`INSERT INTO session_snapshots - (session_id, snapshot_index, git_stash_ref, label, ts) - VALUES (:sid, :idx, :ref, :label, :ts)`) - .run({ - ":sid": args.sessionId, - ":idx": nextIndex, - ":ref": args.gitStashRef ?? null, - ":label": args.label ?? null, - ":ts": args.ts ?? new Date().toISOString(), - }); - const row = currentDb - .prepare( - "SELECT id FROM session_snapshots WHERE session_id = :sid AND snapshot_index = :idx", - ) - .get({ ":sid": args.sessionId, ":idx": nextIndex }); - return row ? Number(row["id"]) : 0; -} -/** - * List all snapshots for a session, ordered by snapshot_index ascending. - * - * Purpose: let recovery tooling enumerate available restore points for a - * session and present them to the operator before a rollback. - * - * Consumer: future /session snapshots command and irreversible-ops skill. 
- * - * @param {string} sessionId - * @returns {Array<{id:number, session_id:string, snapshot_index:number, git_stash_ref:string|null, label:string|null, ts:string}>} - */ -export function listSessionSnapshots(sessionId) { - if (!currentDb) return []; - return currentDb - .prepare( - "SELECT * FROM session_snapshots WHERE session_id = :sid ORDER BY snapshot_index ASC", - ) - .all({ ":sid": sessionId }); -} - -/** - * INSERT OR REPLACE a quality_gates row. Used by milestone-validation-gates.ts - * to persist milestone-level (MV*) gate outcomes after validate-milestone runs. - */ -export function upsertQualityGate(g) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT OR REPLACE INTO quality_gates - (milestone_id, slice_id, gate_id, scope, task_id, status, verdict, rationale, findings, evaluated_at) - VALUES (:mid, :sid, :gid, :scope, :tid, :status, :verdict, :rationale, :findings, :evaluated_at)`) - .run({ - ":mid": g.milestoneId, - ":sid": g.sliceId, - ":gid": g.gateId, - ":scope": g.scope, - ":tid": g.taskId, - ":status": g.status, - ":verdict": g.verdict, - ":rationale": g.rationale, - ":findings": g.findings, - ":evaluated_at": g.evaluatedAt, - }); -} -/** - * Atomically replace all workflow state from a manifest. Lifted verbatim from - * workflow-manifest.ts so the single-writer invariant holds. Only touches - * engine tables + decisions. Does NOT modify artifacts or memories. - */ -export function restoreManifest(manifest) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const db = currentDb; - transaction(() => { - // Clear engine tables (order matters for foreign-key-like consistency) - db.exec("DELETE FROM verification_evidence"); - db.exec("DELETE FROM tasks"); - db.exec("DELETE FROM slices"); - db.exec("DELETE FROM milestones"); - db.exec("DELETE FROM decisions WHERE 1=1"); - // Restore milestones - const msStmt = - db.prepare(`INSERT INTO milestones (id, title, status, depends_on, created_at, completed_at, - vision, success_criteria, key_risks, proof_strategy, - verification_contract, verification_integration, verification_operational, verification_uat, - definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); - for (const m of manifest.milestones) { - msStmt.run( - m.id, - m.title, - m.status, - JSON.stringify(m.depends_on), - m.created_at, - m.completed_at, - m.vision, - JSON.stringify(m.success_criteria), - JSON.stringify(m.key_risks), - JSON.stringify(m.proof_strategy), - m.verification_contract, - m.verification_integration, - m.verification_operational, - m.verification_uat, - JSON.stringify(m.definition_of_done), - m.requirement_coverage, - m.boundary_map_markdown, - m.vision_meeting ? JSON.stringify(m.vision_meeting) : "", - m.product_research ? 
JSON.stringify(m.product_research) : "", - ); - } - // Restore slices - const slStmt = - db.prepare(`INSERT INTO slices (milestone_id, id, title, status, risk, depends, demo, - created_at, completed_at, full_summary_md, full_uat_md, - goal, success_criteria, proof_level, integration_closure, observability_impact, - adversarial_partner, adversarial_combatant, adversarial_architect, planning_meeting_json, - sequence, replan_triggered_at) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); - for (const s of manifest.slices) { - slStmt.run( - s.milestone_id, - s.id, - s.title, - s.status, - s.risk, - JSON.stringify(s.depends), - s.demo, - s.created_at, - s.completed_at, - s.full_summary_md, - s.full_uat_md, - s.goal, - s.success_criteria, - s.proof_level, - s.integration_closure, - s.observability_impact, - s.adversarial_partner ?? "", - s.adversarial_combatant ?? "", - s.adversarial_architect ?? "", - s.planning_meeting ? JSON.stringify(s.planning_meeting) : "", - s.sequence, - s.replan_triggered_at, - ); - } - // Restore tasks - const tkStmt = - db.prepare(`INSERT INTO tasks (milestone_id, slice_id, id, title, status, - one_liner, narrative, verification_result, duration, completed_at, - blocker_discovered, deviations, known_issues, key_files, key_decisions, - full_summary_md, description, estimate, files, verify, - inputs, expected_output, observability_impact, full_plan_md, sequence) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); - for (const t of manifest.tasks) { - tkStmt.run( - t.milestone_id, - t.slice_id, - t.id, - t.title, - t.status, - t.one_liner, - t.narrative, - t.verification_result, - t.duration, - t.completed_at, - t.blocker_discovered ? 1 : 0, - t.deviations, - t.known_issues, - JSON.stringify(t.key_files), - JSON.stringify(t.key_decisions), - t.full_summary_md, - t.description, - t.estimate, - JSON.stringify(t.files), - t.verify, - JSON.stringify(t.inputs), - JSON.stringify(t.expected_output), - t.observability_impact, - t.full_plan_md, - t.sequence, - ); - } - // Restore decisions - const dcStmt = - db.prepare(`INSERT INTO decisions (seq, id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); - for (const d of manifest.decisions) { - dcStmt.run( - d.seq, - d.id, - d.when_context, - d.scope, - d.decision, - d.choice, - d.rationale, - d.revisable, - d.made_by, - d.superseded_by, - ); - } - // Restore verification evidence - const evStmt = - db.prepare(`INSERT INTO verification_evidence (task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at) - VALUES (?, ?, ?, ?, ?, ?, ?, ?)`); - for (const e of manifest.verification_evidence) { - evStmt.run( - e.task_id, - e.slice_id, - e.milestone_id, - e.command, - e.exit_code, - e.verdict, - e.duration_ms, - e.created_at, - ); - } - }); -} -/** - * Bulk delete + insert a legacy milestone hierarchy for markdown → DB migration. - * Used by workflow-migration.ts to populate engine tables from parsed ROADMAP/PLAN - * files. All operations run inside a single transaction. 
- */ -export function bulkInsertLegacyHierarchy(payload) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const db = currentDb; - const { milestones, slices, tasks, clearMilestoneIds, createdAt } = payload; - if (clearMilestoneIds.length === 0) return; - const placeholders = clearMilestoneIds.map(() => "?").join(","); - transaction(() => { - db.prepare(`DELETE FROM tasks WHERE milestone_id IN (${placeholders})`).run( - ...clearMilestoneIds, - ); - db.prepare( - `DELETE FROM slices WHERE milestone_id IN (${placeholders})`, - ).run(...clearMilestoneIds); - db.prepare(`DELETE FROM milestones WHERE id IN (${placeholders})`).run( - ...clearMilestoneIds, - ); - const insertMilestone = db.prepare( - "INSERT INTO milestones (id, title, status, created_at) VALUES (?, ?, ?, ?)", - ); - for (const m of milestones) { - insertMilestone.run(m.id, m.title, m.status, createdAt); - } - const insertSliceStmt = db.prepare( - "INSERT INTO slices (id, milestone_id, title, status, risk, depends, sequence, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)", - ); - for (const s of slices) { - insertSliceStmt.run( - s.id, - s.milestoneId, - s.title, - s.status, - s.risk, - "[]", - s.sequence, - createdAt, - ); - } - const insertTaskStmt = db.prepare( - "INSERT INTO tasks (id, slice_id, milestone_id, title, description, status, estimate, files, sequence) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", - ); - for (const t of tasks) { - insertTaskStmt.run( - t.id, - t.sliceId, - t.milestoneId, - t.title, - "", - t.status, - "", - "[]", - t.sequence, - ); - } - }); -} -// ─── Memory store writers ──────────────────────────────────────────────── -// All memory writes go through sf-db.ts so the single-writer invariant -// holds. These are direct pass-throughs to the SQL previously in -// memory-store.ts — same bindings, same behavior. -export function getActiveMemories({ category, limit = 200 } = {}) { - if (!currentDb) return []; - const rows = category - ? currentDb - .prepare( - "SELECT * FROM active_memories WHERE category = ? ORDER BY updated_at DESC LIMIT ?", - ) - .all(category, limit) - : currentDb - .prepare( - "SELECT * FROM active_memories ORDER BY updated_at DESC LIMIT ?", - ) - .all(limit); - return rows.map((r) => ({ - id: r["id"], - category: r["category"], - content: r["content"], - confidence: r["confidence"], - sourceUnitId: r["source_unit_id"], - tags: (() => { - try { - return JSON.parse(r["tags"] ?? "[]"); - } catch { - return []; - } - })(), - createdAt: r["created_at"], - updatedAt: r["updated_at"], - })); -} -export function insertMemoryRow(args) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT INTO memories (id, category, content, confidence, source_unit_type, source_unit_id, created_at, updated_at, tags) - VALUES (:id, :category, :content, :confidence, :source_unit_type, :source_unit_id, :created_at, :updated_at, :tags)`) - .run({ - ":id": args.id, - ":category": args.category, - ":content": args.content, - ":confidence": args.confidence, - ":source_unit_type": args.sourceUnitType, - ":source_unit_id": args.sourceUnitId, - ":created_at": args.createdAt, - ":updated_at": args.updatedAt, - ":tags": JSON.stringify(args.tags ?? 
[]), - }); -} -export function rewriteMemoryId(placeholderId, realId) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare("UPDATE memories SET id = :real_id WHERE id = :placeholder") - .run({ - ":real_id": realId, - ":placeholder": placeholderId, - }); -} -export function updateMemoryContentRow(id, content, confidence, updatedAt) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - if (confidence != null) { - currentDb - .prepare( - "UPDATE memories SET content = :content, confidence = :confidence, updated_at = :updated_at WHERE id = :id", - ) - .run({ - ":content": content, - ":confidence": confidence, - ":updated_at": updatedAt, - ":id": id, - }); - } else { - currentDb - .prepare( - "UPDATE memories SET content = :content, updated_at = :updated_at WHERE id = :id", - ) - .run({ ":content": content, ":updated_at": updatedAt, ":id": id }); - } -} -export function incrementMemoryHitCount(id, updatedAt) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - "UPDATE memories SET hit_count = hit_count + 1, updated_at = :updated_at WHERE id = :id", - ) - .run({ ":updated_at": updatedAt, ":id": id }); -} -export function supersedeMemoryRow(oldId, newId, updatedAt) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - "UPDATE memories SET superseded_by = :new_id, updated_at = :updated_at WHERE id = :old_id", - ) - .run({ ":new_id": newId, ":updated_at": updatedAt, ":old_id": oldId }); -} -export function markMemoryUnitProcessed(unitKey, activityFile, processedAt) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT OR IGNORE INTO memory_processed_units (unit_key, activity_file, processed_at) - VALUES (:key, :file, :at)`) - .run({ ":key": unitKey, ":file": activityFile, ":at": processedAt }); -} -export function decayMemoriesBefore(cutoffTs, now) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`UPDATE memories - SET confidence = MAX(0.1, confidence - 0.1), updated_at = :now - WHERE superseded_by IS NULL AND updated_at < :cutoff AND confidence > 0.1`) - .run({ ":now": now, ":cutoff": cutoffTs }); -} -/** - * Supersede memories that have exceeded their TTL. - * - * Purpose: prevent stale memories from silently poisoning future sessions. - * Mirrors Copilot Memory's 28-day TTL model — memories that were never - * accessed expire sooner; memories actively used get a longer lease. - * - * Rules: - * - Never accessed (hit_count = 0) + older than unstartedTtlDays → expire - * - Any memory older than maxTtlDays → expire regardless of hit_count - * - * Consumer: called at autonomous mode startup from auto-start.js. - * Returns the number of memories superseded. 
- */ -export function expireStaleMemories(unstartedTtlDays = 28, maxTtlDays = 90) { - if (!currentDb) return 0; - const now = new Date().toISOString(); - const cutoffUnstarted = new Date( - Date.now() - unstartedTtlDays * 86_400_000, - ).toISOString(); - const cutoffMax = new Date( - Date.now() - maxTtlDays * 86_400_000, - ).toISOString(); - const result = currentDb - .prepare(`UPDATE memories SET superseded_by = 'ttl-expired', updated_at = :now - WHERE superseded_by IS NULL - AND ( - (hit_count = 0 AND updated_at < :cutoff_unstarted) - OR updated_at < :cutoff_max - )`) - .run({ - ":now": now, - ":cutoff_unstarted": cutoffUnstarted, - ":cutoff_max": cutoffMax, - }); - return result.changes ?? 0; -} -export function supersedeLowestRankedMemories(limit, now) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`UPDATE memories SET superseded_by = 'CAP_EXCEEDED', updated_at = :now - WHERE id IN ( - SELECT id FROM memories - WHERE superseded_by IS NULL - ORDER BY (confidence * (1.0 + hit_count * 0.1)) ASC - LIMIT :limit - )`) - .run({ ":now": now, ":limit": limit }); -} -// ─── Memory Sources ────────────────────────────────────────────────────────── -export function insertMemorySourceRow(args) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT OR IGNORE INTO memory_sources (id, kind, uri, title, content, content_hash, imported_at, scope, tags) - VALUES (:id, :kind, :uri, :title, :content, :content_hash, :imported_at, :scope, :tags)`) - .run({ - ":id": args.id, - ":kind": args.kind, - ":uri": args.uri, - ":title": args.title, - ":content": args.content, - ":content_hash": args.contentHash, - ":imported_at": args.importedAt, - ":scope": args.scope ?? "project", - ":tags": JSON.stringify(args.tags ?? []), - }); -} -export function deleteMemorySourceRow(id) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const res = currentDb - .prepare("DELETE FROM memory_sources WHERE id = :id") - .run({ ":id": id }); - return (res?.changes ?? 0) > 0; -} -// ─── Judgments ─────────────────────────────────────────────────────────────── -export function insertJudgment(entry) { - if (!currentDb) return; - try { - currentDb - .prepare(`INSERT INTO judgments (unit_id, decision, alternatives_json, reasoning, confidence, ts) - VALUES (:unit_id, :decision, :alternatives_json, :reasoning, :confidence, :ts)`) - .run({ - ":unit_id": entry.unitId ?? "", - ":decision": entry.decision ?? "", - ":alternatives_json": JSON.stringify(entry.alternatives ?? []), - ":reasoning": entry.reasoning ?? "", - ":confidence": entry.confidence ?? "medium", - ":ts": entry.ts ?? 
new Date().toISOString(), - }); - } catch { - // Judgment logging is best-effort - } -} -export function getJudgmentsForUnit(unitIdPrefix, limit = 1000) { - if (!currentDb) return []; - try { - const rows = currentDb - .prepare( - `SELECT id, unit_id AS unitId, decision, alternatives_json AS alternativesJson, reasoning, confidence, ts - FROM judgments - WHERE unit_id LIKE :prefix - ORDER BY ts DESC - LIMIT :limit`, - ) - .all({ - ":prefix": `${unitIdPrefix}%`, - ":limit": limit, - }); - return rows.map((r) => ({ - id: r.id, - unitId: r.unitId, - decision: r.decision, - alternatives: parseJsonObject(r.alternativesJson, []), - reasoning: r.reasoning, - confidence: r.confidence, - ts: r.ts, - })); - } catch { - return []; - } -} -// ─── Retrieval Evidence ───────────────────────────────────────────────────── - -/** - * Record a retrieval lookup with source provenance. - * Purpose: let SF compare live code, semantic, docs, and web context by the same - * freshness and scope contract before planning or implementation trusts it. - * Consumer: Sift/codebase search tools and future Context7/web retrieval bridges. - */ -export function insertRetrievalEvidence(args) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const now = args.recordedAt ?? new Date().toISOString(); - currentDb - .prepare(`INSERT INTO retrieval_evidence ( - backend, source_kind, query, strategy, scope, project_root, - git_head, git_branch, worktree_dirty, freshness, status, - hit_count, elapsed_ms, cache_path, error, result_json, recorded_at - ) VALUES ( - :backend, :source_kind, :query, :strategy, :scope, :project_root, - :git_head, :git_branch, :worktree_dirty, :freshness, :status, - :hit_count, :elapsed_ms, :cache_path, :error, :result_json, :recorded_at - )`) - .run({ - ":backend": args.backend, - ":source_kind": args.sourceKind ?? "code", - ":query": args.query ?? "", - ":strategy": args.strategy ?? "", - ":scope": args.scope ?? "", - ":project_root": args.projectRoot ?? "", - ":git_head": args.gitHead ?? null, - ":git_branch": args.gitBranch ?? null, - ":worktree_dirty": intBool(args.worktreeDirty), - ":freshness": args.freshness ?? "unknown", - ":status": args.status ?? "ok", - ":hit_count": args.hitCount ?? 0, - ":elapsed_ms": args.elapsedMs ?? 0, - ":cache_path": args.cachePath ?? null, - ":error": args.error ?? null, - ":result_json": JSON.stringify(args.result ?? {}), - ":recorded_at": now, - }); -} - -/** - * Return recent retrieval evidence rows. - * Purpose: support audits that need to distinguish live source evidence from - * stale indexed or prose-only context. - * Consumer: inspect/doctor tooling and tests for retrieval provenance. 
- */ -export function getRetrievalEvidence(limit = 100) { - if (!currentDb) return []; - const rows = currentDb - .prepare(`SELECT - id, backend, source_kind AS sourceKind, query, strategy, scope, - project_root AS projectRoot, git_head AS gitHead, - git_branch AS gitBranch, worktree_dirty AS worktreeDirty, - freshness, status, hit_count AS hitCount, elapsed_ms AS elapsedMs, - cache_path AS cachePath, error, result_json AS resultJson, recorded_at AS recordedAt - FROM retrieval_evidence - ORDER BY recorded_at DESC, id DESC - LIMIT :limit`) - .all({ ":limit": limit }); - return rows.map((row) => ({ - ...row, - worktreeDirty: row.worktreeDirty === 1, - result: parseJsonObject(row.resultJson, {}), - })); -} -// ─── Memory Embeddings ─────────────────────────────────────────────────────── -export function upsertMemoryEmbedding(args) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT INTO memory_embeddings (memory_id, model, dim, vector, updated_at) - VALUES (:memory_id, :model, :dim, :vector, :updated_at) - ON CONFLICT(memory_id) DO UPDATE SET - model = excluded.model, - dim = excluded.dim, - vector = excluded.vector, - updated_at = excluded.updated_at`) - .run({ - ":memory_id": args.memoryId, - ":model": args.model, - ":dim": args.dim, - ":vector": args.vector, - ":updated_at": args.updatedAt, - }); -} -export function deleteMemoryEmbedding(memoryId) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const res = currentDb - .prepare("DELETE FROM memory_embeddings WHERE memory_id = :id") - .run({ ":id": memoryId }); - return (res?.changes ?? 0) > 0; -} -// ─── Tier 1.3: Spec/Runtime/Evidence Schema ────────────────────────────────── -// Functions for managing evidence in the new spec schema (v32+) - -/** - * Record evidence for a milestone. Appends to milestone_evidence table. - * Purpose: Create audit trail of decisions, verifications, and incidents. - * Consumer: complete-milestone, reassess-milestone, and other tools. - */ -export function insertMilestoneEvidence( - milestoneId, - evidenceType, - content, - phaseName, - recordedBy, -) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT INTO milestone_evidence (milestone_id, evidence_type, content, recorded_at, phase_name, recorded_by) - VALUES (?, ?, ?, ?, ?, ?)`) - .run( - milestoneId, - evidenceType, - content, - new Date().toISOString(), - phaseName || "", - recordedBy || "", - ); -} - -/** - * Record evidence for a slice. Appends to slice_evidence table. - * Purpose: Create audit trail of slice decisions, verifications, and incidents. - * Consumer: complete-slice, execute-slice, and other tools. - */ -export function insertSliceEvidence( - milestoneId, - sliceId, - evidenceType, - content, - phaseName, - recordedBy, -) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT INTO slice_evidence (milestone_id, slice_id, evidence_type, content, recorded_at, phase_name, recorded_by) - VALUES (?, ?, ?, ?, ?, ?, ?)`) - .run( - milestoneId, - sliceId, - evidenceType, - content, - new Date().toISOString(), - phaseName || "", - recordedBy || "", - ); -} - -/** - * Record evidence for a task. Appends to task_evidence table. - * Purpose: Create audit trail of task decisions, verifications, and incidents. - * Consumer: complete-task, execute-task, and other tools. 
- */ -export function insertTaskEvidence( - milestoneId, - sliceId, - taskId, - evidenceType, - content, - phaseName, - recordedBy, -) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare(`INSERT INTO task_evidence (milestone_id, slice_id, task_id, evidence_type, content, recorded_at, phase_name, recorded_by) - VALUES (?, ?, ?, ?, ?, ?, ?, ?)`) - .run( - milestoneId, - sliceId, - taskId, - evidenceType, - content, - new Date().toISOString(), - phaseName || "", - recordedBy || "", - ); -} - -/** - * Query milestone audit trail (spec + evidence). Returns rows with spec intent and evidence history. - * Purpose: Support data archaeology and decision-tree reconstruction. - * Consumer: forensics tools, doctor checks, audit/compliance queries. - */ -export function getMilestoneAuditTrail(milestoneId) { - if (!currentDb) return []; - return currentDb - .prepare(` - SELECT - r.id, r.title, r.status, - s.vision, s.spec_version, - e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by - FROM milestones r - LEFT JOIN milestone_specs s ON r.id = s.id - LEFT JOIN milestone_evidence e ON r.id = e.milestone_id - WHERE r.id = ? - ORDER BY e.recorded_at ASC - `) - .all(milestoneId); -} - -/** - * Query slice audit trail (spec + evidence). - * Purpose: Support data archaeology and decision-tree reconstruction. - * Consumer: forensics tools, doctor checks, audit/compliance queries. - */ -export function getSliceAuditTrail(milestoneId, sliceId) { - if (!currentDb) return []; - return currentDb - .prepare(` - SELECT - r.id, r.title, r.status, - s.goal, s.spec_version, - e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by - FROM slices r - LEFT JOIN slice_specs s ON r.milestone_id = s.milestone_id AND r.id = s.slice_id - LEFT JOIN slice_evidence e ON r.milestone_id = e.milestone_id AND r.id = e.slice_id - WHERE r.milestone_id = ? AND r.id = ? - ORDER BY e.recorded_at ASC - `) - .all(milestoneId, sliceId); -} - -/** - * Query task audit trail (spec + evidence). - * Purpose: Support data archaeology and decision-tree reconstruction. - * Consumer: forensics tools, doctor checks, audit/compliance queries. - */ -export function getTaskAuditTrail(milestoneId, sliceId, taskId) { - if (!currentDb) return []; - return currentDb - .prepare(` - SELECT - r.id, r.title, r.status, - s.verify, s.spec_version, - e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by - FROM tasks r - LEFT JOIN task_specs s ON r.milestone_id = s.milestone_id AND r.slice_id = s.slice_id AND r.id = s.task_id - LEFT JOIN task_evidence e ON r.milestone_id = e.milestone_id AND r.slice_id = e.slice_id AND r.id = e.task_id - WHERE r.milestone_id = ? AND r.slice_id = ? AND r.id = ? - ORDER BY e.recorded_at ASC - `) - .all(milestoneId, sliceId, taskId); -} - -/** - * Get milestone spec only (immutable intent, no runtime state). - * Purpose: Retrieve spec intent for re-planning or spec validation. - * Consumer: plan-milestone and spec validation tools. - */ -export function getMilestoneSpec(milestoneId) { - if (!currentDb) return null; - return currentDb - .prepare("SELECT * FROM milestone_specs WHERE id = ?") - .get(milestoneId); -} - -/** - * Get slice spec only (immutable intent, no runtime state). - * Purpose: Retrieve spec intent for re-planning or spec validation. - * Consumer: plan-slice and spec validation tools. 
- */ -export function getSliceSpec(milestoneId, sliceId) { - if (!currentDb) return null; - return currentDb - .prepare( - "SELECT * FROM slice_specs WHERE milestone_id = ? AND slice_id = ?", - ) - .get(milestoneId, sliceId); -} - -/** - * Get task spec only (immutable intent, no runtime state). - * Purpose: Retrieve spec intent for re-planning or spec validation. - * Consumer: plan-task and spec validation tools. - */ -export function getTaskSpec(milestoneId, sliceId, taskId) { - if (!currentDb) return null; - return currentDb - .prepare( - "SELECT * FROM task_specs WHERE milestone_id = ? AND slice_id = ? AND task_id = ?", - ) - .get(milestoneId, sliceId, taskId); -} - -// ─── Validation Runs ─────────────────────────────────────────────────────────── - -/** - * Start a validation run for a milestone, slice, or task. - * Mirrors droid's validation-state.json creation from validation-contract.md. - * - * Purpose: Track explicit validation contracts and their execution state in the - * DB so any surface (CLI, TUI, headless) can answer "what are we validating and - * where are we" with a single query. - * - * Consumer: autonomous-solver, plan-slice, quality gates, eval runners. - */ -export function startValidationRun({ milestoneId, sliceId, taskId, contract }) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const runId = crypto.randomUUID(); - currentDb - .prepare( - `INSERT INTO validation_runs - (run_id, milestone_id, slice_id, task_id, contract, status, started_at, created_at) - VALUES (:run_id, :milestone_id, :slice_id, :task_id, :contract, 'running', datetime('now'), datetime('now'))`, - ) - .run({ - ":run_id": runId, - ":milestone_id": milestoneId, - ":slice_id": sliceId ?? null, - ":task_id": taskId ?? null, - ":contract": contract ?? "", - }); - return runId; -} - -/** - * Complete a validation run with verdict and findings. - * Mirrors droid's update of validation-state.json after run finishes. - * - * Consumer: autonomous-solver after eval execution, quality gate evaluators. - */ -export function completeValidationRun({ - runId, - verdict, - rationale = "", - findings = "", -}) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const status = - verdict === "pass" ? "pass" : verdict === "fail" ? "fail" : "error"; - const result = currentDb - .prepare( - `UPDATE validation_runs SET - status = :status, - verdict = :verdict, - rationale = :rationale, - findings = :findings, - completed_at = datetime('now') - WHERE run_id = :run_id AND status = 'running'`, - ) - .run({ - ":run_id": runId, - ":status": status, - ":verdict": verdict ?? "", - ":rationale": rationale ?? "", - ":findings": findings ?? "", - }); - if (result.changes === 0) { - throw new SFError( - SF_STALE_STATE, - `sf-db: completeValidationRun: no running validation run found for run_id=${runId}`, - ); - } -} - -/** - * Get the latest validation state for a scope (milestone, slice, or task). - * Returns the most recent run — mirrors droid's validation-state.json read. - * - * Consumer: any surface that needs "are we passing?" for a milestone/slice/task. - */ -export function getLatestValidationState(milestoneId, sliceId, taskId) { - if (!currentDb) return null; - const rows = currentDb - .prepare( - `SELECT * FROM validation_runs - WHERE milestone_id = :milestone_id - AND slice_id IS :slice_id - AND task_id IS :task_id - ORDER BY created_at DESC, run_id DESC - LIMIT 1`, - ) - .all({ - ":milestone_id": milestoneId, - ":slice_id": sliceId ?? 
null, - ":task_id": taskId ?? null, - }); - return rows[0] ?? null; -} - -/** - * Get validation run history for a scope. - * Mirrors droid's historical validation-state.json files. - * - * Consumer: forensics, eval review, audit trail queries. - */ -export function getValidationHistory(milestoneId, sliceId, taskId, limit = 20) { - if (!currentDb) return []; - return currentDb - .prepare( - `SELECT * FROM validation_runs - WHERE milestone_id = :milestone_id - AND slice_id IS :slice_id - AND task_id IS :task_id - ORDER BY created_at DESC, run_id DESC - LIMIT :limit`, - ) - .all({ - ":milestone_id": milestoneId, - ":slice_id": sliceId ?? null, - ":task_id": taskId ?? null, - ":limit": limit, - }); -} - -// ─── Triage DB CRUD ─────────────────────────────────────────────────────────── - -/** - * Insert a triage run record. - * Purpose: replace .sf/triage/evals|inbox|skills JSONL files with queryable DB rows. - * Consumer: commands-todo.js triageTodoDump after successful triage. - */ -export function insertTriageRun(id, sourceFile, createdAt) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `INSERT INTO triage_runs (id, source_file, status, created_at) - VALUES (:id, :source_file, 'complete', :created_at) - ON CONFLICT(id) DO NOTHING`, - ) - .run({ - ":id": id, - ":source_file": sourceFile ?? null, - ":created_at": createdAt ?? new Date().toISOString(), - }); -} - -/** - * Insert a triage eval candidate row. - * Purpose: store eval candidates in DB instead of .evals.jsonl. - * Consumer: commands-todo.js triageTodoDump. - */ -export function insertTriageEval(id, runId, data, createdAt) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `INSERT INTO triage_evals (id, run_id, task_input, expected_behavior, evidence, failure_mode, status, created_at) - VALUES (:id, :run_id, :task_input, :expected_behavior, :evidence, :failure_mode, 'pending', :created_at) - ON CONFLICT(id) DO NOTHING`, - ) - .run({ - ":id": id, - ":run_id": runId, - ":task_input": data.task_input ?? "", - ":expected_behavior": data.expected_behavior ?? "", - ":evidence": data.evidence ?? null, - ":failure_mode": data.failure_mode ?? null, - ":created_at": createdAt ?? new Date().toISOString(), - }); -} - -/** - * Insert a normalized triage inbox item row. - * Purpose: store triage inbox items (eval_candidate, implementation_task, etc.) in DB. - * Consumer: commands-todo.js triageTodoDump. - */ -export function insertTriageItem( - id, - runId, - kind, - content, - evidence, - createdAt, -) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `INSERT INTO triage_items (id, run_id, kind, content, evidence, status, created_at) - VALUES (:id, :run_id, :kind, :content, :evidence, 'pending', :created_at) - ON CONFLICT(id) DO NOTHING`, - ) - .run({ - ":id": id, - ":run_id": runId, - ":kind": kind, - ":content": content, - ":evidence": evidence ?? null, - ":created_at": createdAt ?? new Date().toISOString(), - }); -} - -/** - * Insert a triage skill proposal row. - * Purpose: store skill proposals in DB instead of .skills.jsonl. - * Consumer: commands-todo.js triageTodoDump. 
- */ -export function insertTriageSkill(id, runId, data, createdAt) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `INSERT INTO triage_skills (id, run_id, name, description, trigger, raw_json, status, created_at) - VALUES (:id, :run_id, :name, :description, :trigger, :raw_json, 'pending', :created_at) - ON CONFLICT(id) DO NOTHING`, - ) - .run({ - ":id": id, - ":run_id": runId, - ":name": data.title ?? data.name ?? null, - ":description": data.description ?? null, - ":trigger": data.trigger_pattern ?? data.trigger ?? null, - ":raw_json": JSON.stringify(data), - ":created_at": createdAt ?? new Date().toISOString(), - }); -} - -// ─── Runtime Counters ───────────────────────────────────────────────────────── - -/** - * Get a runtime counter value by key. Returns 0 if the key does not exist. - * Purpose: replace per-key JSON files in .sf/runtime/ with queryable DB rows. - * Consumer: auto-dispatch.js rewrite-count and uat-count logic. - */ -export function getRuntimeCounter(key) { - if (!currentDb) return 0; - const row = currentDb - .prepare("SELECT value FROM runtime_counters WHERE key = ?") - .get(key); - return typeof row?.value === "number" ? row.value : 0; -} - -/** - * Set a runtime counter to an explicit value. - * Purpose: replace JSON file writes for named counters. - * Consumer: auto-dispatch.js setRewriteCount. - */ -export function setRuntimeCounter(key, value) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `INSERT INTO runtime_counters (key, value, updated_at) - VALUES (:key, :value, :updated_at) - ON CONFLICT(key) DO UPDATE SET value = excluded.value, updated_at = excluded.updated_at`, - ) - .run({ - ":key": key, - ":value": value, - ":updated_at": new Date().toISOString(), - }); -} - -/** - * Atomically increment a runtime counter and return the new value. - * Purpose: replace read-modify-write JSON file pattern for counters. - * Consumer: auto-dispatch.js incrementUatCount. - */ -export function incrementRuntimeCounter(key) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - currentDb - .prepare( - `INSERT INTO runtime_counters (key, value, updated_at) - VALUES (:key, 1, :updated_at) - ON CONFLICT(key) DO UPDATE SET value = value + 1, updated_at = excluded.updated_at`, - ) - .run({ ":key": key, ":updated_at": new Date().toISOString() }); - const row = currentDb - .prepare("SELECT value FROM runtime_counters WHERE key = ?") - .get(key); - return typeof row?.value === "number" ? row.value : 1; -} - -// ─── Validation Attention Markers ───────────────────────────────────────────── - -/** - * Get a validation attention marker for a milestone, or null if absent. - * Purpose: replace .sf/runtime/validation-attention/{mid}.json reads. - * Consumer: auto-dispatch.js hasActiveValidationAttentionMarker. - */ -export function getValidationAttentionMarker(milestoneId) { - if (!currentDb) return null; - return ( - currentDb - .prepare( - "SELECT * FROM validation_attention_markers WHERE milestone_id = ?", - ) - .get(milestoneId) ?? null - ); -} - -/** - * Upsert a validation attention marker for a milestone. - * Purpose: replace .sf/runtime/validation-attention/{mid}.json writes. - * Consumer: auto-dispatch.js writeValidationAttentionMarker. 
- */ -export function upsertValidationAttentionMarker(milestoneId, marker) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const now = new Date().toISOString(); - currentDb - .prepare( - `INSERT INTO validation_attention_markers - (milestone_id, created_at, source, remediation_round, revalidation_round, revalidation_requested_at) - VALUES (:milestone_id, :created_at, :source, :remediation_round, :revalidation_round, :revalidation_requested_at) - ON CONFLICT(milestone_id) DO UPDATE SET - source = excluded.source, - remediation_round = excluded.remediation_round, - revalidation_round = excluded.revalidation_round, - revalidation_requested_at = excluded.revalidation_requested_at`, - ) - .run({ - ":milestone_id": milestoneId, - ":created_at": marker.createdAt ?? now, - ":source": marker.source ?? null, - ":remediation_round": marker.remediationRound ?? null, - ":revalidation_round": marker.revalidationRound ?? null, - ":revalidation_requested_at": marker.revalidationRequestedAt ?? null, - }); -} - -// ─── Routing History ────────────────────────────────────────────────────────── - -/** - * Upsert a routing outcome for a pattern/tier pair, incrementing success or fail count. - * Purpose: persist adaptive tier learning to DB so routing decisions survive restarts. - * Consumer: routing-history.js recordOutcome. - */ -export function upsertRoutingOutcome(db, pattern, tier, success) { - db.prepare( - `INSERT INTO routing_history (pattern, tier, success_count, fail_count, updated_at) - VALUES (:pattern, :tier, :success_count, :fail_count, :updated_at) - ON CONFLICT(pattern, tier) DO UPDATE SET - success_count = success_count + excluded.success_count, - fail_count = fail_count + excluded.fail_count, - updated_at = excluded.updated_at`, - ).run({ - ":pattern": pattern, - ":tier": tier, - ":success_count": success ? 1 : 0, - ":fail_count": success ? 0 : 1, - ":updated_at": new Date().toISOString(), - }); -} - -/** - * Get all routing history rows. - * Purpose: load full routing state into memory on init. - * Consumer: routing-history.js initRoutingHistory. - */ -export function getAllRoutingHistory(db) { - return db - .prepare( - "SELECT pattern, tier, success_count, fail_count, updated_at FROM routing_history", - ) - .all(); -} - -/** - * Get routing history rows for a specific pattern. - * Purpose: targeted pattern lookup for adaptive tier queries. - * Consumer: routing-history.js getRoutingHistoryForPattern. - */ -export function getRoutingHistoryForPattern(db, pattern) { - return db - .prepare( - "SELECT tier, success_count, fail_count FROM routing_history WHERE pattern = ?", - ) - .all(pattern); -} - -/** - * Insert a routing feedback signal into the audit table. - * Purpose: persist user feedback for later analysis and weighted outcome application. - * Consumer: routing-history.js recordFeedback. - */ -export function insertRoutingFeedback(db, pattern, tier, feedback) { - db.prepare( - `INSERT INTO routing_feedback (pattern, tier, feedback, recorded_at) - VALUES (:pattern, :tier, :feedback, :recorded_at)`, - ).run({ - ":pattern": pattern, - ":tier": tier, - ":feedback": feedback, - ":recorded_at": new Date().toISOString(), - }); -} - -/** - * Clear all routing history and feedback rows. - * Purpose: full reset of adaptive learning state on user request. - * Consumer: routing-history.js clearRoutingHistory. 
- */ -export function clearRoutingHistory(db) { - db.prepare("DELETE FROM routing_history").run(); - db.prepare("DELETE FROM routing_feedback").run(); -} - -// ─── Unit Metrics CRUD ──────────────────────────────────────────────────────── - -function rowToUnitMetrics(row) { - const unit = { - type: row["type"], - id: row["id"], - model: row["model"], - startedAt: row["started_at"], - finishedAt: row["finished_at"], - tokens: { - input: row["tokens_input"], - output: row["tokens_output"], - cacheRead: row["tokens_cache_read"], - cacheWrite: row["tokens_cache_write"], - total: row["tokens_total"], - }, - cost: row["cost"], - toolCalls: row["tool_calls"], - assistantMessages: row["assistant_messages"], - userMessages: row["user_messages"], - apiRequests: row["api_requests"], - }; - if (row["auto_session_key"] != null) - unit.autoSessionKey = row["auto_session_key"]; - if (row["tier"] != null) unit.tier = row["tier"]; - if (row["model_downgraded"] != null) - unit.modelDowngraded = row["model_downgraded"] === 1; - if (row["context_window_tokens"] != null) - unit.contextWindowTokens = row["context_window_tokens"]; - if (row["truncation_sections"] != null) - unit.truncationSections = row["truncation_sections"]; - if (row["continue_here_fired"] != null) - unit.continueHereFired = row["continue_here_fired"] === 1; - if (row["prompt_char_count"] != null) - unit.promptCharCount = row["prompt_char_count"]; - if (row["baseline_char_count"] != null) - unit.baselineCharCount = row["baseline_char_count"]; - if (row["cache_hit_rate"] != null) unit.cacheHitRate = row["cache_hit_rate"]; - if (row["skills"] != null) unit.skills = JSON.parse(row["skills"]); - return unit; -} - -/** - * Upsert a single unit metrics record into the DB. - * - * Purpose: persist per-unit token/cost telemetry from autonomous mode so - * history, cost, and export commands can read from the canonical DB store - * instead of a fragile JSON file on disk. - * - * Consumer: metrics.js saveLedger (called after every unit snapshot). - */ -export function upsertUnitMetrics(db, unit) { - db.prepare( - `INSERT OR REPLACE INTO unit_metrics ( - type, id, started_at, finished_at, model, auto_session_key, - tokens_input, tokens_output, tokens_cache_read, tokens_cache_write, tokens_total, - cost, tool_calls, assistant_messages, user_messages, api_requests, - tier, model_downgraded, context_window_tokens, truncation_sections, - continue_here_fired, prompt_char_count, baseline_char_count, cache_hit_rate, skills - ) VALUES ( - :type, :id, :started_at, :finished_at, :model, :auto_session_key, - :tokens_input, :tokens_output, :tokens_cache_read, :tokens_cache_write, :tokens_total, - :cost, :tool_calls, :assistant_messages, :user_messages, :api_requests, - :tier, :model_downgraded, :context_window_tokens, :truncation_sections, - :continue_here_fired, :prompt_char_count, :baseline_char_count, :cache_hit_rate, :skills - )`, - ).run({ - ":type": unit.type, - ":id": unit.id, - ":started_at": unit.startedAt, - ":finished_at": unit.finishedAt, - ":model": unit.model, - ":auto_session_key": unit.autoSessionKey ?? null, - ":tokens_input": unit.tokens.input, - ":tokens_output": unit.tokens.output, - ":tokens_cache_read": unit.tokens.cacheRead, - ":tokens_cache_write": unit.tokens.cacheWrite, - ":tokens_total": unit.tokens.total, - ":cost": unit.cost, - ":tool_calls": unit.toolCalls, - ":assistant_messages": unit.assistantMessages, - ":user_messages": unit.userMessages, - ":api_requests": unit.apiRequests ?? unit.assistantMessages, - ":tier": unit.tier ?? 
null, - ":model_downgraded": - unit.modelDowngraded != null ? (unit.modelDowngraded ? 1 : 0) : null, - ":context_window_tokens": unit.contextWindowTokens ?? null, - ":truncation_sections": unit.truncationSections ?? null, - ":continue_here_fired": - unit.continueHereFired != null ? (unit.continueHereFired ? 1 : 0) : null, - ":prompt_char_count": unit.promptCharCount ?? null, - ":baseline_char_count": unit.baselineCharCount ?? null, - ":cache_hit_rate": unit.cacheHitRate ?? null, - ":skills": unit.skills != null ? JSON.stringify(unit.skills) : null, - }); -} - -/** - * Load all unit metrics ordered by started_at ASC (oldest first). - * - * Purpose: reconstruct the in-memory ledger from the canonical DB store - * on session init or on demand from history/cost commands. - * - * Consumer: metrics.js loadLedgerFromDisk and loadLedger. - */ -export function getAllUnitMetrics(db) { - return db - .prepare("SELECT * FROM unit_metrics ORDER BY started_at ASC") - .all() - .map(rowToUnitMetrics); -} - -/** - * Delete oldest unit_metrics rows keeping only the N most recent by finished_at. - * - * Purpose: enforce a max-ledger-size cap so the DB doesn't bloat over long - * autonomous runs. Called by the doctor when the ledger exceeds its threshold. - * - * Consumer: metrics.js pruneMetricsLedger. - */ -export function pruneUnitMetrics(db, keepCount) { - db.prepare( - `DELETE FROM unit_metrics WHERE rowid NOT IN ( - SELECT rowid FROM unit_metrics ORDER BY finished_at DESC LIMIT :keepCount - )`, - ).run({ ":keepCount": keepCount }); -} - -/** - * Get the project start timestamp stored in project_metrics_meta. - * - * Purpose: surface when the autonomous run started for elapsed-time display. - * - * Consumer: metrics.js loadLedger and loadLedgerFromDisk. - */ -export function getProjectStartedAt(db) { - const row = db - .prepare( - "SELECT value FROM project_metrics_meta WHERE key = 'projectStartedAt'", - ) - .get(); - if (!row) return null; - const ts = Number(row["value"]); - return Number.isFinite(ts) ? ts : null; -} - -/** - * Persist the project start timestamp in project_metrics_meta. - * - * Purpose: survive process restarts so the dashboard shows wall-clock elapsed - * time for the full autonomous session, not just the current process lifetime. - * - * Consumer: metrics.js initMetrics (via loadLedger → defaultLedger path). - */ -export function setProjectStartedAt(db, ts) { - db.prepare( - `INSERT INTO project_metrics_meta (key, value) VALUES ('projectStartedAt', :value) - ON CONFLICT(key) DO UPDATE SET value = excluded.value`, - ).run({ ":value": String(ts) }); -} - -// ─── Intent Chapters (crash-resume context, schema v61) ─────────────────────── - -/** - * Open an intent chapter for a unit. - * - * Purpose: record the agent's declared intent at the start of each autonomous - * unit so that on crash-resume the prompt can surface "you were doing X" without - * replaying the full transcript. - * - * Consumer: auto/phases.js at unit start (before LLM dispatch). - * - * @param {object} args - * @param {string} args.id - UUID for this chapter (caller-generated) - * @param {string} args.unitType - e.g. "execute-task" - * @param {string} args.unitId - e.g. 
"M001/S01/T02" - * @param {string} [args.milestoneId] - * @param {string} [args.sliceId] - * @param {string} [args.taskId] - * @param {string} args.intent - human-readable intent statement - * @param {object} [args.metadata] - optional extra context (serialized to JSON) - * @returns {string} chapter id - */ -export function openIntentChapter({ - id, - unitType, - unitId, - milestoneId, - sliceId, - taskId, - intent, - metadata, -}) { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const now = new Date().toISOString(); - currentDb - .prepare( - `INSERT INTO intent_chapters - (id, unit_type, unit_id, milestone_id, slice_id, task_id, intent, opened_at, metadata_json) - VALUES - (:id, :unitType, :unitId, :milestoneId, :sliceId, :taskId, :intent, :openedAt, :metadataJson) - ON CONFLICT(id) DO NOTHING`, - ) - .run({ - ":id": id, - ":unitType": unitType, - ":unitId": unitId, - ":milestoneId": milestoneId ?? null, - ":sliceId": sliceId ?? null, - ":taskId": taskId ?? null, - ":intent": intent, - ":openedAt": now, - ":metadataJson": metadata ? JSON.stringify(metadata) : null, - }); - return id; -} - -/** - * Close an intent chapter on normal unit completion. - * - * Purpose: mark the chapter closed so it is not surfaced as a crash-resume - * context on the next run. Called after the unit reaches a terminal state. - * - * Consumer: auto/phases.js runFinalize (after successful or failed unit close). - * - * @param {string} id - chapter id returned by openIntentChapter - * @param {string} [outcome] - "done" | "failed" | "skipped" | "blocked" - * @returns {boolean} true if a row was updated - */ -export function closeIntentChapter(id, outcome = "done") { - if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); - const res = currentDb - .prepare( - `UPDATE intent_chapters - SET closed_at = :closedAt, outcome = :outcome - WHERE id = :id AND closed_at IS NULL`, - ) - .run({ - ":id": id, - ":closedAt": new Date().toISOString(), - ":outcome": outcome, - }); - return (res?.changes ?? 0) > 0; -} - -/** - * Return all unclosed intent chapters, newest first. - * - * Purpose: detect which units were interrupted mid-flight so their intent can - * be injected into the next autonomous prompt for crash-resume continuity. - * - * Consumer: auto-prompts.js system context injection and /status handler. - * - * @param {object} [opts] - * @param {number} [opts.limit=5] - cap to avoid prompt bloat - * @returns {Array<{id, unitType, unitId, intent, openedAt}>} - */ -export function getOpenIntentChapters({ limit = 5 } = {}) { - if (!currentDb) return []; - return currentDb - .prepare( - `SELECT id, unit_type as unitType, unit_id as unitId, - milestone_id as milestoneId, slice_id as sliceId, task_id as taskId, - intent, opened_at as openedAt, metadata_json as metadataJson - FROM intent_chapters - WHERE closed_at IS NULL - ORDER BY opened_at DESC - LIMIT :limit`, - ) - .all({ ":limit": limit }); -} - -/** - * Close all unclosed chapters for a unit. - * - * Purpose: bulk-close stale chapters when a unit is force-reset or skipped - * to prevent phantom resume context from earlier failed attempts. - * - * Consumer: reset-slice, skip, and force-dispatch recovery paths. 
- * - * @param {string} unitType - * @param {string} unitId - * @param {string} [outcome="cancelled"] - * @returns {number} rows updated - */ -export function closeIntentChaptersForUnit( - unitType, - unitId, - outcome = "cancelled", -) { - if (!currentDb) return 0; - const res = currentDb - .prepare( - `UPDATE intent_chapters - SET closed_at = :closedAt, outcome = :outcome - WHERE unit_type = :unitType AND unit_id = :unitId AND closed_at IS NULL`, - ) - .run({ - ":closedAt": new Date().toISOString(), - ":outcome": outcome, - ":unitType": unitType, - ":unitId": unitId, - }); - return res?.changes ?? 0; -} diff --git a/src/resources/extensions/sf/sf-db/sf-db-artifacts.js b/src/resources/extensions/sf/sf-db/sf-db-artifacts.js new file mode 100644 index 000000000..ec3d62bd1 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-artifacts.js @@ -0,0 +1,49 @@ +import { _getAdapter, rowToArtifact } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; +import { logWarning } from '../workflow-logger.js'; + +export function clearArtifacts() { + const currentDb = _getAdapter(); + if (!currentDb) return; + try { + currentDb.exec("DELETE FROM artifacts"); + } catch (e) { + logWarning("db", `clearArtifacts failed: ${e.message}`); + } +} + +export function insertArtifact(a) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR REPLACE INTO artifacts (path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at) + VALUES (:path, :artifact_type, :milestone_id, :slice_id, :task_id, :full_content, :imported_at)`) + .run({ + ":path": a.path, + ":artifact_type": a.artifact_type, + ":milestone_id": a.milestone_id, + ":slice_id": a.slice_id, + ":task_id": a.task_id, + ":full_content": a.full_content, + ":imported_at": new Date().toISOString(), + }); +} + +export function getArtifact(path) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare("SELECT * FROM artifacts WHERE path = :path") + .get({ ":path": path }); + if (!row) return null; + return rowToArtifact(row); +} + +export function deleteArtifactByPath(path) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare("DELETE FROM artifacts WHERE path = :path") + .run({ ":path": path }); +} + diff --git a/src/resources/extensions/sf/sf-db/sf-db-backlog.js b/src/resources/extensions/sf/sf-db/sf-db-backlog.js new file mode 100644 index 000000000..e6035eeef --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-backlog.js @@ -0,0 +1,101 @@ +import { _getAdapter, rowToBacklogItem } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; + +export function listBacklogItems() { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare( + "SELECT * FROM backlog_items ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id", + ) + .all() + .map(rowToBacklogItem); +} + +export function nextBacklogItemId() { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const row = currentDb + .prepare( + "SELECT id FROM backlog_items WHERE id LIKE '999.%' ORDER BY CAST(substr(id, 5) AS INTEGER) DESC LIMIT 1", + ) + .get(); + const next = row?.id ? Number.parseInt(String(row.id).slice(4), 10) + 1 : 1; + return `999.${Number.isFinite(next) ? 
next : 1}`; +} + +export function addBacklogItem({ + id, + title, + note = "", + source = "manual", + triageRunId = null, + status = "pending", +}) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const itemId = id ?? nextBacklogItemId(); + const now = new Date().toISOString(); + const sequenceRow = currentDb + .prepare( + "SELECT COALESCE(MAX(sequence), 0) + 1 AS sequence FROM backlog_items", + ) + .get(); + currentDb + .prepare(`INSERT INTO backlog_items ( + id, title, status, note, source, triage_run_id, sequence, created_at, updated_at, promoted_at + ) VALUES ( + :id, :title, :status, :note, :source, :triage_run_id, :sequence, :created_at, :updated_at, :promoted_at + ) + ON CONFLICT(id) DO UPDATE SET + title = excluded.title, + status = excluded.status, + note = excluded.note, + source = excluded.source, + triage_run_id = excluded.triage_run_id, + updated_at = excluded.updated_at, + promoted_at = excluded.promoted_at`) + .run({ + ":id": itemId, + ":title": title, + ":status": status, + ":note": note, + ":source": source, + ":triage_run_id": triageRunId, + ":sequence": sequenceRow?.sequence ?? 1, + ":created_at": now, + ":updated_at": now, + ":promoted_at": status === "promoted" ? now : null, + }); + return itemId; +} + +export function updateBacklogItemStatus(id, status, note = "") { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const now = new Date().toISOString(); + const result = currentDb + .prepare(`UPDATE backlog_items + SET status = :status, + note = :note, + updated_at = :updated_at, + promoted_at = CASE WHEN :status = 'promoted' THEN :updated_at ELSE promoted_at END + WHERE id = :id`) + .run({ + ":id": id, + ":status": status, + ":note": note, + ":updated_at": now, + }); + return (result?.changes ?? 0) > 0; +} + +export function removeBacklogItem(id) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const result = currentDb + .prepare("DELETE FROM backlog_items WHERE id = :id") + .run({ ":id": id }); + return (result?.changes ?? 0) > 0; +} + diff --git a/src/resources/extensions/sf/sf-db/sf-db-core.js b/src/resources/extensions/sf/sf-db/sf-db-core.js new file mode 100644 index 000000000..f7c19ef7f --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-core.js @@ -0,0 +1,4140 @@ +// SF Database Abstraction Layer +// Provides a SQLite database via node:sqlite (Node >= 26 built-in). +// +// Exposes a unified sync API for decisions and requirements storage. +// Schema is initialized on first open with WAL mode for file-backed DBs. +// +// ─── Single-writer invariant ───────────────────────────────────────────── +// This file is the ONLY place in the codebase that issues write SQL +// (INSERT / UPDATE / DELETE / REPLACE / BEGIN-COMMIT transactions) against +// the engine database at `.sf/sf.db`. All other modules must call the +// typed wrappers exported here. The structural test +// `tests/single-writer-invariant.test.ts` fails CI if a new bypass appears. +// +// `_getAdapter()` is retained for read-only SELECTs in query modules +// (context-store, memory-store queries, doctor checks, projections). +// Do NOT use it for writes — add a wrapper here instead. +// +// The separate `.sf/unit-claims.db` managed by `unit-ownership.ts` is an +// intentionally independent store for cross-worktree claim races and is +// excluded from this invariant. 
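+//
+// Example (illustrative sketch only, not code introduced by this refactor;
+// the consumer function names and import paths below are assumptions): a
+// query module reads through the shared adapter but routes every write
+// through a typed wrapper exported by a domain module.
+//
+//   import { _getAdapter } from "./sf-db/sf-db-core.js";
+//   import { addBacklogItem } from "./sf-db/sf-db-backlog.js";
+//
+//   // Read path: a plain SELECT via the adapter is permitted.
+//   export function countPendingBacklogItems() {
+//     const db = _getAdapter();
+//     if (!db) return 0;
+//     const row = db
+//       .prepare("SELECT COUNT(*) AS cnt FROM backlog_items WHERE status = 'pending'")
+//       .get();
+//     return row?.cnt ?? 0;
+//   }
+//
+//   // Write path: never issue INSERT/UPDATE here; call the exported wrapper.
+//   export function captureIdea(title) {
+//     return addBacklogItem({ title, source: "manual" });
+//   }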
+import { + copyFileSync, + existsSync, + mkdirSync, + readdirSync, + readFileSync, + realpathSync, + statSync, + unlinkSync, + writeFileSync, +} from "node:fs"; +import { dirname, join } from "node:path"; +import { DatabaseSync } from "node:sqlite"; +import { SF_STALE_STATE, SFError } from "../errors.js"; +import { getGateIdsForTurn } from "../gate-registry.js"; +import { + normalizeSchedulerStatus, + normalizeTaskStatus, + taskFrontmatterFromRecord, + withTaskFrontmatter, +} from "../task-frontmatter.js"; +import { readTraceEvents } from "../uok/trace-writer.js"; +import { logError, logWarning } from "../workflow-logger.js"; + +let loadAttempted = false; +function loadProvider() { + if (loadAttempted) return; + loadAttempted = true; + // node:sqlite is built-in in Node >= 26 +} +function normalizeRow(row) { + if (row == null) return undefined; + if (Object.getPrototypeOf(row) === null) { + return { ...row }; + } + return row; +} +function normalizeRows(rows) { + return rows.map((r) => normalizeRow(r)); +} +const DB_QUERY_TIMEOUT_MS = 30_000; +const DB_BACKUP_MIN_INTERVAL_MS = 15 * 60 * 1000; +const DB_BACKUP_RETENTION = 24; +const DB_FULL_VACUUM_MIN_INTERVAL_MS = 6 * 60 * 60 * 1000; +const MAX_ERROR_STORED_BYTES = 2048; + +function createAdapter(rawDb) { + const db = rawDb; + const stmtCache = new Map(); + function wrapStmt(raw) { + return { + run(...params) { + return raw.run(...params); + }, + get(...params) { + return normalizeRow(raw.get(...params)); + }, + all(...params) { + return normalizeRows(raw.all(...params)); + }, + }; + } + return { + exec(sql) { + db.exec(sql); + }, + prepare(sql) { + let cached = stmtCache.get(sql); + if (cached) return cached; + cached = wrapStmt(db.prepare(sql)); + stmtCache.set(sql, cached); + return cached; + }, + close() { + stmtCache.clear(); + db.close(); + }, + }; +} + +/** + * Execute a database query with timeout protection. + * Falls back to empty result if query exceeds timeout. + * + * Purpose: Prevent hanging reads from blocking autonomous dispatch. + * + * Consumer: memory-repository.js, context-store.js, and any read query + * that needs a safety ceiling. + */ +export function withQueryTimeout( + operation, + fallbackValue, + timeoutMs = DB_QUERY_TIMEOUT_MS, +) { + try { + return operation(); + } catch (err) { + if (err?.message?.includes("timeout") || err?.message?.includes("busy")) { + logWarning( + "sf-db", + `Query timed out after ${timeoutMs}ms, returning fallback`, + ); + return fallbackValue; + } + throw err; + } +} +function openRawDb(path) { + loadProvider(); + return new DatabaseSync(path); +} +function sqliteStringLiteral(value) { + return `'${String(value).replaceAll("'", "''")}'`; +} +function databaseBackupDir(path) { + return join(dirname(path), "backups", "db"); +} +function latestDatabaseBackupMtime(dir) { + if (!existsSync(dir)) return 0; + let latest = 0; + for (const entry of readdirSync(dir)) { + if (!entry.startsWith("sf.db.")) continue; + const file = join(dir, entry); + try { + const stat = statSync(file); + if (stat.isFile() && stat.mtimeMs > latest) latest = stat.mtimeMs; + } catch { + // Ignore files that disappear during pruning. 
+ } + } + return latest; +} +function pruneDatabaseBackups(dir) { + if (!existsSync(dir)) return; + const backups = []; + for (const entry of readdirSync(dir)) { + if (!entry.startsWith("sf.db.")) continue; + const file = join(dir, entry); + try { + const stat = statSync(file); + if (stat.isFile()) backups.push({ file, mtimeMs: stat.mtimeMs }); + } catch { + // Ignore files that disappear during pruning. + } + } + backups.sort((a, b) => b.mtimeMs - a.mtimeMs); + for (const backup of backups.slice(DB_BACKUP_RETENTION)) { + try { + unlinkSync(backup.file); + } catch { + // Best-effort retention; never block DB open on pruning. + } + } +} +function databaseMaintenancePath(path) { + return join(databaseBackupDir(path), "maintenance.json"); +} +function readDatabaseMaintenanceState(path) { + try { + return JSON.parse(readFileSync(databaseMaintenancePath(path), "utf-8")); + } catch { + return {}; + } +} +function writeDatabaseMaintenanceState(path, state) { + try { + writeFileSync( + databaseMaintenancePath(path), + JSON.stringify(state, null, 2) + "\n", + "utf-8", + ); + } catch { + // Best-effort maintenance metadata. + } +} +function createDatabaseSnapshot(rawDb, path) { + if (path === ":memory:" || process.env.SF_DB_BACKUP_DISABLE === "1") return; + const dir = databaseBackupDir(path); + try { + mkdirSync(dir, { recursive: true }); + const latest = latestDatabaseBackupMtime(dir); + if (latest > 0 && Date.now() - latest < DB_BACKUP_MIN_INTERVAL_MS) return; + const stamp = new Date().toISOString().replace(/[:.]/g, "-"); + const backupPath = join(dir, `sf.db.${stamp}`); + rawDb.exec(`VACUUM INTO ${sqliteStringLiteral(backupPath)}`); + pruneDatabaseBackups(dir); + } catch (err) { + logWarning( + "sf-db", + `database snapshot failed: ${err instanceof Error ? err.message : String(err)}`, + ); + } +} +function performDatabaseMaintenance(rawDb, path) { + if (path === ":memory:" || process.env.SF_DB_MAINTENANCE_DISABLE === "1") + return; + try { + const quickCheck = rawDb.prepare("PRAGMA quick_check").get(); + if (quickCheck?.quick_check !== "ok") { + logWarning("sf-db", "database quick_check failed; skipping maintenance"); + return; + } + rawDb.exec("PRAGMA wal_checkpoint(PASSIVE)"); + rawDb.exec("PRAGMA optimize"); + rawDb.exec("PRAGMA incremental_vacuum(128)"); + + const state = readDatabaseMaintenanceState(path); + const lastFullVacuumAt = + typeof state.lastFullVacuumAt === "string" + ? Date.parse(state.lastFullVacuumAt) + : 0; + if ( + !Number.isFinite(lastFullVacuumAt) || + Date.now() - lastFullVacuumAt >= DB_FULL_VACUUM_MIN_INTERVAL_MS + ) { + rawDb.exec("VACUUM"); + writeDatabaseMaintenanceState(path, { + ...state, + lastFullVacuumAt: new Date().toISOString(), + }); + } + } catch (err) { + logWarning( + "sf-db", + `database maintenance failed: ${err instanceof Error ? 
err.message : String(err)}`, + ); + } +} +const SCHEMA_VERSION = 61; +function indexExists(db, name) { + return !!db + .prepare( + "SELECT 1 as present FROM sqlite_master WHERE type = 'index' AND name = ?", + ) + .get(name); +} +function dedupeVerificationEvidenceRows(db) { + db.exec(` + DELETE FROM verification_evidence + WHERE rowid NOT IN ( + SELECT MIN(rowid) + FROM verification_evidence + GROUP BY task_id, slice_id, milestone_id, command, verdict + ) + `); +} +function ensureVerificationEvidenceDedupIndex(db) { + if (indexExists(db, "idx_verification_evidence_dedup")) return; + dedupeVerificationEvidenceRows(db); + db.exec( + "CREATE UNIQUE INDEX IF NOT EXISTS idx_verification_evidence_dedup ON verification_evidence(task_id, slice_id, milestone_id, command, verdict)", + ); +} +function ensureRepoProfileTables(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS repo_profiles ( + profile_id TEXT PRIMARY KEY, + project_hash TEXT NOT NULL, + project_root TEXT NOT NULL DEFAULT '', + head TEXT DEFAULT NULL, + branch TEXT DEFAULT NULL, + remote_hash TEXT DEFAULT NULL, + dirty INTEGER NOT NULL DEFAULT 0, + profile_json TEXT NOT NULL DEFAULT '{}', + created_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS repo_file_observations ( + path TEXT PRIMARY KEY, + latest_profile_id TEXT NOT NULL, + git_status TEXT NOT NULL, + ownership TEXT NOT NULL, + language TEXT DEFAULT NULL, + size_bytes INTEGER NOT NULL DEFAULT 0, + content_hash TEXT DEFAULT NULL, + summary TEXT DEFAULT NULL, + first_seen_at TEXT NOT NULL, + last_seen_at TEXT NOT NULL, + adopted_at TEXT DEFAULT NULL, + adoption_unit_id TEXT DEFAULT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_repo_profiles_created ON repo_profiles(created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_repo_file_observations_status ON repo_file_observations(git_status, ownership)", + ); +} +function ensureBacklogTables(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS backlog_items ( + id TEXT PRIMARY KEY, + title TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + note TEXT NOT NULL DEFAULT '', + source TEXT NOT NULL DEFAULT '', + triage_run_id TEXT DEFAULT NULL, + sequence INTEGER NOT NULL DEFAULT 0, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + promoted_at TEXT DEFAULT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_backlog_items_status_sequence ON backlog_items(status, sequence, id)", + ); +} +function ensureScheduleTables(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS schedule_entries ( + seq INTEGER PRIMARY KEY AUTOINCREMENT, + scope TEXT NOT NULL DEFAULT 'project', + id TEXT NOT NULL, + schema_version INTEGER NOT NULL DEFAULT 1, + kind TEXT NOT NULL DEFAULT 'reminder', + status TEXT NOT NULL DEFAULT 'pending', + due_at TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL DEFAULT '', + snoozed_at TEXT DEFAULT NULL, + payload_json TEXT NOT NULL DEFAULT '{}', + created_by TEXT NOT NULL DEFAULT 'user', + autonomous_dispatch INTEGER NOT NULL DEFAULT 0, + full_json TEXT NOT NULL DEFAULT '{}', + imported_from TEXT DEFAULT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_schedule_entries_scope_id_created ON schedule_entries(scope, id, created_at DESC, seq DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_schedule_entries_scope_due ON schedule_entries(scope, status, due_at)", + ); + ensureColumn( + db, + "schedule_entries", + "autonomous_dispatch", + "ALTER TABLE schedule_entries ADD COLUMN autonomous_dispatch INTEGER NOT NULL DEFAULT 0", + ); +} +function 
ensureSolverEvalTables(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS solver_eval_runs ( + run_id TEXT PRIMARY KEY, + suite_source TEXT NOT NULL DEFAULT '', + cases_count INTEGER NOT NULL DEFAULT 0, + summary_json TEXT NOT NULL DEFAULT '{}', + report_path TEXT NOT NULL DEFAULT '', + results_path TEXT NOT NULL DEFAULT '', + db_recorded INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS solver_eval_case_results ( + run_id TEXT NOT NULL, + case_id TEXT NOT NULL, + title TEXT NOT NULL DEFAULT '', + mode TEXT NOT NULL, + passed INTEGER NOT NULL DEFAULT 0, + false_complete INTEGER NOT NULL DEFAULT 0, + duration_ms INTEGER DEFAULT NULL, + command_status INTEGER DEFAULT NULL, + solver_outcome TEXT DEFAULT NULL, + pdd_complete INTEGER DEFAULT NULL, + result_json TEXT NOT NULL DEFAULT '{}', + created_at TEXT NOT NULL, + PRIMARY KEY (run_id, case_id, mode), + FOREIGN KEY (run_id) REFERENCES solver_eval_runs(run_id) ON DELETE CASCADE + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_solver_eval_runs_created ON solver_eval_runs(created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_solver_eval_case_lookup ON solver_eval_case_results(run_id, case_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_solver_eval_case_false_complete ON solver_eval_case_results(false_complete, mode)", + ); +} +function ensureSessionTables(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS sessions ( + session_id TEXT PRIMARY KEY, + trace_id TEXT DEFAULT NULL, + mode TEXT NOT NULL DEFAULT 'interactive', + cwd TEXT NOT NULL DEFAULT '', + repo TEXT DEFAULT NULL, + branch TEXT DEFAULT NULL, + summary TEXT DEFAULT NULL, + summary_count INTEGER NOT NULL DEFAULT 0, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS turns ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + session_id TEXT NOT NULL REFERENCES sessions(session_id) ON DELETE CASCADE, + turn_index INTEGER NOT NULL, + user_message TEXT, + assistant_response TEXT, + ts TEXT NOT NULL, + UNIQUE(session_id, turn_index) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS session_file_touches ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + session_id TEXT NOT NULL REFERENCES sessions(session_id) ON DELETE CASCADE, + path TEXT NOT NULL, + tool_name TEXT DEFAULT NULL, + turn_id INTEGER DEFAULT NULL REFERENCES turns(id), + first_seen_at TEXT NOT NULL, + UNIQUE(session_id, path) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS session_refs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + session_id TEXT NOT NULL REFERENCES sessions(session_id) ON DELETE CASCADE, + ref_type TEXT NOT NULL, + ref_value TEXT NOT NULL, + turn_id INTEGER DEFAULT NULL REFERENCES turns(id), + created_at TEXT NOT NULL, + UNIQUE(session_id, ref_type, ref_value) + ) + `); + // FTS5 external-content table over turns for keyword recall. + // content_rowid links to turns.id; triggers below keep it in sync. 
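+  // Illustrative recall query (assumed shape; the real helper in
+  // sf-db-session-store.js may differ): join the FTS index back to turns
+  // through rowid and rank by relevance.
+  //
+  //   SELECT t.session_id, t.turn_index, t.user_message
+  //   FROM turns_fts
+  //   JOIN turns t ON t.id = turns_fts.rowid
+  //   WHERE turns_fts MATCH :query
+  //   ORDER BY rank
+  //   LIMIT 10;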
+ db.exec(` + CREATE VIRTUAL TABLE IF NOT EXISTS turns_fts USING fts5( + user_message, + assistant_response, + content='turns', + content_rowid='id' + ) + `); + db.exec(` + CREATE TRIGGER IF NOT EXISTS turns_fts_insert AFTER INSERT ON turns BEGIN + INSERT INTO turns_fts(rowid, user_message, assistant_response) + VALUES (new.id, new.user_message, new.assistant_response); + END + `); + db.exec(` + CREATE TRIGGER IF NOT EXISTS turns_fts_update AFTER UPDATE ON turns BEGIN + INSERT INTO turns_fts(turns_fts, rowid, user_message, assistant_response) + VALUES ('delete', old.id, old.user_message, old.assistant_response); + INSERT INTO turns_fts(rowid, user_message, assistant_response) + VALUES (new.id, new.user_message, new.assistant_response); + END + `); + db.exec(` + CREATE TRIGGER IF NOT EXISTS turns_fts_delete AFTER DELETE ON turns BEGIN + INSERT INTO turns_fts(turns_fts, rowid, user_message, assistant_response) + VALUES ('delete', old.id, old.user_message, old.assistant_response); + END + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_sessions_created ON sessions(created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_sessions_repo ON sessions(repo, created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_turns_session ON turns(session_id, turn_index)", + ); + db.exec("CREATE INDEX IF NOT EXISTS idx_turns_ts ON turns(ts DESC)"); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_session_file_touches_session ON session_file_touches(session_id, first_seen_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_session_file_touches_path ON session_file_touches(path, session_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_session_refs_session ON session_refs(session_id, created_at DESC)", + ); +} +function ensureSessionSnapshotTable(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS session_snapshots ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + -- Session that triggered this checkpoint. FK to sessions(session_id). + session_id TEXT NOT NULL, + -- Zero-based counter within the session (first snapshot = 0). + snapshot_index INTEGER NOT NULL DEFAULT 0, + -- Optional git stash ref so the snapshot can be restored exactly. + -- NULL when the working tree had no changes to stash. + git_stash_ref TEXT, + -- Free-text label for the snapshot (e.g. "before migration deploy"). 
+ label TEXT, + ts TEXT NOT NULL, + UNIQUE(session_id, snapshot_index) + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_session_snapshots_session ON session_snapshots(session_id, snapshot_index)", + ); +} +function ensureHeadlessRunTables(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS headless_runs ( + run_id TEXT PRIMARY KEY, + command TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT '', + exit_code INTEGER NOT NULL DEFAULT 0, + timed_out INTEGER NOT NULL DEFAULT 0, + interrupted INTEGER NOT NULL DEFAULT 0, + restart_count INTEGER NOT NULL DEFAULT 0, + max_restarts INTEGER NOT NULL DEFAULT 0, + duration_ms INTEGER NOT NULL DEFAULT 0, + total_events INTEGER NOT NULL DEFAULT 0, + tool_calls INTEGER NOT NULL DEFAULT 0, + solver_eval_run_id TEXT DEFAULT NULL, + solver_eval_report_path TEXT DEFAULT NULL, + details_json TEXT NOT NULL DEFAULT '{}', + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_headless_runs_created ON headless_runs(created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_headless_runs_status ON headless_runs(status, created_at DESC)", + ); +} +function ensureUokMessageTables(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS uok_messages ( + id TEXT PRIMARY KEY, + from_agent TEXT NOT NULL, + to_agent TEXT NOT NULL, + body TEXT NOT NULL DEFAULT '', + metadata_json TEXT NOT NULL DEFAULT '{}', + sent_at TEXT NOT NULL DEFAULT '', + delivered_at TEXT DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS uok_message_reads ( + message_id TEXT NOT NULL, + agent_id TEXT NOT NULL, + read_at TEXT NOT NULL DEFAULT '', + PRIMARY KEY (message_id, agent_id), + FOREIGN KEY (message_id) REFERENCES uok_messages(id) ON DELETE CASCADE + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_messages_to ON uok_messages(to_agent, sent_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_messages_conversation ON uok_messages(from_agent, to_agent, sent_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_messages_sent ON uok_messages(sent_at DESC)", + ); +} +function ensureDeployTables(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS deploy_runs ( + id TEXT PRIMARY KEY, + milestone_id TEXT NOT NULL, + target TEXT NOT NULL, + command TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + exit_code INTEGER DEFAULT NULL, + output TEXT DEFAULT NULL, + deployed_url TEXT DEFAULT NULL, + created_at TEXT NOT NULL, + finished_at TEXT DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS smoke_results ( + id TEXT PRIMARY KEY, + deploy_run_id TEXT NOT NULL, + milestone_id TEXT NOT NULL, + url TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + verdict TEXT DEFAULT NULL, + checks_json TEXT NOT NULL DEFAULT '[]', + created_at TEXT NOT NULL, + finished_at TEXT DEFAULT NULL, + FOREIGN KEY (deploy_run_id) REFERENCES deploy_runs(id) ON DELETE CASCADE + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS release_records ( + id TEXT PRIMARY KEY, + milestone_id TEXT NOT NULL, + version TEXT NOT NULL, + prev_version TEXT DEFAULT NULL, + changelog_entry TEXT DEFAULT NULL, + git_tag TEXT DEFAULT NULL, + published INTEGER NOT NULL DEFAULT 0, + created_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS rollback_runs ( + id TEXT PRIMARY KEY, + deploy_run_id TEXT NOT NULL, + milestone_id TEXT NOT NULL, + reason TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + output TEXT DEFAULT NULL, + created_at TEXT NOT NULL, + finished_at TEXT DEFAULT NULL, + FOREIGN KEY 
(deploy_run_id) REFERENCES deploy_runs(id) ON DELETE CASCADE + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_deploy_runs_milestone ON deploy_runs(milestone_id, created_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_smoke_results_deploy ON smoke_results(deploy_run_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_release_records_milestone ON release_records(milestone_id, created_at DESC)", + ); +} +function ensureSleeptimeQueueTable(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS sleeptime_consolidation_queue ( + id TEXT PRIMARY KEY, + conversation_agent TEXT NOT NULL, + memory_agent TEXT NOT NULL, + content TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'pending', + created_at TEXT NOT NULL, + processed_at TEXT DEFAULT NULL, + result TEXT DEFAULT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_sleeptime_queue_status ON sleeptime_consolidation_queue(status, created_at ASC)", + ); +} +function ensureSelfFeedbackTables(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS self_feedback ( + id TEXT PRIMARY KEY, + ts TEXT NOT NULL, + kind TEXT NOT NULL, + severity TEXT NOT NULL, + blocking INTEGER NOT NULL DEFAULT 0, + repo_identity TEXT NOT NULL DEFAULT '', + sf_version TEXT NOT NULL DEFAULT '', + base_path TEXT NOT NULL DEFAULT '', + unit_type TEXT DEFAULT NULL, + milestone_id TEXT DEFAULT NULL, + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + summary TEXT NOT NULL DEFAULT '', + evidence TEXT NOT NULL DEFAULT '', + suggested_fix TEXT NOT NULL DEFAULT '', + full_json TEXT NOT NULL, + resolved_at TEXT DEFAULT NULL, + resolved_reason TEXT DEFAULT NULL, + resolved_by_sf_version TEXT DEFAULT NULL, + resolved_evidence_json TEXT DEFAULT NULL, + resolved_criteria_json TEXT DEFAULT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_self_feedback_open ON self_feedback(resolved_at, severity, ts)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_self_feedback_kind ON self_feedback(kind, ts)", + ); +} +function ensureRetrievalEvidenceTables(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS retrieval_evidence ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + backend TEXT NOT NULL, + source_kind TEXT NOT NULL DEFAULT 'code', + query TEXT NOT NULL DEFAULT '', + strategy TEXT NOT NULL DEFAULT '', + scope TEXT NOT NULL DEFAULT '', + project_root TEXT NOT NULL DEFAULT '', + git_head TEXT DEFAULT NULL, + git_branch TEXT DEFAULT NULL, + worktree_dirty INTEGER NOT NULL DEFAULT 0, + freshness TEXT NOT NULL DEFAULT 'unknown', + status TEXT NOT NULL DEFAULT 'ok', + hit_count INTEGER NOT NULL DEFAULT 0, + elapsed_ms INTEGER NOT NULL DEFAULT 0, + cache_path TEXT DEFAULT NULL, + error TEXT DEFAULT NULL, + result_json TEXT NOT NULL DEFAULT '{}', + recorded_at TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_backend_recorded ON retrieval_evidence(backend, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_scope_recorded ON retrieval_evidence(scope, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_retrieval_evidence_status_recorded ON retrieval_evidence(status, recorded_at DESC)", + ); +} +function ensureTriageTables(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS triage_runs ( + id TEXT PRIMARY KEY, + source_file TEXT, + status TEXT NOT NULL DEFAULT 'complete', + result_summary_json TEXT, + created_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS triage_evals ( + id TEXT PRIMARY KEY, + run_id TEXT NOT NULL REFERENCES triage_runs(id), + task_input TEXT NOT NULL, + 
expected_behavior TEXT, + evidence TEXT, + failure_mode TEXT, + status TEXT NOT NULL DEFAULT 'pending', + created_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS triage_items ( + id TEXT PRIMARY KEY, + run_id TEXT NOT NULL REFERENCES triage_runs(id), + kind TEXT NOT NULL, + content TEXT NOT NULL, + evidence TEXT, + status TEXT NOT NULL DEFAULT 'pending', + created_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS triage_skills ( + id TEXT PRIMARY KEY, + run_id TEXT NOT NULL REFERENCES triage_runs(id), + name TEXT, + description TEXT, + trigger TEXT, + raw_json TEXT, + status TEXT NOT NULL DEFAULT 'pending', + created_at TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_triage_evals_run ON triage_evals(run_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_triage_items_run_kind ON triage_items(run_id, kind)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_triage_skills_run ON triage_skills(run_id)", + ); +} +function ensureRuntimeCounterTable(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS runtime_counters ( + key TEXT PRIMARY KEY, + value INTEGER NOT NULL DEFAULT 0, + updated_at TEXT NOT NULL + ) + `); +} +function ensureValidationAttentionMarkersTable(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS validation_attention_markers ( + milestone_id TEXT PRIMARY KEY, + created_at TEXT NOT NULL, + source TEXT, + remediation_round INTEGER, + revalidation_round INTEGER, + revalidation_requested_at TEXT + ) + `); +} +function ensureSpecSchemaTables(db) { + // Tier 1.3: Spec/Runtime/Evidence schema separation + // Creates 9 normalized tables for milestone, slice, task entities + // Each entity type has: _specs (immutable intent), (runtime state), _evidence (audit trail) + + // ── Milestone Spec Table (immutable record of intent) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS milestone_specs ( + id TEXT NOT NULL, + vision TEXT NOT NULL DEFAULT '', + success_criteria TEXT DEFAULT '', + key_risks TEXT DEFAULT '', + proof_strategy TEXT DEFAULT '', + verification_contract TEXT DEFAULT '', + verification_integration TEXT DEFAULT '', + verification_operational TEXT DEFAULT '', + verification_uat TEXT DEFAULT '', + definition_of_done TEXT DEFAULT '', + requirement_coverage TEXT DEFAULT '', + boundary_map_markdown TEXT DEFAULT '', + vision_meeting_json TEXT DEFAULT '', + product_research_json TEXT DEFAULT '', + spec_version INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY (id) REFERENCES milestones(id) + ) + `); + + // ── Slice Spec Table (immutable record of intent) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS slice_specs ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + goal TEXT NOT NULL DEFAULT '', + success_criteria TEXT DEFAULT '', + proof_level TEXT DEFAULT '', + integration_closure TEXT DEFAULT '', + observability_impact TEXT DEFAULT '', + adversarial_partner TEXT DEFAULT '', + adversarial_combatant TEXT DEFAULT '', + adversarial_architect TEXT DEFAULT '', + planning_meeting_json TEXT DEFAULT '', + spec_version INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + PRIMARY KEY (milestone_id, slice_id), + FOREIGN KEY (milestone_id) REFERENCES milestones(id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + + // ── Task Spec Table (immutable record of intent) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS task_specs ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + task_id TEXT NOT NULL, + verify TEXT NOT NULL DEFAULT 
'', + inputs TEXT DEFAULT '', + expected_output TEXT DEFAULT '', + risk TEXT NOT NULL DEFAULT 'low', + mutation_scope TEXT NOT NULL DEFAULT 'isolated', + verification_type TEXT NOT NULL DEFAULT 'self-check', + plan_approval TEXT NOT NULL DEFAULT 'not-required', + estimated_effort INTEGER DEFAULT NULL, + dependencies TEXT NOT NULL DEFAULT '[]', + blocks_parallel INTEGER NOT NULL DEFAULT 0, + requires_user_input INTEGER NOT NULL DEFAULT 0, + auto_retry INTEGER NOT NULL DEFAULT 1, + max_retries INTEGER NOT NULL DEFAULT 2, + spec_version INTEGER NOT NULL DEFAULT 1, + created_at TEXT NOT NULL, + PRIMARY KEY (milestone_id, slice_id, task_id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id), + FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) + ) + `); + + // ── Milestone Evidence Table (append-only audit trail) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS milestone_evidence ( + milestone_id TEXT NOT NULL, + evidence_type TEXT NOT NULL, + content TEXT NOT NULL, + recorded_at TEXT NOT NULL, + phase_name TEXT DEFAULT '', + recorded_by TEXT DEFAULT '', + evidence_id TEXT NOT NULL DEFAULT (lower(hex(randomblob(16)))), + PRIMARY KEY (milestone_id, evidence_id), + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + + // ── Slice Evidence Table (append-only audit trail) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS slice_evidence ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + evidence_type TEXT NOT NULL, + content TEXT NOT NULL, + recorded_at TEXT NOT NULL, + phase_name TEXT DEFAULT '', + recorded_by TEXT DEFAULT '', + evidence_id TEXT NOT NULL DEFAULT (lower(hex(randomblob(16)))), + PRIMARY KEY (milestone_id, slice_id, evidence_id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + + // ── Task Evidence Table (append-only audit trail) ─────────── + db.exec(` + CREATE TABLE IF NOT EXISTS task_evidence ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + task_id TEXT NOT NULL, + evidence_type TEXT NOT NULL, + content TEXT NOT NULL, + recorded_at TEXT NOT NULL, + phase_name TEXT DEFAULT '', + recorded_by TEXT DEFAULT '', + evidence_id TEXT NOT NULL DEFAULT (lower(hex(randomblob(16)))), + PRIMARY KEY (milestone_id, slice_id, task_id, evidence_id), + FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) + ) + `); + + // Indices for efficient querying of evidence trails + db.exec(` + CREATE INDEX IF NOT EXISTS idx_milestone_evidence_type + ON milestone_evidence(milestone_id, evidence_type, recorded_at DESC) + `); + db.exec(` + CREATE INDEX IF NOT EXISTS idx_slice_evidence_type + ON slice_evidence(milestone_id, slice_id, evidence_type, recorded_at DESC) + `); + db.exec(` + CREATE INDEX IF NOT EXISTS idx_task_evidence_type + ON task_evidence(milestone_id, slice_id, task_id, evidence_type, recorded_at DESC) + `); +} +function initSchema(db, fileBacked) { + if (fileBacked) db.exec("PRAGMA journal_mode=WAL"); + if (fileBacked) db.exec("PRAGMA busy_timeout = 5000"); + if (fileBacked) db.exec("PRAGMA synchronous = NORMAL"); + // Disable SQLite's automatic WAL checkpoint (default: every 1000 pages). + // Auto-checkpoint fires at unpredictable times — if the process is killed + // mid-checkpoint (e.g., OOM), the main DB is partially written with an + // empty WAL and cannot be recovered. Explicit checkpoints are issued at + // safe loop boundaries instead (post-unit finalize, close). 
+ if (fileBacked) db.exec("PRAGMA wal_autocheckpoint=0"); + if (fileBacked) db.exec("PRAGMA auto_vacuum = INCREMENTAL"); + if (fileBacked) db.exec("PRAGMA cache_size = -8000"); // 8 MB page cache + if (fileBacked && process.platform !== "darwin") + db.exec("PRAGMA mmap_size = 67108864"); // 64 MB mmap + db.exec("PRAGMA temp_store = MEMORY"); + db.exec("PRAGMA foreign_keys = ON"); + db.exec("BEGIN"); + try { + db.exec(` + CREATE TABLE IF NOT EXISTS schema_version ( + version INTEGER NOT NULL, + applied_at TEXT NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS decisions ( + seq INTEGER PRIMARY KEY AUTOINCREMENT, + id TEXT NOT NULL UNIQUE, + when_context TEXT NOT NULL DEFAULT '', + scope TEXT NOT NULL DEFAULT '', + decision TEXT NOT NULL DEFAULT '', + choice TEXT NOT NULL DEFAULT '', + rationale TEXT NOT NULL DEFAULT '', + revisable TEXT NOT NULL DEFAULT '', + made_by TEXT NOT NULL DEFAULT 'agent', + superseded_by TEXT DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS requirements ( + id TEXT PRIMARY KEY, + class TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT '', + description TEXT NOT NULL DEFAULT '', + why TEXT NOT NULL DEFAULT '', + source TEXT NOT NULL DEFAULT '', + primary_owner TEXT NOT NULL DEFAULT '', + supporting_slices TEXT NOT NULL DEFAULT '', + validation TEXT NOT NULL DEFAULT '', + notes TEXT NOT NULL DEFAULT '', + full_content TEXT NOT NULL DEFAULT '', + superseded_by TEXT DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS artifacts ( + path TEXT PRIMARY KEY, + artifact_type TEXT NOT NULL DEFAULT '', + milestone_id TEXT DEFAULT NULL, + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + full_content TEXT NOT NULL DEFAULT '', + imported_at TEXT NOT NULL DEFAULT '' + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS memories ( + seq INTEGER PRIMARY KEY AUTOINCREMENT, + id TEXT NOT NULL UNIQUE, + category TEXT NOT NULL, + content TEXT NOT NULL, + confidence REAL NOT NULL DEFAULT 0.8, + source_unit_type TEXT, + source_unit_id TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + superseded_by TEXT DEFAULT NULL, + hit_count INTEGER NOT NULL DEFAULT 0, + tags TEXT NOT NULL DEFAULT '[]' + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS memory_processed_units ( + unit_key TEXT PRIMARY KEY, + activity_file TEXT, + processed_at TEXT NOT NULL + ) + `); + // memory_embeddings, memory_relations, memory_sources used to be referenced + // by helper functions and queries (memory-embeddings.ts, memory-relations.ts, + // memory-ingest.ts) without a corresponding CREATE TABLE — any actual write + // would have failed with "no such table". Creating them as IF NOT EXISTS so + // existing DBs that somehow have them survive, and fresh DBs work. + db.exec(` + CREATE TABLE IF NOT EXISTS memory_embeddings ( + memory_id TEXT PRIMARY KEY, + model TEXT NOT NULL, + dim INTEGER NOT NULL, + vector BLOB NOT NULL, + updated_at TEXT NOT NULL, + FOREIGN KEY (memory_id) REFERENCES memories(id) ON DELETE CASCADE + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS memory_relations ( + from_id TEXT NOT NULL, + to_id TEXT NOT NULL, + rel TEXT NOT NULL, + confidence REAL NOT NULL DEFAULT 0.8, + created_at TEXT NOT NULL, + PRIMARY KEY (from_id, to_id, rel), + FOREIGN KEY (from_id) REFERENCES memories(id) ON DELETE CASCADE, + FOREIGN KEY (to_id) REFERENCES memories(id) ON DELETE CASCADE + ) + `); + // PK covers from_id as leading column already; reverse lookups + // (memory-relations.ts queries WHERE to_id = ?) 
need their own index + // to avoid a full table scan as the relation count grows. + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memory_relations_to ON memory_relations(to_id)", + ); + db.exec(` + CREATE TABLE IF NOT EXISTS memory_sources ( + id TEXT PRIMARY KEY, + kind TEXT NOT NULL, + uri TEXT, + title TEXT, + content TEXT NOT NULL, + content_hash TEXT NOT NULL, + imported_at TEXT NOT NULL, + scope TEXT NOT NULL DEFAULT 'project', + tags TEXT NOT NULL DEFAULT '[]' + ) + `); + // content_hash is queried on every insert for deduplication; without an + // index the lookup becomes a full table scan as ingestion volume grows. + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memory_sources_content_hash ON memory_sources(content_hash)", + ); + // Category GROUP BY queries (e.g. /memory stats) need a covering + // index that filters active memories and groups by category. + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memories_category ON memories(superseded_by, category)", + ); + db.exec(` + CREATE TABLE IF NOT EXISTS judgments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + unit_id TEXT NOT NULL, + decision TEXT NOT NULL DEFAULT '', + alternatives_json TEXT NOT NULL DEFAULT '[]', + reasoning TEXT NOT NULL DEFAULT '', + confidence TEXT NOT NULL DEFAULT 'medium', + ts TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_judgments_unit_id ON judgments(unit_id, ts DESC)", + ); + db.exec(` + CREATE TABLE IF NOT EXISTS milestones ( + id TEXT PRIMARY KEY, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'active', + depends_on TEXT NOT NULL DEFAULT '[]', + created_at TEXT NOT NULL DEFAULT '', + completed_at TEXT DEFAULT NULL, + vision TEXT NOT NULL DEFAULT '', + success_criteria TEXT NOT NULL DEFAULT '[]', + key_risks TEXT NOT NULL DEFAULT '[]', + proof_strategy TEXT NOT NULL DEFAULT '[]', + verification_contract TEXT NOT NULL DEFAULT '', + verification_integration TEXT NOT NULL DEFAULT '', + verification_operational TEXT NOT NULL DEFAULT '', + verification_uat TEXT NOT NULL DEFAULT '', + definition_of_done TEXT NOT NULL DEFAULT '[]', + requirement_coverage TEXT NOT NULL DEFAULT '', + boundary_map_markdown TEXT NOT NULL DEFAULT '', + vision_meeting_json TEXT NOT NULL DEFAULT '', + product_research_json TEXT NOT NULL DEFAULT '', + sequence INTEGER DEFAULT 0 + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS slices ( + milestone_id TEXT NOT NULL, + id TEXT NOT NULL, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + risk TEXT NOT NULL DEFAULT 'medium', + depends TEXT NOT NULL DEFAULT '[]', + demo TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL DEFAULT '', + completed_at TEXT DEFAULT NULL, + full_summary_md TEXT NOT NULL DEFAULT '', + full_uat_md TEXT NOT NULL DEFAULT '', + goal TEXT NOT NULL DEFAULT '', + success_criteria TEXT NOT NULL DEFAULT '', + proof_level TEXT NOT NULL DEFAULT '', + integration_closure TEXT NOT NULL DEFAULT '', + observability_impact TEXT NOT NULL DEFAULT '', + adversarial_partner TEXT NOT NULL DEFAULT '', + adversarial_combatant TEXT NOT NULL DEFAULT '', + adversarial_architect TEXT NOT NULL DEFAULT '', + planning_meeting_json TEXT NOT NULL DEFAULT '', + sequence INTEGER DEFAULT 0, -- Ordering hint: tools may set this to control execution order + replan_triggered_at TEXT DEFAULT NULL, + is_sketch INTEGER NOT NULL DEFAULT 0, -- SF ADR-011: 1 = slice is a sketch awaiting refine-slice + sketch_scope TEXT NOT NULL DEFAULT '', -- SF ADR-011: 2-3 sentence scope hint from plan-milestone + PRIMARY KEY (milestone_id, id), + FOREIGN KEY 
(milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS tasks ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + id TEXT NOT NULL, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + one_liner TEXT NOT NULL DEFAULT '', + narrative TEXT NOT NULL DEFAULT '', + verification_result TEXT NOT NULL DEFAULT '', + duration TEXT NOT NULL DEFAULT '', + completed_at TEXT DEFAULT NULL, + blocker_discovered INTEGER DEFAULT 0, + deviations TEXT NOT NULL DEFAULT '', + known_issues TEXT NOT NULL DEFAULT '', + key_files TEXT NOT NULL DEFAULT '[]', + key_decisions TEXT NOT NULL DEFAULT '[]', + full_summary_md TEXT NOT NULL DEFAULT '', + description TEXT NOT NULL DEFAULT '', + estimate TEXT NOT NULL DEFAULT '', + files TEXT NOT NULL DEFAULT '[]', + verify TEXT NOT NULL DEFAULT '', + inputs TEXT NOT NULL DEFAULT '[]', + expected_output TEXT NOT NULL DEFAULT '[]', + observability_impact TEXT NOT NULL DEFAULT '', + full_plan_md TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL DEFAULT '', + verification_status TEXT NOT NULL DEFAULT '', + risk TEXT NOT NULL DEFAULT 'low', + mutation_scope TEXT NOT NULL DEFAULT 'isolated', + verification_type TEXT NOT NULL DEFAULT 'self-check', + plan_approval TEXT NOT NULL DEFAULT 'not-required', + task_status TEXT NOT NULL DEFAULT 'todo', + estimated_effort INTEGER DEFAULT NULL, + dependencies TEXT NOT NULL DEFAULT '[]', + blocks_parallel INTEGER NOT NULL DEFAULT 0, + requires_user_input INTEGER NOT NULL DEFAULT 0, + auto_retry INTEGER NOT NULL DEFAULT 1, + max_retries INTEGER NOT NULL DEFAULT 2, + frontmatter_version INTEGER NOT NULL DEFAULT 1, + sequence INTEGER DEFAULT 0, -- Ordering hint: tools may set this to control execution order + escalation_pending INTEGER NOT NULL DEFAULT 0, -- ADR-011 P2 (SF): pause-on-escalation flag + escalation_awaiting_review INTEGER NOT NULL DEFAULT 0, -- ADR-011 P2 (SF): continueWithDefault=true marker (no pause) + escalation_override_applied INTEGER NOT NULL DEFAULT 0, -- SF ADR-011 P2: 1 once carry-forward injected into a downstream prompt + escalation_artifact_path TEXT DEFAULT NULL, -- ADR-011 P2 (SF): path to T##-ESCALATION.json + PRIMARY KEY (milestone_id, slice_id, id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + ensureTaskSchedulerTable(db); + if (columnExists(db, "tasks", "escalation_pending")) { + db.exec(` + CREATE INDEX IF NOT EXISTS idx_tasks_escalation_pending ON tasks(milestone_id, slice_id, escalation_pending) + `); + } + db.exec(` + CREATE TABLE IF NOT EXISTS verification_evidence ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id TEXT NOT NULL DEFAULT '', + slice_id TEXT NOT NULL DEFAULT '', + milestone_id TEXT NOT NULL DEFAULT '', + command TEXT NOT NULL DEFAULT '', + exit_code INTEGER DEFAULT 0, + verdict TEXT NOT NULL DEFAULT '', + duration_ms INTEGER DEFAULT 0, + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS replan_history ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + milestone_id TEXT NOT NULL DEFAULT '', + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + summary TEXT NOT NULL DEFAULT '', + previous_artifact_path TEXT DEFAULT NULL, + replacement_artifact_path TEXT DEFAULT NULL, + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS assessments ( + path TEXT PRIMARY KEY, + 
milestone_id TEXT NOT NULL DEFAULT '', + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + status TEXT NOT NULL DEFAULT '', + scope TEXT NOT NULL DEFAULT '', + full_content TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS quality_gates ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + gate_id TEXT NOT NULL, + scope TEXT NOT NULL DEFAULT 'slice', + task_id TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + verdict TEXT NOT NULL DEFAULT '', + rationale TEXT NOT NULL DEFAULT '', + findings TEXT NOT NULL DEFAULT '', + evaluated_at TEXT DEFAULT NULL, + PRIMARY KEY (milestone_id, slice_id, gate_id, task_id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + // Slice dependency junction table (v14) + db.exec(` + CREATE TABLE IF NOT EXISTS slice_dependencies ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + depends_on_slice_id TEXT NOT NULL, + PRIMARY KEY (milestone_id, slice_id, depends_on_slice_id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id), + FOREIGN KEY (milestone_id, depends_on_slice_id) REFERENCES slices(milestone_id, id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS gate_circuit_breakers ( + gate_id TEXT PRIMARY KEY, + state TEXT NOT NULL DEFAULT 'closed', + failure_streak INTEGER NOT NULL DEFAULT 0, + last_failure_at TEXT DEFAULT NULL, + opened_at TEXT DEFAULT NULL, + half_open_attempts INTEGER NOT NULL DEFAULT 0, + updated_at TEXT NOT NULL DEFAULT '' + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS audit_turn_index ( + trace_id TEXT NOT NULL, + turn_id TEXT NOT NULL, + first_ts TEXT NOT NULL, + last_ts TEXT NOT NULL, + event_count INTEGER NOT NULL DEFAULT 0, + PRIMARY KEY (trace_id, turn_id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS llm_task_outcomes ( + model_id TEXT NOT NULL, + provider TEXT NOT NULL, + unit_type TEXT NOT NULL, + unit_id TEXT NOT NULL, + succeeded INTEGER NOT NULL DEFAULT 0, + retries INTEGER NOT NULL DEFAULT 0, + escalated INTEGER NOT NULL DEFAULT 0, + verification_passed INTEGER DEFAULT NULL, + blocker_discovered INTEGER NOT NULL DEFAULT 0, + duration_ms INTEGER DEFAULT NULL, + tokens_total INTEGER DEFAULT NULL, + cost_usd REAL DEFAULT NULL, + failure_mode TEXT DEFAULT NULL, + recorded_at INTEGER NOT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS uok_runs ( + run_id TEXT PRIMARY KEY, + session_id TEXT DEFAULT NULL, + path TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'started', + started_at TEXT NOT NULL, + ended_at TEXT DEFAULT NULL, + error TEXT DEFAULT NULL, + flags_json TEXT NOT NULL DEFAULT '{}', + updated_at TEXT NOT NULL + ) + `); + ensureSelfFeedbackTables(db); + ensureSolverEvalTables(db); + ensureRetrievalEvidenceTables(db); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memories_active ON memories(superseded_by)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_replan_history_milestone ON replan_history(milestone_id, created_at)", + ); + // v13 indexes — hot-path dispatch queries + db.exec( + "CREATE INDEX IF NOT EXISTS idx_tasks_active ON tasks(milestone_id, slice_id, status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_slices_active ON slices(milestone_id, status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_milestones_status ON milestones(status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_quality_gates_pending ON quality_gates(milestone_id, slice_id, status)", 
+ ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_verification_evidence_task ON verification_evidence(milestone_id, slice_id, task_id)", + ); + ensureVerificationEvidenceDedupIndex(db); + // v14 index — slice dependency lookups + db.exec( + "CREATE INDEX IF NOT EXISTS idx_slice_deps_target ON slice_dependencies(milestone_id, depends_on_slice_id)", + ); + db.exec( + "CREATE UNIQUE INDEX IF NOT EXISTS idx_llm_task_outcomes_identity ON llm_task_outcomes(unit_type, unit_id, recorded_at)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_model_unit ON llm_task_outcomes(model_id, unit_type, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_unit ON llm_task_outcomes(unit_type, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_provider ON llm_task_outcomes(provider, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_runs_status_started ON uok_runs(status, started_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_runs_session ON uok_runs(session_id, started_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_self_feedback_open ON self_feedback(resolved_at, severity, ts)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_self_feedback_kind ON self_feedback(kind, ts)", + ); + ensureRepoProfileTables(db); + ensureBacklogTables(db); + ensureScheduleTables(db); + ensureSolverEvalTables(db); + ensureHeadlessRunTables(db); + ensureSessionTables(db); + ensureSessionSnapshotTable(db); + ensureUokMessageTables(db); + ensureDeployTables(db); + ensureSleeptimeQueueTable(db); + ensureSpecSchemaTables(db); + ensureTaskFrontmatterColumns(db); + ensureRetrievalEvidenceTables(db); + ensureTriageTables(db); + ensureRuntimeCounterTable(db); + ensureValidationAttentionMarkersTable(db); + db.exec( + `CREATE VIEW IF NOT EXISTS active_decisions AS SELECT * FROM decisions WHERE superseded_by IS NULL`, + ); + db.exec( + `CREATE VIEW IF NOT EXISTS active_requirements AS SELECT * FROM requirements WHERE superseded_by IS NULL`, + ); + db.exec( + `CREATE VIEW IF NOT EXISTS active_memories AS SELECT * FROM memories WHERE superseded_by IS NULL`, + ); + db.exec( + `CREATE VIEW IF NOT EXISTS active_tasks AS SELECT * FROM tasks WHERE status NOT IN ('done','complete','completed','cancelled')`, + ); + db.exec(` + CREATE VIEW IF NOT EXISTS v_task_full AS + SELECT t.*, ts.spec_version, ts.verify AS spec_verify, + ts.inputs AS spec_inputs, ts.expected_output AS spec_expected_output + FROM tasks t + LEFT JOIN task_specs ts + ON t.milestone_id = ts.milestone_id + AND t.slice_id = ts.slice_id + AND t.id = ts.task_id + `); + const existing = db + .prepare("SELECT count(*) as cnt FROM schema_version") + .get(); + if (existing && existing["cnt"] === 0) { + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": SCHEMA_VERSION, + ":applied_at": new Date().toISOString(), + }); + } + db.exec("COMMIT"); + } catch (err) { + db.exec("ROLLBACK"); + throw err; + } + migrateSchema(db); +} +function columnExists(db, table, column) { + const rows = db.prepare(`PRAGMA table_info(${table})`).all(); + return rows.some((row) => row["name"] === column); +} +function tableExists(db, table) { + const row = db + .prepare(`SELECT name FROM sqlite_master WHERE type='table' AND name=?`) + .get(table); + return row != null; +} +function ensureColumn(db, table, column, ddl) { + if (!columnExists(db, table, column)) db.exec(ddl); +} +export function 
hasPlanningPayload(planning = {}) { + return ( + Boolean(planning.vision) || + (planning.successCriteria?.length ?? 0) > 0 || + (planning.keyRisks?.length ?? 0) > 0 || + (planning.proofStrategy?.length ?? 0) > 0 || + Boolean(planning.verificationContract) || + Boolean(planning.verificationIntegration) || + Boolean(planning.verificationOperational) || + Boolean(planning.verificationUat) || + (planning.definitionOfDone?.length ?? 0) > 0 || + Boolean(planning.requirementCoverage) || + Boolean(planning.boundaryMapMarkdown) || + Boolean(planning.visionMeeting) || + Boolean(planning.productResearch) + ); +} +export function parseJsonOrFallback(raw, fallback) { + if (typeof raw !== "string" || raw.trim().length === 0) return fallback; + try { + return JSON.parse(raw); + } catch { + return fallback; + } +} +export function isEmptyMilestoneSpec(row) { + if (!row) return true; + return ( + (row["vision"] ?? "") === "" && + parseJsonOrFallback(row["success_criteria"], []).length === 0 && + parseJsonOrFallback(row["key_risks"], []).length === 0 && + parseJsonOrFallback(row["proof_strategy"], []).length === 0 && + (row["verification_contract"] ?? "") === "" && + (row["verification_integration"] ?? "") === "" && + (row["verification_operational"] ?? "") === "" && + (row["verification_uat"] ?? "") === "" && + parseJsonOrFallback(row["definition_of_done"], []).length === 0 && + (row["requirement_coverage"] ?? "") === "" && + (row["boundary_map_markdown"] ?? "") === "" && + (row["vision_meeting_json"] ?? "") === "" && + (row["product_research_json"] ?? "") === "" + ); +} +function ensureTaskCreatedAtColumn(db) { + ensureColumn( + db, + "tasks", + "created_at", + `ALTER TABLE tasks ADD COLUMN created_at TEXT NOT NULL DEFAULT ''`, + ); +} +function ensureTaskFrontmatterColumns(db) { + ensureColumn( + db, + "tasks", + "risk", + `ALTER TABLE tasks ADD COLUMN risk TEXT NOT NULL DEFAULT 'low'`, + ); + ensureColumn( + db, + "tasks", + "mutation_scope", + `ALTER TABLE tasks ADD COLUMN mutation_scope TEXT NOT NULL DEFAULT 'isolated'`, + ); + ensureColumn( + db, + "tasks", + "verification_type", + `ALTER TABLE tasks ADD COLUMN verification_type TEXT NOT NULL DEFAULT 'self-check'`, + ); + ensureColumn( + db, + "tasks", + "plan_approval", + `ALTER TABLE tasks ADD COLUMN plan_approval TEXT NOT NULL DEFAULT 'not-required'`, + ); + ensureColumn( + db, + "tasks", + "task_status", + `ALTER TABLE tasks ADD COLUMN task_status TEXT NOT NULL DEFAULT 'todo'`, + ); + ensureColumn( + db, + "tasks", + "estimated_effort", + `ALTER TABLE tasks ADD COLUMN estimated_effort INTEGER DEFAULT NULL`, + ); + ensureColumn( + db, + "tasks", + "dependencies", + `ALTER TABLE tasks ADD COLUMN dependencies TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "tasks", + "blocks_parallel", + `ALTER TABLE tasks ADD COLUMN blocks_parallel INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + "tasks", + "requires_user_input", + `ALTER TABLE tasks ADD COLUMN requires_user_input INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + "tasks", + "auto_retry", + `ALTER TABLE tasks ADD COLUMN auto_retry INTEGER NOT NULL DEFAULT 1`, + ); + ensureColumn( + db, + "tasks", + "max_retries", + `ALTER TABLE tasks ADD COLUMN max_retries INTEGER NOT NULL DEFAULT 2`, + ); + for (const table of ["task_specs"]) { + ensureColumn( + db, + table, + "risk", + `ALTER TABLE ${table} ADD COLUMN risk TEXT NOT NULL DEFAULT 'low'`, + ); + ensureColumn( + db, + table, + "mutation_scope", + `ALTER TABLE ${table} ADD COLUMN mutation_scope TEXT NOT NULL DEFAULT 
'isolated'`, + ); + ensureColumn( + db, + table, + "verification_type", + `ALTER TABLE ${table} ADD COLUMN verification_type TEXT NOT NULL DEFAULT 'self-check'`, + ); + ensureColumn( + db, + table, + "plan_approval", + `ALTER TABLE ${table} ADD COLUMN plan_approval TEXT NOT NULL DEFAULT 'not-required'`, + ); + ensureColumn( + db, + table, + "estimated_effort", + `ALTER TABLE ${table} ADD COLUMN estimated_effort INTEGER DEFAULT NULL`, + ); + ensureColumn( + db, + table, + "dependencies", + `ALTER TABLE ${table} ADD COLUMN dependencies TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + table, + "blocks_parallel", + `ALTER TABLE ${table} ADD COLUMN blocks_parallel INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + table, + "requires_user_input", + `ALTER TABLE ${table} ADD COLUMN requires_user_input INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + table, + "auto_retry", + `ALTER TABLE ${table} ADD COLUMN auto_retry INTEGER NOT NULL DEFAULT 1`, + ); + ensureColumn( + db, + table, + "max_retries", + `ALTER TABLE ${table} ADD COLUMN max_retries INTEGER NOT NULL DEFAULT 2`, + ); + } +} +function ensureTaskSchedulerTable(db) { + db.exec(` + CREATE TABLE IF NOT EXISTS task_scheduler ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + task_id TEXT NOT NULL, + status TEXT NOT NULL DEFAULT 'queued', + due_at TEXT DEFAULT NULL, + claimed_by TEXT DEFAULT NULL, + dispatched_at TEXT DEFAULT NULL, + consumed_at TEXT DEFAULT NULL, + expires_at TEXT DEFAULT NULL, + updated_at TEXT NOT NULL DEFAULT '', + PRIMARY KEY (milestone_id, slice_id, task_id), + FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) + ) + `); + db.exec(` + CREATE INDEX IF NOT EXISTS idx_task_scheduler_status + ON task_scheduler(status, due_at) + `); +} +function migrateCostUsdToMicroUsd(db) { + // Tier 2.7: Migrate cost_usd REAL to cost_micro_usd INTEGER + // Converts floating-point USD values to integer micro-USD (multiply by 1,000,000) + // Benefits: eliminates float drift on accumulated costs, easier reasoning about totals + // Purpose: Enable accurate cost tracking at scale without rounding errors + // Consumer: gate_runs cost tracking, cost analytics, budget checks + + // Guard: gate_runs may not exist in minimal legacy DBs (it will be dropped in v58) + if (!tableExists(db, "gate_runs")) return; + + // Add cost_micro_usd column if it doesn't exist + if (!columnExists(db, "gate_runs", "cost_micro_usd")) { + db.exec( + `ALTER TABLE gate_runs ADD COLUMN cost_micro_usd INTEGER DEFAULT NULL`, + ); + } + + // Migrate data: convert cost_usd to cost_micro_usd + // NULL values stay NULL; non-NULL values are multiplied by 1,000,000 + if (columnExists(db, "gate_runs", "cost_usd")) { + db.prepare(` + UPDATE gate_runs + SET cost_micro_usd = CAST(ROUND(cost_usd * 1000000) AS INTEGER) + WHERE cost_usd IS NOT NULL + AND cost_micro_usd IS NULL + `).run(); + } + + // Drop old cost_usd column (SQLite ALTER TABLE DROP is only available in 3.35.0+) + // For safety, we keep the old column as deprecated but unused + // Future: drop after confirming all queries use cost_micro_usd +} +function populateSpecTablesFromExisting(db) { + // Tier 1.3 Phase 2: Migrate existing spec data to new spec tables + // This populates milestone_specs, slice_specs, task_specs from existing columns + // Evidence tables are left empty; they populate as tools create new evidence. 
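Before the spec backfill below, a quick illustration of the micro-USD convention that migrateCostUsdToMicroUsd above relies on. This is a sketch with hypothetical helper names, not code taken from sf-db:

const MICRO_PER_USD = 1_000_000;

// Round once at the conversion boundary; sums of integers then stay exact.
function usdToMicroUsd(usd) {
  return usd == null ? null : Math.round(usd * MICRO_PER_USD);
}

function microUsdToUsd(micro) {
  return micro == null ? null : micro / MICRO_PER_USD;
}

// Float USD drifts (0.1 + 0.2 === 0.30000000000000004); micro-USD does not.
const totalMicro = usdToMicroUsd(0.1) + usdToMicroUsd(0.2); // 300000 exactly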
+ + const now = new Date().toISOString(); + + // Migrate milestone specs + db.prepare(` + INSERT OR IGNORE INTO milestone_specs ( + id, vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json, + spec_version, created_at + ) + SELECT + id, vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, '', + 1, COALESCE(created_at, ?) + FROM milestones + WHERE id NOT IN (SELECT id FROM milestone_specs) + `).run(now); + + // Migrate slice specs + db.prepare(` + INSERT OR IGNORE INTO slice_specs ( + milestone_id, slice_id, goal, success_criteria, proof_level, + integration_closure, observability_impact, + adversarial_partner, adversarial_combatant, adversarial_architect, + planning_meeting_json, spec_version, created_at + ) + SELECT + milestone_id, id, goal, success_criteria, proof_level, + integration_closure, observability_impact, + adversarial_partner, adversarial_combatant, adversarial_architect, + planning_meeting_json, 1, COALESCE(created_at, ?) + FROM slices + WHERE (milestone_id, id) NOT IN (SELECT milestone_id, slice_id FROM slice_specs) + `).run(now); + + // Migrate task specs + db.prepare(` + INSERT OR IGNORE INTO task_specs ( + milestone_id, slice_id, task_id, verify, inputs, expected_output, + spec_version, created_at + ) + SELECT + milestone_id, slice_id, id, verify, inputs, expected_output, + 1, COALESCE(created_at, ?) + FROM tasks + WHERE (milestone_id, slice_id, id) NOT IN (SELECT milestone_id, slice_id, task_id FROM task_specs) + `).run(now); +} +function migrateSchema(db) { + const row = withQueryTimeout( + () => db.prepare("SELECT MAX(version) as v FROM schema_version").get(), + null, + ); + const currentVersion = row ? row["v"] : 0; + if (currentVersion >= SCHEMA_VERSION) return; + // Backup database before migration so a mid-migration crash doesn't + // leave a partially-migrated DB with no recovery path. + // WAL-safe: checkpoint first to flush WAL into the main DB file, then copy. + if (currentPath && currentPath !== ":memory:" && existsSync(currentPath)) { + try { + const backupPath = `${currentPath}.backup-v${currentVersion}`; + if (!existsSync(backupPath)) { + // Flush WAL to main DB file before copying — without this, the backup + // may be missing committed data that only exists in the -wal file. + try { + db.exec("PRAGMA wal_checkpoint(TRUNCATE)"); + } catch { + /* checkpoint is best-effort */ + } + copyFileSync(currentPath, backupPath); + } + } catch (backupErr) { + // Log but proceed — blocking migration leaves the DB stuck at an old + // schema version permanently on read-only or full filesystems. + logWarning( + "db", + `Pre-migration backup failed: ${backupErr instanceof Error ? 
backupErr.message : String(backupErr)}`, + ); + } + } + db.exec("BEGIN"); + try { + if (currentVersion < 2) { + db.exec(` + CREATE TABLE IF NOT EXISTS artifacts ( + path TEXT PRIMARY KEY, + artifact_type TEXT NOT NULL DEFAULT '', + milestone_id TEXT DEFAULT NULL, + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + full_content TEXT NOT NULL DEFAULT '', + imported_at TEXT NOT NULL DEFAULT '' + ) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 2, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 3) { + db.exec(` + CREATE TABLE IF NOT EXISTS memories ( + seq INTEGER PRIMARY KEY AUTOINCREMENT, + id TEXT NOT NULL UNIQUE, + category TEXT NOT NULL, + content TEXT NOT NULL, + confidence REAL NOT NULL DEFAULT 0.8, + source_unit_type TEXT, + source_unit_id TEXT, + created_at TEXT NOT NULL, + updated_at TEXT NOT NULL, + superseded_by TEXT DEFAULT NULL, + hit_count INTEGER NOT NULL DEFAULT 0 + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS memory_processed_units ( + unit_key TEXT PRIMARY KEY, + activity_file TEXT, + processed_at TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memories_active ON memories(superseded_by)", + ); + db.exec("DROP VIEW IF EXISTS active_memories"); + db.exec( + "CREATE VIEW active_memories AS SELECT * FROM memories WHERE superseded_by IS NULL", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 3, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 4) { + ensureColumn( + db, + "decisions", + "made_by", + `ALTER TABLE decisions ADD COLUMN made_by TEXT NOT NULL DEFAULT 'agent'`, + ); + db.exec("DROP VIEW IF EXISTS active_decisions"); + db.exec( + "CREATE VIEW active_decisions AS SELECT * FROM decisions WHERE superseded_by IS NULL", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 4, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 5) { + db.exec(` + CREATE TABLE IF NOT EXISTS milestones ( + id TEXT PRIMARY KEY, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'active', + created_at TEXT NOT NULL, + completed_at TEXT DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS slices ( + milestone_id TEXT NOT NULL, + id TEXT NOT NULL, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + risk TEXT NOT NULL DEFAULT 'medium', + created_at TEXT NOT NULL DEFAULT '', + completed_at TEXT DEFAULT NULL, + PRIMARY KEY (milestone_id, id), + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS tasks ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + id TEXT NOT NULL, + title TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + one_liner TEXT NOT NULL DEFAULT '', + narrative TEXT NOT NULL DEFAULT '', + verification_result TEXT NOT NULL DEFAULT '', + duration TEXT NOT NULL DEFAULT '', + completed_at TEXT DEFAULT NULL, + blocker_discovered INTEGER DEFAULT 0, + deviations TEXT NOT NULL DEFAULT '', + known_issues TEXT NOT NULL DEFAULT '', + key_files TEXT NOT NULL DEFAULT '[]', + key_decisions TEXT NOT NULL DEFAULT '[]', + full_summary_md TEXT NOT NULL DEFAULT '', + PRIMARY KEY (milestone_id, slice_id, id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS 
verification_evidence ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + task_id TEXT NOT NULL DEFAULT '', + slice_id TEXT NOT NULL DEFAULT '', + milestone_id TEXT NOT NULL DEFAULT '', + command TEXT NOT NULL DEFAULT '', + exit_code INTEGER DEFAULT 0, + verdict TEXT NOT NULL DEFAULT '', + duration_ms INTEGER DEFAULT 0, + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id, slice_id, task_id) REFERENCES tasks(milestone_id, slice_id, id) + ) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 5, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 6) { + ensureColumn( + db, + "slices", + "full_summary_md", + `ALTER TABLE slices ADD COLUMN full_summary_md TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "full_uat_md", + `ALTER TABLE slices ADD COLUMN full_uat_md TEXT NOT NULL DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 6, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 7) { + ensureColumn( + db, + "slices", + "depends", + `ALTER TABLE slices ADD COLUMN depends TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "slices", + "demo", + `ALTER TABLE slices ADD COLUMN demo TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "depends_on", + `ALTER TABLE milestones ADD COLUMN depends_on TEXT NOT NULL DEFAULT '[]'`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 7, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 8) { + ensureColumn( + db, + "milestones", + "vision", + `ALTER TABLE milestones ADD COLUMN vision TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "success_criteria", + `ALTER TABLE milestones ADD COLUMN success_criteria TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "milestones", + "key_risks", + `ALTER TABLE milestones ADD COLUMN key_risks TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "milestones", + "proof_strategy", + `ALTER TABLE milestones ADD COLUMN proof_strategy TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "milestones", + "verification_contract", + `ALTER TABLE milestones ADD COLUMN verification_contract TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "verification_integration", + `ALTER TABLE milestones ADD COLUMN verification_integration TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "verification_operational", + `ALTER TABLE milestones ADD COLUMN verification_operational TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "verification_uat", + `ALTER TABLE milestones ADD COLUMN verification_uat TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "definition_of_done", + `ALTER TABLE milestones ADD COLUMN definition_of_done TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "milestones", + "requirement_coverage", + `ALTER TABLE milestones ADD COLUMN requirement_coverage TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestones", + "boundary_map_markdown", + `ALTER TABLE milestones ADD COLUMN boundary_map_markdown TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "goal", + `ALTER TABLE slices ADD COLUMN goal TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "success_criteria", + `ALTER TABLE slices ADD 
COLUMN success_criteria TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "proof_level", + `ALTER TABLE slices ADD COLUMN proof_level TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "integration_closure", + `ALTER TABLE slices ADD COLUMN integration_closure TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "observability_impact", + `ALTER TABLE slices ADD COLUMN observability_impact TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "uat_verdict", + `ALTER TABLE slices ADD COLUMN uat_verdict TEXT DEFAULT NULL`, + ); + ensureColumn( + db, + "tasks", + "description", + `ALTER TABLE tasks ADD COLUMN description TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "tasks", + "estimate", + `ALTER TABLE tasks ADD COLUMN estimate TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "tasks", + "files", + `ALTER TABLE tasks ADD COLUMN files TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "tasks", + "verify", + `ALTER TABLE tasks ADD COLUMN verify TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "tasks", + "inputs", + `ALTER TABLE tasks ADD COLUMN inputs TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "tasks", + "expected_output", + `ALTER TABLE tasks ADD COLUMN expected_output TEXT NOT NULL DEFAULT '[]'`, + ); + ensureColumn( + db, + "tasks", + "observability_impact", + `ALTER TABLE tasks ADD COLUMN observability_impact TEXT NOT NULL DEFAULT ''`, + ); + db.exec(` + CREATE TABLE IF NOT EXISTS replan_history ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + milestone_id TEXT NOT NULL DEFAULT '', + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + summary TEXT NOT NULL DEFAULT '', + previous_artifact_path TEXT DEFAULT NULL, + replacement_artifact_path TEXT DEFAULT NULL, + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS assessments ( + path TEXT PRIMARY KEY, + milestone_id TEXT NOT NULL DEFAULT '', + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + status TEXT NOT NULL DEFAULT '', + scope TEXT NOT NULL DEFAULT '', + full_content TEXT NOT NULL DEFAULT '', + created_at TEXT NOT NULL DEFAULT '', + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_replan_history_milestone ON replan_history(milestone_id, created_at)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 8, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 9) { + ensureColumn( + db, + "slices", + "sequence", + `ALTER TABLE slices ADD COLUMN sequence INTEGER DEFAULT 0`, + ); + ensureColumn( + db, + "tasks", + "sequence", + `ALTER TABLE tasks ADD COLUMN sequence INTEGER DEFAULT 0`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 9, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 10) { + ensureColumn( + db, + "slices", + "replan_triggered_at", + `ALTER TABLE slices ADD COLUMN replan_triggered_at TEXT DEFAULT NULL`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 10, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 11) { + ensureColumn( + db, + "tasks", + "full_plan_md", + `ALTER TABLE tasks ADD COLUMN full_plan_md TEXT NOT NULL DEFAULT ''`, + ); + // Add unique 
constraint to replan_history for idempotency: + // one replan record per blocker task per slice per milestone. + db.exec(` + CREATE UNIQUE INDEX IF NOT EXISTS idx_replan_history_unique + ON replan_history(milestone_id, slice_id, task_id) + WHERE slice_id IS NOT NULL AND task_id IS NOT NULL + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 11, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 12) { + db.exec(` + CREATE TABLE IF NOT EXISTS quality_gates ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + gate_id TEXT NOT NULL, + scope TEXT NOT NULL DEFAULT 'slice', + task_id TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + verdict TEXT NOT NULL DEFAULT '', + rationale TEXT NOT NULL DEFAULT '', + findings TEXT NOT NULL DEFAULT '', + evaluated_at TEXT DEFAULT NULL, + PRIMARY KEY (milestone_id, slice_id, gate_id, task_id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id) + ) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 12, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 13) { + // Hot-path indexes for auto-loop dispatch queries + db.exec( + "CREATE INDEX IF NOT EXISTS idx_tasks_active ON tasks(milestone_id, slice_id, status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_slices_active ON slices(milestone_id, status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_milestones_status ON milestones(status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_quality_gates_pending ON quality_gates(milestone_id, slice_id, status)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_verification_evidence_task ON verification_evidence(milestone_id, slice_id, task_id)", + ); + ensureVerificationEvidenceDedupIndex(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 13, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 14) { + db.exec(` + CREATE TABLE IF NOT EXISTS slice_dependencies ( + milestone_id TEXT NOT NULL, + slice_id TEXT NOT NULL, + depends_on_slice_id TEXT NOT NULL, + PRIMARY KEY (milestone_id, slice_id, depends_on_slice_id), + FOREIGN KEY (milestone_id, slice_id) REFERENCES slices(milestone_id, id), + FOREIGN KEY (milestone_id, depends_on_slice_id) REFERENCES slices(milestone_id, id) + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_slice_deps_target ON slice_dependencies(milestone_id, depends_on_slice_id)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 14, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 15) { + db.exec(` + CREATE TABLE IF NOT EXISTS gate_runs ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + trace_id TEXT NOT NULL, + turn_id TEXT NOT NULL, + gate_id TEXT NOT NULL, + gate_type TEXT NOT NULL DEFAULT '', + unit_type TEXT DEFAULT NULL, + unit_id TEXT DEFAULT NULL, + milestone_id TEXT DEFAULT NULL, + slice_id TEXT DEFAULT NULL, + task_id TEXT DEFAULT NULL, + outcome TEXT NOT NULL DEFAULT 'pass', + failure_class TEXT NOT NULL DEFAULT 'none', + rationale TEXT NOT NULL DEFAULT '', + findings TEXT NOT NULL DEFAULT '', + attempt INTEGER NOT NULL DEFAULT 1, + max_attempts INTEGER NOT NULL DEFAULT 1, + retryable INTEGER NOT NULL DEFAULT 0, + evaluated_at TEXT NOT NULL DEFAULT '', + duration_ms
INTEGER DEFAULT NULL, + cost_micro_usd INTEGER DEFAULT NULL + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS turn_git_transactions ( + trace_id TEXT NOT NULL, + turn_id TEXT NOT NULL, + unit_type TEXT DEFAULT NULL, + unit_id TEXT DEFAULT NULL, + stage TEXT NOT NULL DEFAULT 'turn-start', + action TEXT NOT NULL DEFAULT 'status-only', + push INTEGER NOT NULL DEFAULT 0, + status TEXT NOT NULL DEFAULT 'ok', + error TEXT DEFAULT NULL, + metadata_json TEXT NOT NULL DEFAULT '{}', + updated_at TEXT NOT NULL DEFAULT '', + PRIMARY KEY (trace_id, turn_id, stage) + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS audit_events ( + event_id TEXT PRIMARY KEY, + trace_id TEXT NOT NULL, + turn_id TEXT DEFAULT NULL, + caused_by TEXT DEFAULT NULL, + category TEXT NOT NULL, + type TEXT NOT NULL, + ts TEXT NOT NULL, + payload_json TEXT NOT NULL DEFAULT '{}' + ) + `); + db.exec(` + CREATE TABLE IF NOT EXISTS audit_turn_index ( + trace_id TEXT NOT NULL, + turn_id TEXT NOT NULL, + first_ts TEXT NOT NULL, + last_ts TEXT NOT NULL, + event_count INTEGER NOT NULL DEFAULT 0, + PRIMARY KEY (trace_id, turn_id) + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_gate_runs_turn ON gate_runs(trace_id, turn_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_gate_runs_lookup ON gate_runs(milestone_id, slice_id, task_id, gate_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_turn_git_tx_turn ON turn_git_transactions(trace_id, turn_id)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_audit_events_trace ON audit_events(trace_id, ts)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_audit_events_turn ON audit_events(trace_id, turn_id, ts)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 15, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 16) { + db.exec(` + CREATE TABLE IF NOT EXISTS llm_task_outcomes ( + model_id TEXT NOT NULL, + provider TEXT NOT NULL, + unit_type TEXT NOT NULL, + unit_id TEXT NOT NULL, + succeeded INTEGER NOT NULL DEFAULT 0, + retries INTEGER NOT NULL DEFAULT 0, + escalated INTEGER NOT NULL DEFAULT 0, + verification_passed INTEGER DEFAULT NULL, + blocker_discovered INTEGER NOT NULL DEFAULT 0, + duration_ms INTEGER DEFAULT NULL, + tokens_total INTEGER DEFAULT NULL, + cost_usd REAL DEFAULT NULL, + failure_mode TEXT DEFAULT NULL, + recorded_at INTEGER NOT NULL + ) + `); + db.exec( + "CREATE UNIQUE INDEX IF NOT EXISTS idx_llm_task_outcomes_identity ON llm_task_outcomes(unit_type, unit_id, recorded_at)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_model_unit ON llm_task_outcomes(model_id, unit_type, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_unit ON llm_task_outcomes(unit_type, recorded_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_provider ON llm_task_outcomes(provider, recorded_at DESC)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 16, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 17) { + ensureColumn( + db, + "tasks", + "verification_status", + `ALTER TABLE tasks ADD COLUMN verification_status TEXT NOT NULL DEFAULT ''`, + ); + // Backfill verification_status from existing verification_evidence rows so the + // prior-task guard works on databases upgraded mid-project (not just new ones). 
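The UPDATE below derives one of three statuses for each completed task from its verification_evidence exit codes. The same rule, expressed as a plain JavaScript sketch over one task's evidence rows (illustrative only, not an sf-db export):

// rows: [{ exit_code: number }, ...] for a single (milestone, slice, task)
function classifyVerification(rows) {
  if (rows.length === 0) return "";                 // no evidence recorded
  const failures = rows.filter((r) => r.exit_code !== 0).length;
  if (failures === 0) return "all_pass";            // every command exited 0
  if (failures < rows.length) return "partial";     // mixed pass/fail
  return "all_fail";                                // nothing passed
}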
+ db.exec(` + UPDATE tasks + SET verification_status = CASE + WHEN (SELECT COUNT(*) FROM verification_evidence ve + WHERE ve.milestone_id = tasks.milestone_id + AND ve.slice_id = tasks.slice_id + AND ve.task_id = tasks.id) = 0 + THEN '' + WHEN (SELECT COUNT(*) FROM verification_evidence ve + WHERE ve.milestone_id = tasks.milestone_id + AND ve.slice_id = tasks.slice_id + AND ve.task_id = tasks.id + AND ve.exit_code != 0) = 0 + THEN 'all_pass' + WHEN (SELECT COUNT(*) FROM verification_evidence ve + WHERE ve.milestone_id = tasks.milestone_id + AND ve.slice_id = tasks.slice_id + AND ve.task_id = tasks.id + AND ve.exit_code = 0) > 0 + THEN 'partial' + ELSE 'all_fail' + END + WHERE tasks.status IN ('complete', 'done') + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 17, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 18) { + ensureColumn( + db, + "slices", + "adversarial_partner", + `ALTER TABLE slices ADD COLUMN adversarial_partner TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "adversarial_combatant", + `ALTER TABLE slices ADD COLUMN adversarial_combatant TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "slices", + "adversarial_architect", + `ALTER TABLE slices ADD COLUMN adversarial_architect TEXT NOT NULL DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 18, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 19) { + ensureColumn( + db, + "slices", + "planning_meeting_json", + `ALTER TABLE slices ADD COLUMN planning_meeting_json TEXT NOT NULL DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 19, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 20) { + ensureColumn( + db, + "milestones", + "vision_meeting_json", + `ALTER TABLE milestones ADD COLUMN vision_meeting_json TEXT NOT NULL DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 20, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 21) { + ensureRepoProfileTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 21, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 22) { + // SF ADR-011: progressive planning. is_sketch=1 means the slice is a 2-3 + // sentence sketch awaiting refine-slice expansion; refine fills in the + // real plan and clears the flag. sketch_scope holds the milestone + // planner's stored scope hint that refine treats as a hard boundary. + ensureColumn( + db, + "slices", + "is_sketch", + `ALTER TABLE slices ADD COLUMN is_sketch INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + "slices", + "sketch_scope", + `ALTER TABLE slices ADD COLUMN sketch_scope TEXT NOT NULL DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 22, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 23) { + // ADR-011 Phase 2 (SF ADR): mid-execution escalation. escalation_pending=1 + // marks a task that paused for a user decision; escalation_artifact_path + // points to the T##-ESCALATION.json file containing options + recommendation. 
+ // State derivation will emit phase='escalating-task' when any task in the + // active slice has escalation_pending=1; dispatch returns 'stop' so the + // loop never bypasses a pending decision. + ensureColumn( + db, + "tasks", + "escalation_pending", + `ALTER TABLE tasks ADD COLUMN escalation_pending INTEGER NOT NULL DEFAULT 0`, + ); + ensureColumn( + db, + "tasks", + "escalation_artifact_path", + `ALTER TABLE tasks ADD COLUMN escalation_artifact_path TEXT DEFAULT NULL`, + ); + try { + db.exec( + "CREATE INDEX IF NOT EXISTS idx_tasks_escalation_pending ON tasks(milestone_id, slice_id, escalation_pending)", + ); + } catch { + /* index creation is opportunistic — fall through if backend lacks it */ + } + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 23, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 24) { + // ADR-011 P2 (SF ADR): the third escalation flag for the + // continueWithDefault=true case — an artifact is recorded for human + // review later, but the loop is NOT paused. Mutually exclusive with + // escalation_pending (the writer flips one or the other). + ensureColumn( + db, + "tasks", + "escalation_awaiting_review", + `ALTER TABLE tasks ADD COLUMN escalation_awaiting_review INTEGER NOT NULL DEFAULT 0`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 24, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 25) { + // SF ADR-011 P2 carry-forward: when an escalation is resolved, the user's + // choice should be visible to the next execute-task agent in the same + // slice. escalation_override_applied=0 marks "resolved but not yet + // injected into a downstream prompt"; the prompt builder calls + // claimEscalationOverride which atomically flips it to 1 (idempotent + // race-safe claim). Per-task granularity so multi-task slices can + // carry multiple resolved escalations forward independently. 
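What "idempotent race-safe claim" means in practice: a single UPDATE whose WHERE clause only matches the unclaimed state, so concurrent callers cannot both win. A sketch of that pattern against the column added just below; the function name and exact conditions are assumptions, not the actual claimEscalationOverride implementation:

function claimEscalationOverrideSketch(db, milestoneId, sliceId, taskId) {
  // Only the first caller flips 0 -> 1; later callers see changes === 0 and
  // skip re-injecting the resolved escalation into their prompt.
  const result = db
    .prepare(
      `UPDATE tasks
          SET escalation_override_applied = 1
        WHERE milestone_id = ? AND slice_id = ? AND id = ?
          AND escalation_override_applied = 0`,
    )
    .run(milestoneId, sliceId, taskId);
  return result.changes === 1;
}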
+ ensureColumn( + db, + "tasks", + "escalation_override_applied", + `ALTER TABLE tasks ADD COLUMN escalation_override_applied INTEGER NOT NULL DEFAULT 0`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 25, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 26) { + db.exec(` + CREATE TABLE IF NOT EXISTS uok_runs ( + run_id TEXT PRIMARY KEY, + session_id TEXT DEFAULT NULL, + path TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'started', + started_at TEXT NOT NULL, + ended_at TEXT DEFAULT NULL, + error TEXT DEFAULT NULL, + flags_json TEXT NOT NULL DEFAULT '{}', + updated_at TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_runs_status_started ON uok_runs(status, started_at DESC)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_uok_runs_session ON uok_runs(session_id, started_at DESC)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 26, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 27) { + ensureSolverEvalTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 27, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 28) { + // UOK observability: gate execution latency + // Guard: gate_runs table may not exist in minimal legacy DBs (it will be dropped in v58) + if (tableExists(db, "gate_runs")) { + ensureColumn( + db, + "gate_runs", + "duration_ms", + "ALTER TABLE gate_runs ADD COLUMN duration_ms INTEGER DEFAULT NULL", + ); + } + // UOK circuit breaker state + db.exec(` + CREATE TABLE IF NOT EXISTS gate_circuit_breakers ( + gate_id TEXT PRIMARY KEY, + state TEXT NOT NULL DEFAULT 'closed', + failure_streak INTEGER NOT NULL DEFAULT 0, + last_failure_at TEXT DEFAULT NULL, + opened_at TEXT DEFAULT NULL, + half_open_attempts INTEGER NOT NULL DEFAULT 0, + updated_at TEXT NOT NULL DEFAULT '' + ) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 28, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 29) { + ensureHeadlessRunTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 29, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 30) { + ensureSelfFeedbackTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 30, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 31) { + ensureUokMessageTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 31, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 32) { + ensureTaskCreatedAtColumn(db); + ensureSpecSchemaTables(db); + // Populate spec tables from existing spec columns in runtime tables + populateSpecTablesFromExisting(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 32, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 33) { + ensureColumn( + db, + "milestones", + "sequence", + `ALTER TABLE milestones ADD COLUMN sequence INTEGER DEFAULT 0`, + ); + db.prepare( + "INSERT INTO schema_version 
(version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 33, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 34) { + ensureTaskCreatedAtColumn(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 34, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 35) { + ensureBacklogTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 35, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 36) { + migrateCostUsdToMicroUsd(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 36, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 37) { + ensureScheduleTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 37, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 38) { + try { + db.exec( + "ALTER TABLE memories ADD COLUMN tags TEXT NOT NULL DEFAULT '[]'", + ); + } catch { + // Column may already exist on fresh DBs + } + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 38, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 39) { + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memory_sources_content_hash ON memory_sources(content_hash)", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_memories_category ON memories(superseded_by, category)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 39, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 40) { + db.exec(` + CREATE TABLE IF NOT EXISTS judgments ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + unit_id TEXT NOT NULL, + decision TEXT NOT NULL DEFAULT '', + alternatives_json TEXT NOT NULL DEFAULT '[]', + reasoning TEXT NOT NULL DEFAULT '', + confidence TEXT NOT NULL DEFAULT 'medium', + ts TEXT NOT NULL + ) + `); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_judgments_unit_id ON judgments(unit_id, ts DESC)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 40, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 41) { + ensureRetrievalEvidenceTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 41, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 42) { + ensureColumn( + db, + "milestones", + "product_research_json", + `ALTER TABLE milestones ADD COLUMN product_research_json TEXT NOT NULL DEFAULT ''`, + ); + ensureColumn( + db, + "milestone_specs", + "product_research_json", + `ALTER TABLE milestone_specs ADD COLUMN product_research_json TEXT DEFAULT ''`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 42, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 43) { + db.exec(` + CREATE TABLE IF NOT EXISTS session_mode_state ( + id INTEGER PRIMARY KEY CHECK (id = 1), + work_mode TEXT NOT NULL DEFAULT 'chat', + run_control TEXT NOT NULL DEFAULT 'manual', + permission_profile TEXT NOT NULL DEFAULT 'restricted', + 
model_mode TEXT NOT NULL DEFAULT 'smart', + surface TEXT NOT NULL DEFAULT 'tui', + updated_at TEXT NOT NULL DEFAULT '' + ) + `); + db.exec(` + INSERT OR IGNORE INTO session_mode_state (id, work_mode, run_control, permission_profile, model_mode, surface, updated_at) + VALUES (1, 'chat', 'manual', 'restricted', 'smart', 'tui', datetime('now')) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 43, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 44) { + ensureSpecSchemaTables(db); + ensureTaskFrontmatterColumns(db); + db.exec(` + UPDATE tasks + SET task_status = CASE status + WHEN 'complete' THEN 'done' + WHEN 'completed' THEN 'done' + WHEN 'done' THEN 'done' + WHEN 'running' THEN 'running' + WHEN 'in_progress' THEN 'running' + WHEN 'blocked' THEN 'blocked' + WHEN 'failed' THEN 'failed' + WHEN 'cancelled' THEN 'cancelled' + ELSE COALESCE(NULLIF(task_status, ''), 'todo') + END + `); + db.exec(` + UPDATE task_specs + SET risk = COALESCE((SELECT tasks.risk FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), risk), + mutation_scope = COALESCE((SELECT tasks.mutation_scope FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), mutation_scope), + verification_type = COALESCE((SELECT tasks.verification_type FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), verification_type), + plan_approval = COALESCE((SELECT tasks.plan_approval FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), plan_approval), + estimated_effort = COALESCE((SELECT tasks.estimated_effort FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), estimated_effort), + dependencies = COALESCE((SELECT tasks.dependencies FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), dependencies), + blocks_parallel = COALESCE((SELECT tasks.blocks_parallel FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), blocks_parallel), + requires_user_input = COALESCE((SELECT tasks.requires_user_input FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), requires_user_input), + auto_retry = COALESCE((SELECT tasks.auto_retry FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), auto_retry), + max_retries = COALESCE((SELECT tasks.max_retries FROM tasks + WHERE tasks.milestone_id = task_specs.milestone_id + AND tasks.slice_id = task_specs.slice_id + AND tasks.id = task_specs.task_id), max_retries) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 44, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 45) { + ensureTaskSchedulerTable(db); + db.exec(` + INSERT OR IGNORE INTO task_scheduler ( + milestone_id, slice_id, task_id, status, updated_at + ) 
+ SELECT milestone_id, slice_id, id, 'queued', datetime('now') + FROM tasks + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 45, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 46) { + // validation_runs: mirrors droid's validation-contract.md + validation-state.json + // pattern. Each run stores the contract spec inline and its execution state. + db.exec(` + CREATE TABLE IF NOT EXISTS validation_runs ( + run_id TEXT PRIMARY KEY, + milestone_id TEXT NOT NULL, + slice_id TEXT, + task_id TEXT, + contract TEXT NOT NULL DEFAULT '', + status TEXT NOT NULL DEFAULT 'pending', + verdict TEXT NOT NULL DEFAULT '', + rationale TEXT NOT NULL DEFAULT '', + findings TEXT NOT NULL DEFAULT '', + started_at TEXT, + completed_at TEXT, + created_at TEXT NOT NULL, + FOREIGN KEY (milestone_id) REFERENCES milestones(id) + ) + `); + db.exec(` + CREATE INDEX IF NOT EXISTS idx_validation_runs_scope + ON validation_runs(milestone_id, slice_id, task_id) + `); + db.exec(` + CREATE VIEW IF NOT EXISTS latest_validation_state AS + SELECT vr.* + FROM validation_runs vr + WHERE vr.rowid = ( + SELECT MAX(v2.rowid) + FROM validation_runs v2 + WHERE v2.milestone_id = vr.milestone_id + AND v2.slice_id IS vr.slice_id + AND v2.task_id IS vr.task_id + ) + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 46, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 47) { + // Drop unused superseded_by column from validation_runs. + // The column was never written or queried — dead schema from v46. + const cols = db + .prepare("PRAGMA table_info(validation_runs)") + .all() + .map((c) => c.name); + if (cols.includes("superseded_by")) { + db.exec("ALTER TABLE validation_runs DROP COLUMN superseded_by"); + } + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 47, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 48) { + // Session layer: create tables, backfill from existing headless_runs and + // audit_turn_index so historical data is queryable from day one. + // Message text will be NULL for backfilled turns — it was never stored. + ensureSessionTables(db); + // Backfill: one session per headless run. + db.exec(` + INSERT OR IGNORE INTO sessions (session_id, trace_id, mode, cwd, created_at, updated_at) + SELECT run_id, NULL, 'headless', '', created_at, updated_at + FROM headless_runs + `); + // Backfill: one session per distinct trace_id in audit_turn_index. + // Reconstruct created_at/updated_at from the min/max timestamps. + db.exec(` + INSERT OR IGNORE INTO sessions (session_id, trace_id, mode, cwd, created_at, updated_at) + SELECT trace_id, trace_id, 'interactive', + '', MIN(first_ts), MAX(last_ts) + FROM audit_turn_index + GROUP BY trace_id + `); + // Backfill: one turn row per (trace_id, turn_id) in audit_turn_index. + // turn_index derived from row order within trace; message text is NULL. + db.exec(` + INSERT OR IGNORE INTO turns (session_id, turn_index, user_message, assistant_response, ts) + SELECT + trace_id, + ROW_NUMBER() OVER (PARTITION BY trace_id ORDER BY first_ts) - 1, + NULL, NULL, + first_ts + FROM audit_turn_index + `); + // Rebuild FTS index from any turns that have text. + // None from backfill yet, but required so the FTS table is consistent. 
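For context on what that rebuild maintains: turns_fts appears to be an FTS5 index over the turns text columns (its DDL lives in ensureSessionTables, outside this hunk), so session search reduces to a MATCH query. A sketch under that assumption; the index configuration is not confirmed by this patch:

// Returns matching turn rowids, best match first (rank is FTS5's built-in
// BM25-style ordering; lower values sort first).
function searchTurnRowids(db, query, limit = 20) {
  return db
    .prepare(
      "SELECT rowid, rank FROM turns_fts WHERE turns_fts MATCH ? ORDER BY rank LIMIT ?",
    )
    .all(query, limit);
}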
+ db.exec(`INSERT INTO turns_fts(turns_fts) VALUES ('rebuild')`); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 48, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 49) { + // Add session_snapshots table — checkpoints before irreversible ops. + // Safe to call on fresh DBs too (CREATE TABLE IF NOT EXISTS). + ensureSessionSnapshotTable(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 49, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 50) { + // Add sleeptime_consolidation_queue — decouples memory consolidation + // from the conversation turn so the daemon can drain it asynchronously. + ensureSleeptimeQueueTable(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 50, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 51) { + // Add deploy/smoke/release/rollback tables — closes the vision→production loop. + // deploy_runs tracks each deployment attempt; smoke_results tracks live verification; + // release_records tracks version bumps and publishes; rollback_runs tracks reversions. + ensureDeployTables(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 51, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 52) { + // Add triage_runs/evals/items/skills, runtime_counters, and + // validation_attention_markers tables — migrate JSONL structured state to DB. + ensureTriageTables(db); + ensureRuntimeCounterTable(db); + ensureValidationAttentionMarkersTable(db); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 52, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 53) { + // Add routing_history and routing_feedback tables — migrate file-based + // routing history to DB-first storage. + db.exec(` + CREATE TABLE IF NOT EXISTS routing_history ( + pattern TEXT NOT NULL, + tier TEXT NOT NULL, + success_count INTEGER NOT NULL DEFAULT 0, + fail_count INTEGER NOT NULL DEFAULT 0, + updated_at TEXT NOT NULL, + PRIMARY KEY (pattern, tier) + ); + CREATE TABLE IF NOT EXISTS routing_feedback ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + pattern TEXT NOT NULL, + tier TEXT NOT NULL, + feedback TEXT NOT NULL, + recorded_at TEXT NOT NULL + ); + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 53, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 54) { + // Migrate metrics ledger from .sf/runtime/metrics.json to DB-first + // unit_metrics and project_metrics_meta tables. 
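One payoff of the DB-first ledger: roll-ups that previously required parsing metrics.json become a single aggregate query over the table defined below. An illustrative read using only columns from that DDL (not an existing sf-db helper):

function costByModel(db) {
  // Per-model totals across all recorded units, most expensive first.
  return db
    .prepare(
      `SELECT model,
              COUNT(*)          AS units,
              SUM(tokens_total) AS tokens,
              SUM(cost)         AS cost
         FROM unit_metrics
        GROUP BY model
        ORDER BY cost DESC`,
    )
    .all();
}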
+ db.exec(` + CREATE TABLE IF NOT EXISTS unit_metrics ( + type TEXT NOT NULL, + id TEXT NOT NULL, + started_at INTEGER NOT NULL, + finished_at INTEGER NOT NULL, + model TEXT NOT NULL, + auto_session_key TEXT, + tokens_input INTEGER NOT NULL DEFAULT 0, + tokens_output INTEGER NOT NULL DEFAULT 0, + tokens_cache_read INTEGER NOT NULL DEFAULT 0, + tokens_cache_write INTEGER NOT NULL DEFAULT 0, + tokens_total INTEGER NOT NULL DEFAULT 0, + cost REAL NOT NULL DEFAULT 0, + tool_calls INTEGER NOT NULL DEFAULT 0, + assistant_messages INTEGER NOT NULL DEFAULT 0, + user_messages INTEGER NOT NULL DEFAULT 0, + api_requests INTEGER NOT NULL DEFAULT 0, + tier TEXT, + model_downgraded INTEGER, + context_window_tokens INTEGER, + truncation_sections INTEGER, + continue_here_fired INTEGER, + prompt_char_count INTEGER, + baseline_char_count INTEGER, + cache_hit_rate INTEGER, + skills TEXT, + PRIMARY KEY (type, id, started_at) + ); + CREATE TABLE IF NOT EXISTS project_metrics_meta ( + key TEXT PRIMARY KEY, + value TEXT NOT NULL + ); + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 54, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 55) { + // Schema v55: composite index for audit_events + task access-pattern views + // Guard: audit_events may not exist in minimal legacy DBs (it will be dropped in v58) + if (tableExists(db, "audit_events")) { + db.exec( + `CREATE INDEX IF NOT EXISTS idx_audit_events_category ON audit_events(category, type, ts DESC)`, + ); + } + db.exec( + `CREATE VIEW IF NOT EXISTS active_tasks AS SELECT * FROM tasks WHERE status NOT IN ('done','complete','completed','cancelled')`, + ); + db.exec(` + CREATE VIEW IF NOT EXISTS v_task_full AS + SELECT t.*, ts.spec_version, ts.verify AS spec_verify, + ts.inputs AS spec_inputs, ts.expected_output AS spec_expected_output + FROM tasks t + LEFT JOIN task_specs ts + ON t.milestone_id = ts.milestone_id + AND t.slice_id = ts.slice_id + AND t.id = ts.task_id + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 55, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 56) { + // Schema v56: move metrics table to dedicated metrics.db — drop from main DB + // to eliminate WAL pressure from high-frequency telemetry writes. + db.exec(`DROP TABLE IF EXISTS metrics`); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 56, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 57) { + // Schema v57: add archived_at to sessions for soft-delete / archive support. 
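The soft-delete convention this enables, as a sketch (wrapper names are hypothetical; the real helpers live in the session domain module): archiving stamps a timestamp instead of deleting the row, live queries filter on archived_at IS NULL, and the partial index added below keeps lookups of already-archived sessions cheap.

function archiveSession(db, sessionId) {
  const res = db
    .prepare(
      "UPDATE sessions SET archived_at = ? WHERE session_id = ? AND archived_at IS NULL",
    )
    .run(new Date().toISOString(), sessionId);
  return res.changes === 1; // false if already archived or unknown id
}

function listActiveSessions(db) {
  return db
    .prepare(
      "SELECT session_id, created_at FROM sessions WHERE archived_at IS NULL ORDER BY updated_at DESC",
    )
    .all();
}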
+ db.exec(`ALTER TABLE sessions ADD COLUMN archived_at TEXT DEFAULT NULL`); + db.exec( + `CREATE INDEX IF NOT EXISTS idx_sessions_archived ON sessions(archived_at) WHERE archived_at IS NOT NULL`, + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 57, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 58) { + // Schema v58: move trace data to JSONL files — drop gate_runs, turn_git_transactions, audit_events + db.exec("DROP TABLE IF EXISTS gate_runs"); + db.exec("DROP TABLE IF EXISTS turn_git_transactions"); + db.exec("DROP TABLE IF EXISTS audit_events"); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 58, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 59) { + // Schema v59: add failure_mode to llm_task_outcomes so the learning system + // can differentiate transient failures (rate_limit) from hard failures + // (quota_exhausted, auth_error) when weighting model demotions. + ensureColumn( + db, + "llm_task_outcomes", + "failure_mode", + "ALTER TABLE llm_task_outcomes ADD COLUMN failure_mode TEXT DEFAULT NULL", + ); + db.exec( + "CREATE INDEX IF NOT EXISTS idx_llm_task_outcomes_failure_mode ON llm_task_outcomes(model_id, failure_mode, recorded_at DESC)", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 59, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 60) { + // Schema v60: add frontmatter_version to tasks table for future frontmatter + // schema migrations. Defaults to 1 for all existing rows. + ensureColumn( + db, + "tasks", + "frontmatter_version", + "ALTER TABLE tasks ADD COLUMN frontmatter_version INTEGER NOT NULL DEFAULT 1", + ); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 60, + ":applied_at": new Date().toISOString(), + }); + } + if (currentVersion < 61) { + // Schema v61: intent_chapters — crash-resume context for autonomous units. + // Each chapter records the agent's declared intent when a unit begins + // (chapter_open) and clears it on normal close (chapter_close). On + // crash-resume, the open chapter is surfaced to the prompt so the agent + // knows where it left off without replaying the full transcript. + db.exec(` + CREATE TABLE IF NOT EXISTS intent_chapters ( + id TEXT PRIMARY KEY, + unit_type TEXT NOT NULL, + unit_id TEXT NOT NULL, + milestone_id TEXT, + slice_id TEXT, + task_id TEXT, + intent TEXT NOT NULL, + opened_at TEXT NOT NULL, + closed_at TEXT, + outcome TEXT, + metadata_json TEXT + ); + CREATE INDEX IF NOT EXISTS idx_intent_chapters_unit + ON intent_chapters(unit_type, unit_id); + CREATE INDEX IF NOT EXISTS idx_intent_chapters_open + ON intent_chapters(closed_at, opened_at) + WHERE closed_at IS NULL; + `); + db.prepare( + "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", + ).run({ + ":version": 61, + ":applied_at": new Date().toISOString(), + }); + } + db.exec("COMMIT"); + } catch (err) { + db.exec("ROLLBACK"); + throw err; + } +} +let currentDb = null; +let currentPath = null; +let currentPid = 0; +let _exitHandlerRegistered = false; +let _dbOpenAttempted = false; +/** + * Get the name of the SQLite provider currently loaded (or null if unavailable). 
+ */ +export function getDbProvider() { + loadProvider(); + return "node:sqlite"; +} +/** + * Check if the database is currently open and available for queries. + */ +export function isDbAvailable() { + return currentDb !== null; +} +/** + * Returns true if openDatabase() has been called at least once this session. + * Used to distinguish "DB not yet initialized" from "DB genuinely unavailable" + * so that early callers (e.g. before_agent_start context injection) don't + * trigger a false degraded-mode warning. + */ +export function wasDbOpenAttempted() { + return _dbOpenAttempted; +} +/** + * Get the current database adapter, or null if the database is not open. + */ +export function getDatabase() { + return currentDb; +} +/** + * Open the database at the specified path. Returns true if successful. + */ +export function openDatabase(path) { + _dbOpenAttempted = true; + if (currentDb && currentPath !== path) closeDatabase(); + if (currentDb && currentPath === path) return true; + const rawDb = openRawDb(path); + if (!rawDb) return false; + const adapter = createAdapter(rawDb); + const fileBacked = path !== ":memory:"; + try { + initSchema(adapter, fileBacked); + createDatabaseSnapshot(rawDb, path); + performDatabaseMaintenance(rawDb, path); + } catch (err) { + // Corrupt freelist: DDL fails with "malformed" but VACUUM can rebuild. + // Attempt VACUUM recovery before giving up (see #2519). + if ( + fileBacked && + err instanceof Error && + err.message?.includes("malformed") + ) { + try { + adapter.exec("VACUUM"); + initSchema(adapter, fileBacked); + process.stderr.write("sf-db: recovered corrupt database via VACUUM\n"); + } catch (retryErr) { + try { + adapter.close(); + } catch (e) { + logWarning("db", `close after VACUUM failed: ${e.message}`); + } + throw retryErr; + } + } else { + try { + adapter.close(); + } catch (e) { + logWarning("db", `close after VACUUM failed: ${e.message}`); + } + throw err; + } + } + currentDb = adapter; + currentPath = path; + currentPid = process.pid; + if (!_exitHandlerRegistered) { + _exitHandlerRegistered = true; + process.on("exit", () => { + try { + closeDatabase(); + } catch (e) { + logWarning("db", `exit handler close failed: ${e.message}`); + } + }); + } + return true; +} +/** + * Flush the WAL to the main DB file using a PASSIVE checkpoint. + * + * Purpose: safely persist all committed transactions to the main DB file at + * controlled loop boundaries (post-unit finalize). With wal_autocheckpoint=0, + * this is the only way WAL pages are flushed — keeping the checkpoint window + * predictable and crash-safe (no mid-operation checkpoint that an OOM kill + * could interrupt). + * + * PASSIVE is used (not TRUNCATE) so concurrent readers are not blocked. The + * WAL is truncated on close via closeDatabase(). + * + * Consumer: runFinalize() in auto/phases.js after each successful unit. + */ +export function checkpointWal() { + if (!currentDb) return; + try { + currentDb.exec("PRAGMA wal_checkpoint(PASSIVE)"); + } catch (e) { + logWarning( + "db", + `WAL checkpoint failed: ${e instanceof Error ? e.message : String(e)}`, + ); + } +} + +/** + * Close the database connection. 
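+ *
+ * Illustrative lifecycle sketch (hypothetical caller; names and order are an
+ * example, not a prescribed flow):
+ *
+ *   openDatabase(dbPath);          // dbPath: file-backed path, or ":memory:"
+ *   transaction(() => { ...batched writes... });
+ *   checkpointWal();               // flush WAL at a unit boundary
+ *   closeDatabase();               // TRUNCATE checkpoint, incremental vacuum, close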
+ */
+export function closeDatabase() {
+  if (currentDb) {
+    try {
+      currentDb.exec("PRAGMA wal_checkpoint(TRUNCATE)");
+    } catch (e) {
+      logWarning("db", `WAL checkpoint failed: ${e.message}`);
+    }
+    try {
+      // Incremental vacuum to reclaim space without blocking
+      currentDb.exec("PRAGMA incremental_vacuum(64)");
+    } catch (e) {
+      logWarning("db", `incremental vacuum failed: ${e.message}`);
+    }
+    try {
+      currentDb.close();
+    } catch (e) {
+      logWarning("db", `database close failed: ${e.message}`);
+    }
+    currentDb = null;
+    currentPath = null;
+    currentPid = 0;
+    _dbOpenAttempted = false;
+  }
+}
+/**
+ * Vacuum the database to reclaim disk space and optimize.
+ * Runs a full VACUUM; call sparingly (e.g. after milestone completion).
+ */
+export function vacuumDatabase() {
+  if (!currentDb) return;
+  try {
+    currentDb.exec("VACUUM");
+  } catch (e) {
+    logWarning("db", `VACUUM failed: ${e.message}`);
+  }
+}
+let _txDepth = 0;
+/**
+ * Execute a callback within a database transaction (BEGIN...COMMIT or ROLLBACK).
+ */
+export function transaction(fn) {
+  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
+  // Re-entrant: if already inside a transaction, just run fn() without
+  // starting a new one. SQLite does not support nested BEGIN/COMMIT.
+  if (_txDepth > 0) {
+    _txDepth++;
+    try {
+      return fn();
+    } finally {
+      _txDepth--;
+    }
+  }
+  _txDepth++;
+  currentDb.exec("BEGIN");
+  try {
+    const result = fn();
+    currentDb.exec("COMMIT");
+    return result;
+  } catch (err) {
+    currentDb.exec("ROLLBACK");
+    throw err;
+  } finally {
+    _txDepth--;
+  }
+}
+/**
+ * Execute a callback within a read-only database transaction.
+ *
+ * Wraps a block of reads in a DEFERRED transaction so that all SELECTs observe
+ * a consistent snapshot of the DB even if a concurrent writer commits between
+ * them. Use this for multi-query read flows (e.g. tool executors that query
+ * milestone + slices + counts and want one snapshot). Re-entrant — if already
+ * inside a transaction, runs fn() without starting a nested one.
+ */
+export function readTransaction(fn) {
+  if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
+  if (_txDepth > 0) {
+    _txDepth++;
+    try {
+      return fn();
+    } finally {
+      _txDepth--;
+    }
+  }
+  _txDepth++;
+  currentDb.exec("BEGIN DEFERRED");
+  try {
+    const result = fn();
+    currentDb.exec("COMMIT");
+    return result;
+  } catch (err) {
+    try {
+      currentDb.exec("ROLLBACK");
+    } catch (rollbackErr) {
+      // A failed ROLLBACK after a failed read is a split-brain signal —
+      // the transaction is in an indeterminate state. Surface it via the
+      // logger instead of swallowing it.
+      logError("db", "readTransaction ROLLBACK failed", {
+        error: rollbackErr.message,
+      });
+    }
+    throw err;
+  } finally {
+    _txDepth--;
+  }
+}
+export function getDbOwnerPid() {
+  return currentPid;
+}
+export function getDbPath() {
+  return currentPath;
+}
+
+export function _getAdapter() {
+  return currentDb;
+}
+export function _resetProvider() {
+  loadAttempted = false;
+}
+
+export function insertMilestoneSpecIfAbsent(milestoneId, planning = {}) {
+  if (!hasPlanningPayload(planning)) return;
+  const existing = currentDb
+    .prepare("SELECT * FROM milestone_specs WHERE id = ?")
+    .get(milestoneId);
+  if (existing && !isEmptyMilestoneSpec(existing)) return;
+  const params = {
+    ":id": milestoneId,
+    ":vision": planning.vision ?? "",
+    ":success_criteria": JSON.stringify(planning.successCriteria ?? []),
+    ":key_risks": JSON.stringify(planning.keyRisks ??
[]), + ":proof_strategy": JSON.stringify(planning.proofStrategy ?? []), + ":verification_contract": planning.verificationContract ?? "", + ":verification_integration": planning.verificationIntegration ?? "", + ":verification_operational": planning.verificationOperational ?? "", + ":verification_uat": planning.verificationUat ?? "", + ":definition_of_done": JSON.stringify(planning.definitionOfDone ?? []), + ":requirement_coverage": planning.requirementCoverage ?? "", + ":boundary_map_markdown": planning.boundaryMapMarkdown ?? "", + ":vision_meeting_json": planning.visionMeeting + ? JSON.stringify(planning.visionMeeting) + : "", + ":product_research_json": planning.productResearch + ? JSON.stringify(planning.productResearch) + : "", + ":created_at": new Date().toISOString(), + }; + if (existing) { + currentDb + .prepare(`UPDATE milestone_specs SET + vision = :vision, + success_criteria = :success_criteria, + key_risks = :key_risks, + proof_strategy = :proof_strategy, + verification_contract = :verification_contract, + verification_integration = :verification_integration, + verification_operational = :verification_operational, + verification_uat = :verification_uat, + definition_of_done = :definition_of_done, + requirement_coverage = :requirement_coverage, + boundary_map_markdown = :boundary_map_markdown, + vision_meeting_json = :vision_meeting_json, + product_research_json = :product_research_json + WHERE id = :id`) + .run(params); + return; + } + currentDb + .prepare(`INSERT OR IGNORE INTO milestone_specs ( + id, vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json, + spec_version, created_at + ) VALUES ( + :id, :vision, :success_criteria, :key_risks, :proof_strategy, + :verification_contract, :verification_integration, :verification_operational, :verification_uat, + :definition_of_done, :requirement_coverage, :boundary_map_markdown, :vision_meeting_json, :product_research_json, + 1, :created_at + )`) + .run(params); +} + +export function insertSliceSpecIfAbsent(milestoneId, sliceId, planning = {}) { + currentDb + .prepare(`INSERT OR IGNORE INTO slice_specs ( + milestone_id, slice_id, goal, success_criteria, proof_level, + integration_closure, observability_impact, + adversarial_partner, adversarial_combatant, adversarial_architect, + planning_meeting_json, spec_version, created_at + ) VALUES ( + :milestone_id, :slice_id, :goal, :success_criteria, :proof_level, + :integration_closure, :observability_impact, + :adversarial_partner, :adversarial_combatant, :adversarial_architect, + :planning_meeting_json, 1, :created_at + )`) + .run({ + ":milestone_id": milestoneId, + ":slice_id": sliceId, + ":goal": planning.goal ?? "", + ":success_criteria": planning.successCriteria ?? "", + ":proof_level": planning.proofLevel ?? "", + ":integration_closure": planning.integrationClosure ?? "", + ":observability_impact": planning.observabilityImpact ?? "", + ":adversarial_partner": planning.adversarialReview?.partner ?? "", + ":adversarial_combatant": planning.adversarialReview?.combatant ?? "", + ":adversarial_architect": planning.adversarialReview?.architect ?? "", + ":planning_meeting_json": planning.planningMeeting + ? 
JSON.stringify(planning.planningMeeting) + : "", + ":created_at": new Date().toISOString(), + }); +} + +export function hasTaskSpecIntent(planning = {}) { + if (!planning || typeof planning !== "object") return false; + if (typeof planning.verify === "string" && planning.verify.trim()) + return true; + if (Array.isArray(planning.inputs) && planning.inputs.length > 0) return true; + if ( + Array.isArray(planning.expectedOutput) && + planning.expectedOutput.length > 0 + ) { + return true; + } + for (const key of [ + "risk", + "mutationScope", + "mutation_scope", + "verification", + "verificationType", + "verification_type", + "planApproval", + "plan_approval", + "estimatedEffort", + "estimated_effort", + "dependencies", + "blocksParallel", + "blocks_parallel", + "requiresUserInput", + "requires_user_input", + "autoRetry", + "auto_retry", + "maxRetries", + "max_retries", + ]) { + if (planning[key] !== undefined) return true; + } + return false; +} + +export function insertTaskSpecIfAbsent(milestoneId, sliceId, taskId, planning = {}) { + if (!hasTaskSpecIntent(planning)) return; + const { normalized: frontmatter, errors } = + taskFrontmatterFromRecord(planning); + if (errors?.length) + logWarning( + "sf-db:insertTaskSpec", + `frontmatter validation errors for ${milestoneId}/${sliceId}/${taskId}: ${errors.join(", ")}`, + ); + currentDb + .prepare(`INSERT OR IGNORE INTO task_specs ( + milestone_id, slice_id, task_id, verify, inputs, expected_output, + risk, mutation_scope, verification_type, plan_approval, estimated_effort, + dependencies, blocks_parallel, requires_user_input, auto_retry, max_retries, + spec_version, created_at + ) VALUES ( + :milestone_id, :slice_id, :task_id, :verify, :inputs, :expected_output, + :risk, :mutation_scope, :verification_type, :plan_approval, :estimated_effort, + :dependencies, :blocks_parallel, :requires_user_input, :auto_retry, :max_retries, + 1, :created_at + )`) + .run({ + ":milestone_id": milestoneId, + ":slice_id": sliceId, + ":task_id": taskId, + ":verify": planning.verify ?? "", + ":inputs": JSON.stringify(planning.inputs ?? []), + ":expected_output": JSON.stringify(planning.expectedOutput ?? []), + ":risk": frontmatter.risk, + ":mutation_scope": frontmatter.mutationScope, + ":verification_type": frontmatter.verification, + ":plan_approval": frontmatter.planApproval, + ":estimated_effort": frontmatter.estimatedEffort, + ":dependencies": JSON.stringify(frontmatter.dependencies), + ":blocks_parallel": frontmatter.blocksParallel ? 1 : 0, + ":requires_user_input": frontmatter.requiresUserInput ? 1 : 0, + ":auto_retry": frontmatter.autoRetry ? 1 : 0, + ":max_retries": frontmatter.maxRetries, + ":created_at": new Date().toISOString(), + }); +} + +export function insertTaskSchedulerIfAbsent(milestoneId, sliceId, taskId) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO task_scheduler ( + milestone_id, slice_id, task_id, status, updated_at + ) VALUES ( + :milestone_id, :slice_id, :task_id, :status, :updated_at + )`) + .run({ + ":milestone_id": milestoneId, + ":slice_id": sliceId, + ":task_id": taskId, + ":status": normalizeSchedulerStatus("queued") ?? 
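+      // Illustrative pairing (sketch; ids and fields are example values): a
+      // planner that materializes a task can seed its spec and scheduler rows
+      // back to back so the task is both described and queued:
+      //
+      //   insertTaskSpecIfAbsent("m1", "s1", "t1", { verify: "npm test", risk: "low" });
+      //   insertTaskSchedulerIfAbsent("m1", "s1", "t1");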
"queued", + ":updated_at": new Date().toISOString(), + }); +} + +export function parsePlanningMeeting(raw) { + if (typeof raw !== "string" || raw.trim() === "") return null; + try { + return JSON.parse(raw); + } catch { + return null; + } +} + +export function rowToSlice(row) { + return { + milestone_id: row["milestone_id"], + id: row["id"], + title: row["title"], + status: row["status"], + risk: row["risk"], + depends: safeParseJsonArray(row["depends"]), + demo: row["demo"] ?? "", + created_at: row["created_at"], + completed_at: row["completed_at"] ?? null, + full_summary_md: row["full_summary_md"] ?? "", + full_uat_md: row["full_uat_md"] ?? "", + goal: row["goal"] ?? "", + success_criteria: row["success_criteria"] ?? "", + proof_level: row["proof_level"] ?? "", + integration_closure: row["integration_closure"] ?? "", + observability_impact: row["observability_impact"] ?? "", + adversarial_partner: row["adversarial_partner"] ?? "", + adversarial_combatant: row["adversarial_combatant"] ?? "", + adversarial_architect: row["adversarial_architect"] ?? "", + planning_meeting: parsePlanningMeeting(row["planning_meeting_json"]), + sequence: row["sequence"] ?? 0, + replan_triggered_at: row["replan_triggered_at"] ?? null, + sketch_scope: row["sketch_scope"] ?? "", + is_sketch: row["is_sketch"] ?? 0, + }; +} + +export function safeParseJsonArray(raw, fallback = []) { + if (typeof raw !== "string" || raw.trim() === "") return fallback; + try { + const parsed = JSON.parse(raw); + return Array.isArray(parsed) ? parsed : fallback; + } catch { + return fallback; + } +} + +export function parseTaskArrayColumn(raw) { + if (typeof raw !== "string" || raw.trim() === "") return []; + try { + const parsed = JSON.parse(raw); + if (Array.isArray(parsed)) return parsed.map((value) => String(value)); + if (parsed === null || parsed === undefined || parsed === "") return []; + return [String(parsed)]; + } catch { + // Older/corrupt rows may contain comma-separated strings instead of JSON. + return raw + .split(",") + .map((value) => value.trim()) + .filter(Boolean); + } +} + +export function rowToTask(row) { + const parseTaskArray = (value) => { + if (Array.isArray(value)) { + return value.filter((entry) => typeof entry === "string"); + } + if (typeof value !== "string") return []; + const trimmed = value.trim(); + if (!trimmed) return []; + try { + const parsed = JSON.parse(trimmed); + if (Array.isArray(parsed)) { + return parsed.filter((entry) => typeof entry === "string"); + } + if (typeof parsed === "string" && parsed.trim()) { + return [parsed.trim()]; + } + } catch { + // Older/corrupt DB rows may contain raw comma-separated paths instead of JSON arrays. + } + return trimmed + .split(",") + .map((entry) => entry.trim()) + .filter(Boolean); + }; + return withTaskFrontmatter({ + milestone_id: row["milestone_id"], + slice_id: row["slice_id"], + id: row["id"], + title: row["title"], + status: row["status"], + one_liner: row["one_liner"], + narrative: row["narrative"], + verification_result: row["verification_result"], + duration: row["duration"], + completed_at: row["completed_at"] ?? null, + blocker_discovered: row["blocker_discovered"] === 1, + deviations: row["deviations"], + known_issues: row["known_issues"], + key_files: parseTaskArrayColumn(row["key_files"]), + key_decisions: parseTaskArrayColumn(row["key_decisions"]), + full_summary_md: row["full_summary_md"], + description: row["description"] ?? "", + estimate: row["estimate"] ?? "", + files: parseTaskArray(row["files"]), + verify: row["verify"] ?? 
"", + inputs: parseTaskArray(row["inputs"]), + expected_output: parseTaskArray(row["expected_output"]), + observability_impact: row["observability_impact"] ?? "", + full_plan_md: row["full_plan_md"] ?? "", + sequence: row["sequence"] ?? 0, + verification_status: row["verification_status"] ?? "", + risk: row["risk"] ?? "low", + mutation_scope: row["mutation_scope"] ?? "isolated", + verification_type: row["verification_type"] ?? "self-check", + plan_approval: row["plan_approval"] ?? "not-required", + task_status: row["task_status"] ?? row["status"] ?? "todo", + scheduler_status: row["scheduler_status"] ?? "queued", + estimated_effort: row["estimated_effort"] ?? null, + dependencies: parseTaskArray(row["dependencies"]), + blocks_parallel: row["blocks_parallel"] ?? 0, + requires_user_input: row["requires_user_input"] ?? 0, + auto_retry: row["auto_retry"] ?? 1, + max_retries: row["max_retries"] ?? 2, + escalation_pending: row["escalation_pending"] ?? 0, + escalation_awaiting_review: row["escalation_awaiting_review"] ?? 0, + escalation_override_applied: row["escalation_override_applied"] ?? 0, + escalation_artifact_path: row["escalation_artifact_path"] ?? null, + }); +} + +export function rowToSelfFeedback(row) { + try { + const parsed = JSON.parse(row["full_json"]); + return { + ...parsed, + resolvedAt: row["resolved_at"] ?? parsed.resolvedAt, + resolvedReason: row["resolved_reason"] ?? parsed.resolvedReason, + resolvedBySfVersion: + row["resolved_by_sf_version"] ?? parsed.resolvedBySfVersion, + resolvedEvidence: row["resolved_evidence_json"] + ? JSON.parse(row["resolved_evidence_json"]) + : parsed.resolvedEvidence, + resolvedCriteriaMet: row["resolved_criteria_json"] + ? JSON.parse(row["resolved_criteria_json"]) + : parsed.resolvedCriteriaMet, + }; + } catch { + return { + id: row["id"], + ts: row["ts"], + kind: row["kind"], + severity: row["severity"], + blocking: row["blocking"] === 1, + repoIdentity: row["repo_identity"], + sfVersion: row["sf_version"], + basePath: row["base_path"], + occurredIn: { + unitType: row["unit_type"] ?? undefined, + milestone: row["milestone_id"] ?? undefined, + slice: row["slice_id"] ?? undefined, + task: row["task_id"] ?? undefined, + }, + summary: row["summary"], + evidence: row["evidence"], + suggestedFix: row["suggested_fix"], + resolvedAt: row["resolved_at"] ?? undefined, + resolvedReason: row["resolved_reason"] ?? undefined, + resolvedBySfVersion: row["resolved_by_sf_version"] ?? undefined, + resolvedEvidence: row["resolved_evidence_json"] + ? JSON.parse(row["resolved_evidence_json"]) + : undefined, + resolvedCriteriaMet: row["resolved_criteria_json"] + ? JSON.parse(row["resolved_criteria_json"]) + : undefined, + }; + } +} + +export function parseVisionMeeting(raw) { + if (typeof raw !== "string" || raw.trim().length === 0) return null; + try { + return JSON.parse(raw); + } catch { + return null; + } +} + +export function parseProductResearch(raw) { + if (typeof raw !== "string" || raw.trim().length === 0) return null; + try { + return JSON.parse(raw); + } catch { + return null; + } +} + +export function rowToMilestone(row) { + return { + id: row["id"], + title: row["title"], + status: row["status"], + depends_on: safeParseJsonArray(row["depends_on"]), + created_at: row["created_at"], + completed_at: row["completed_at"] ?? null, + vision: row["vision"] ?? 
"", + success_criteria: safeParseJsonArray(row["success_criteria"]), + key_risks: safeParseJsonArray(row["key_risks"]), + proof_strategy: safeParseJsonArray(row["proof_strategy"]), + verification_contract: row["verification_contract"] ?? "", + verification_integration: row["verification_integration"] ?? "", + verification_operational: row["verification_operational"] ?? "", + verification_uat: row["verification_uat"] ?? "", + definition_of_done: safeParseJsonArray(row["definition_of_done"]), + requirement_coverage: row["requirement_coverage"] ?? "", + boundary_map_markdown: row["boundary_map_markdown"] ?? "", + vision_meeting: parseVisionMeeting(row["vision_meeting_json"]), + product_research: parseProductResearch(row["product_research_json"]), + sequence: row["sequence"] ?? 0, + }; +} + +export function rowToArtifact(row) { + return { + path: row["path"], + artifact_type: row["artifact_type"], + milestone_id: row["milestone_id"] ?? null, + slice_id: row["slice_id"] ?? null, + task_id: row["task_id"] ?? null, + full_content: row["full_content"], + imported_at: row["imported_at"], + }; +} + +export function rowToBacklogItem(row) { + return { + id: row["id"], + title: row["title"], + status: row["status"], + note: row["note"] ?? "", + source: row["source"] ?? "", + triageRunId: row["triage_run_id"] ?? null, + sequence: row["sequence"] ?? 0, + createdAt: row["created_at"], + updatedAt: row["updated_at"], + promotedAt: row["promoted_at"] ?? null, + }; +} + +export function rowToGate(row) { + return { + milestone_id: row["milestone_id"], + slice_id: row["slice_id"], + gate_id: row["gate_id"], + scope: row["scope"], + task_id: row["task_id"] ?? "", + status: row["status"], + verdict: row["verdict"] || "", + rationale: row["rationale"] || "", + findings: row["findings"] || "", + evaluated_at: row["evaluated_at"] ?? null, + }; +} + +export function capErrorForStorage(error, runId) { + if (!error || error.length <= MAX_ERROR_STORED_BYTES) return error; + try { + const errDir = join(dirname(currentPath), "runtime", "errors"); + mkdirSync(errDir, { recursive: true }); + writeFileSync(join(errDir, `${runId}.txt`), error, "utf-8"); + } catch { + // non-fatal — best-effort spill + } + const head = error.slice(0, 2048); + const tail = error.slice(-2048); + const dropped = error.length - MAX_ERROR_STORED_BYTES; + return `${head}\n\n[...${dropped} chars truncated — full error in .sf/runtime/errors/${runId}.txt]\n\n${tail}`; +} + +export function boolToInt(value) { + if (value === null || value === undefined) return null; + return value ? 1 : 0; +} + +export function normalizeScheduleScope(scope) { + return scope === "global" ? "global" : "project"; +} + +export function scheduleEntryFromRow(row) { + if (!row) return null; + const full = parseJsonObject(row.full_json, {}); + return { + ...full, + schemaVersion: row.schema_version ?? full.schemaVersion ?? 1, + id: row.id, + kind: row.kind, + status: row.status, + due_at: row.due_at, + created_at: row.created_at, + snoozed_at: row.snoozed_at ?? full.snoozed_at, + payload: parseJsonObject(row.payload_json, full.payload ?? {}), + created_by: row.created_by, + autonomous_dispatch: !!row.autonomous_dispatch, + }; +} + +export function asStringOrNull(value) { + return typeof value === "string" && value.length > 0 ? value : null; +} + +export function intBool(value) { + return value ? 
1 : 0; +} + +export function parseJsonObject(raw, fallback = {}) { + try { + return JSON.parse(raw); + } catch { + return fallback; + } +} + +export function solverEvalRunFromRow(row) { + return { + runId: row["run_id"], + suiteSource: row["suite_source"], + casesCount: row["cases_count"] ?? 0, + summary: parseJsonObject(row["summary_json"], {}), + reportPath: row["report_path"], + resultsPath: row["results_path"], + dbRecorded: row["db_recorded"] === 1, + createdAt: row["created_at"], + updatedAt: row["updated_at"], + }; +} + +export function solverEvalCaseFromRow(row) { + return { + runId: row["run_id"], + caseId: row["case_id"], + title: row["title"], + mode: row["mode"], + passed: row["passed"] === 1, + falseComplete: row["false_complete"] === 1, + durationMs: row["duration_ms"], + commandStatus: row["command_status"], + solverOutcome: asStringOrNull(row["solver_outcome"]), + pddComplete: + row["pdd_complete"] === null || row["pdd_complete"] === undefined + ? null + : row["pdd_complete"] === 1, + result: parseJsonObject(row["result_json"], {}), + createdAt: row["created_at"], + }; +} + +export function headlessRunFromRow(row) { + return { + runId: row["run_id"], + command: row["command"], + status: row["status"], + exitCode: row["exit_code"], + timedOut: row["timed_out"] === 1, + interrupted: row["interrupted"] === 1, + restartCount: row["restart_count"] ?? 0, + maxRestarts: row["max_restarts"] ?? 0, + durationMs: row["duration_ms"] ?? 0, + totalEvents: row["total_events"] ?? 0, + toolCalls: row["tool_calls"] ?? 0, + solverEvalRunId: asStringOrNull(row["solver_eval_run_id"]), + solverEvalReportPath: asStringOrNull(row["solver_eval_report_path"]), + details: parseJsonObject(row["details_json"], {}), + createdAt: row["created_at"], + updatedAt: row["updated_at"], + }; +} + +export function rowToUnitMetrics(row) { + const unit = { + type: row["type"], + id: row["id"], + model: row["model"], + startedAt: row["started_at"], + finishedAt: row["finished_at"], + tokens: { + input: row["tokens_input"], + output: row["tokens_output"], + cacheRead: row["tokens_cache_read"], + cacheWrite: row["tokens_cache_write"], + total: row["tokens_total"], + }, + cost: row["cost"], + toolCalls: row["tool_calls"], + assistantMessages: row["assistant_messages"], + userMessages: row["user_messages"], + apiRequests: row["api_requests"], + }; + if (row["auto_session_key"] != null) + unit.autoSessionKey = row["auto_session_key"]; + if (row["tier"] != null) unit.tier = row["tier"]; + if (row["model_downgraded"] != null) + unit.modelDowngraded = row["model_downgraded"] === 1; + if (row["context_window_tokens"] != null) + unit.contextWindowTokens = row["context_window_tokens"]; + if (row["truncation_sections"] != null) + unit.truncationSections = row["truncation_sections"]; + if (row["continue_here_fired"] != null) + unit.continueHereFired = row["continue_here_fired"] === 1; + if (row["prompt_char_count"] != null) + unit.promptCharCount = row["prompt_char_count"]; + if (row["baseline_char_count"] != null) + unit.baselineCharCount = row["baseline_char_count"]; + if (row["cache_hit_rate"] != null) unit.cacheHitRate = row["cache_hit_rate"]; + if (row["skills"] != null) unit.skills = JSON.parse(row["skills"]); + return unit; +} diff --git a/src/resources/extensions/sf/sf-db/sf-db-decisions.js b/src/resources/extensions/sf/sf-db/sf-db-decisions.js new file mode 100644 index 000000000..85cfdd011 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-decisions.js @@ -0,0 +1,191 @@ +import { _getAdapter } from 
'./sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; + +export function insertDecision(d) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO decisions (id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by) + VALUES (:id, :when_context, :scope, :decision, :choice, :rationale, :revisable, :made_by, :superseded_by)`) + .run({ + ":id": d.id, + ":when_context": d.when_context, + ":scope": d.scope, + ":decision": d.decision, + ":choice": d.choice, + ":rationale": d.rationale, + ":revisable": d.revisable, + ":made_by": d.made_by ?? "agent", + ":superseded_by": d.superseded_by, + }); +} + +export function getDecisionById(id) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb.prepare("SELECT * FROM decisions WHERE id = ?").get(id); + if (!row) return null; + return { + seq: row["seq"], + id: row["id"], + when_context: row["when_context"], + scope: row["scope"], + decision: row["decision"], + choice: row["choice"], + rationale: row["rationale"], + revisable: row["revisable"], + made_by: row["made_by"] ?? "agent", + superseded_by: row["superseded_by"] ?? null, + }; +} + +export function getActiveDecisions() { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const rows = currentDb.prepare("SELECT * FROM active_decisions").all(); + return rows.map((row) => ({ + seq: row["seq"], + id: row["id"], + when_context: row["when_context"], + scope: row["scope"], + decision: row["decision"], + choice: row["choice"], + rationale: row["rationale"], + revisable: row["revisable"], + made_by: row["made_by"] ?? "agent", + superseded_by: null, + })); +} + +export function upsertDecision(d) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + // Use ON CONFLICT DO UPDATE instead of INSERT OR REPLACE to preserve the + // seq column. INSERT OR REPLACE deletes then reinserts, resetting seq and + // corrupting decision ordering in DECISIONS.md after reconcile replay. + currentDb + .prepare(`INSERT INTO decisions (id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by) + VALUES (:id, :when_context, :scope, :decision, :choice, :rationale, :revisable, :made_by, :superseded_by) + ON CONFLICT(id) DO UPDATE SET + when_context = excluded.when_context, + scope = excluded.scope, + decision = excluded.decision, + choice = excluded.choice, + rationale = excluded.rationale, + revisable = excluded.revisable, + made_by = excluded.made_by, + superseded_by = excluded.superseded_by`) + .run({ + ":id": d.id, + ":when_context": d.when_context, + ":scope": d.scope, + ":decision": d.decision, + ":choice": d.choice, + ":rationale": d.rationale, + ":revisable": d.revisable, + ":made_by": d.made_by ?? "agent", + ":superseded_by": d.superseded_by ?? 
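+      // Illustrative supersede flow (sketch; ids are example values): insert the
+      // replacement ADR, then re-upsert the old record pointing at it; the
+      // ON CONFLICT clause above keeps the old row's seq intact:
+      //
+      //   insertDecision({ id: "ADR-042", decision: "...", choice: "...", ... });
+      //   upsertDecision({ ...oldDecision, superseded_by: "ADR-042" });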
null, + }); +} + +export function deleteDecisionById(id) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb.prepare("DELETE FROM decisions WHERE id = :id").run({ ":id": id }); +} + +export function insertRequirement(r) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO requirements (id, class, status, description, why, source, primary_owner, supporting_slices, validation, notes, full_content, superseded_by) + VALUES (:id, :class, :status, :description, :why, :source, :primary_owner, :supporting_slices, :validation, :notes, :full_content, :superseded_by)`) + .run({ + ":id": r.id, + ":class": r.class, + ":status": r.status, + ":description": r.description, + ":why": r.why, + ":source": r.source, + ":primary_owner": r.primary_owner, + ":supporting_slices": r.supporting_slices, + ":validation": r.validation, + ":notes": r.notes, + ":full_content": r.full_content, + ":superseded_by": r.superseded_by, + }); +} + +export function getRequirementById(id) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare("SELECT * FROM requirements WHERE id = ?") + .get(id); + if (!row) return null; + return { + id: row["id"], + class: row["class"], + status: row["status"], + description: row["description"], + why: row["why"], + source: row["source"], + primary_owner: row["primary_owner"], + supporting_slices: row["supporting_slices"], + validation: row["validation"], + notes: row["notes"], + full_content: row["full_content"], + superseded_by: row["superseded_by"] ?? null, + }; +} + +export function getActiveRequirements() { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const rows = currentDb.prepare("SELECT * FROM active_requirements").all(); + return rows.map((row) => ({ + id: row["id"], + class: row["class"], + status: row["status"], + description: row["description"], + why: row["why"], + source: row["source"], + primary_owner: row["primary_owner"], + supporting_slices: row["supporting_slices"], + validation: row["validation"], + notes: row["notes"], + full_content: row["full_content"], + superseded_by: null, + })); +} + +export function upsertRequirement(r) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR REPLACE INTO requirements (id, class, status, description, why, source, primary_owner, supporting_slices, validation, notes, full_content, superseded_by) + VALUES (:id, :class, :status, :description, :why, :source, :primary_owner, :supporting_slices, :validation, :notes, :full_content, :superseded_by)`) + .run({ + ":id": r.id, + ":class": r.class, + ":status": r.status, + ":description": r.description, + ":why": r.why, + ":source": r.source, + ":primary_owner": r.primary_owner, + ":supporting_slices": r.supporting_slices, + ":validation": r.validation, + ":notes": r.notes, + ":full_content": r.full_content, + ":superseded_by": r.superseded_by ?? 
null, + }); +} + +export function deleteRequirementById(id) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare("DELETE FROM requirements WHERE id = :id") + .run({ ":id": id }); +} + diff --git a/src/resources/extensions/sf/sf-db/sf-db-evidence.js b/src/resources/extensions/sf/sf-db/sf-db-evidence.js new file mode 100644 index 000000000..a2d3953ee --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-evidence.js @@ -0,0 +1,259 @@ +import { _getAdapter } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; +import { logWarning } from '../workflow-logger.js'; + +export function insertVerificationEvidence(e) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO verification_evidence (task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at) + VALUES (:task_id, :slice_id, :milestone_id, :command, :exit_code, :verdict, :duration_ms, :created_at)`) + .run({ + ":task_id": e.taskId, + ":slice_id": e.sliceId, + ":milestone_id": e.milestoneId, + ":command": e.command, + ":exit_code": e.exitCode, + ":verdict": e.verdict, + ":duration_ms": e.durationMs, + ":created_at": new Date().toISOString(), + }); +} + +export function getVerificationEvidence(milestoneId, sliceId, taskId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const rows = currentDb + .prepare( + "SELECT * FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid AND task_id = :tid ORDER BY id", + ) + .all({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); + return rows; +} + +export function deleteVerificationEvidence(milestoneId, sliceId, taskId) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `DELETE FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid AND task_id = :tid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); +} + +export function insertMilestoneEvidence( + milestoneId, + evidenceType, + content, + phaseName, + recordedBy, +) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO milestone_evidence (milestone_id, evidence_type, content, recorded_at, phase_name, recorded_by) + VALUES (?, ?, ?, ?, ?, ?)`) + .run( + milestoneId, + evidenceType, + content, + new Date().toISOString(), + phaseName || "", + recordedBy || "", + ); +} + +export function insertSliceEvidence( + milestoneId, + sliceId, + evidenceType, + content, + phaseName, + recordedBy, +) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO slice_evidence (milestone_id, slice_id, evidence_type, content, recorded_at, phase_name, recorded_by) + VALUES (?, ?, ?, ?, ?, ?, ?)`) + .run( + milestoneId, + sliceId, + evidenceType, + content, + new Date().toISOString(), + phaseName || "", + recordedBy || "", + ); +} + +export function insertTaskEvidence( + milestoneId, + sliceId, + taskId, + evidenceType, + content, + phaseName, + recordedBy, +) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO task_evidence (milestone_id, slice_id, task_id, 
evidence_type, content, recorded_at, phase_name, recorded_by) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)`) + .run( + milestoneId, + sliceId, + taskId, + evidenceType, + content, + new Date().toISOString(), + phaseName || "", + recordedBy || "", + ); +} + +export function getMilestoneAuditTrail(milestoneId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare(` + SELECT + r.id, r.title, r.status, + s.vision, s.spec_version, + e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by + FROM milestones r + LEFT JOIN milestone_specs s ON r.id = s.id + LEFT JOIN milestone_evidence e ON r.id = e.milestone_id + WHERE r.id = ? + ORDER BY e.recorded_at ASC + `) + .all(milestoneId); +} + +export function getSliceAuditTrail(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare(` + SELECT + r.id, r.title, r.status, + s.goal, s.spec_version, + e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by + FROM slices r + LEFT JOIN slice_specs s ON r.milestone_id = s.milestone_id AND r.id = s.slice_id + LEFT JOIN slice_evidence e ON r.milestone_id = e.milestone_id AND r.id = e.slice_id + WHERE r.milestone_id = ? AND r.id = ? + ORDER BY e.recorded_at ASC + `) + .all(milestoneId, sliceId); +} + +export function getTaskAuditTrail(milestoneId, sliceId, taskId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare(` + SELECT + r.id, r.title, r.status, + s.verify, s.spec_version, + e.evidence_type, e.content, e.recorded_at, e.phase_name, e.recorded_by + FROM tasks r + LEFT JOIN task_specs s ON r.milestone_id = s.milestone_id AND r.slice_id = s.slice_id AND r.id = s.task_id + LEFT JOIN task_evidence e ON r.milestone_id = e.milestone_id AND r.slice_id = e.slice_id AND r.id = e.task_id + WHERE r.milestone_id = ? AND r.slice_id = ? AND r.id = ? + ORDER BY e.recorded_at ASC + `) + .all(milestoneId, sliceId, taskId); +} + +export function insertAssessment(entry) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR REPLACE INTO assessments (path, milestone_id, slice_id, task_id, status, scope, full_content, created_at) + VALUES (:path, :milestone_id, :slice_id, :task_id, :status, :scope, :full_content, :created_at)`) + .run({ + ":path": entry.path, + ":milestone_id": entry.milestoneId, + ":slice_id": entry.sliceId ?? null, + ":task_id": entry.taskId ?? null, + ":status": entry.status, + ":scope": entry.scope, + ":full_content": entry.fullContent, + ":created_at": new Date().toISOString(), + }); +} + +export function deleteAssessmentByScope(milestoneId, scope) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `DELETE FROM assessments WHERE milestone_id = :mid AND scope = :scope`, + ) + .run({ ":mid": milestoneId, ":scope": scope }); +} + +export function getAssessment(path) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare(`SELECT * FROM assessments WHERE path = :path`) + .get({ ":path": path }); + return row ?? 
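+  // Illustrative evidence flow (sketch; arguments are example values): evidence
+  // written for a task later appears in the joined audit-trail query above:
+  //
+  //   insertTaskEvidence("m1", "s1", "t1", "verification", "npm test passed",
+  //     "implement", "agent");
+  //   const trail = getTaskAuditTrail("m1", "s1", "t1");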
null; +} + +export function getAssessmentByScope(milestoneId, scope) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare( + `SELECT * FROM assessments + WHERE milestone_id = :mid AND scope = :scope + ORDER BY created_at DESC + LIMIT 1`, + ) + .get({ ":mid": milestoneId, ":scope": scope }); + return row ?? null; +} + +export function getMilestoneValidationAssessment(milestoneId) { + return getAssessmentByScope(milestoneId, "milestone-validation"); +} + +export function insertReplanHistory(entry) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + // INSERT OR REPLACE: idempotent on (milestone_id, slice_id, task_id) via schema v11 unique index. + // Retrying the same replan silently updates summary instead of accumulating duplicate rows. + currentDb + .prepare(`INSERT OR REPLACE INTO replan_history (milestone_id, slice_id, task_id, summary, previous_artifact_path, replacement_artifact_path, created_at) + VALUES (:milestone_id, :slice_id, :task_id, :summary, :previous_artifact_path, :replacement_artifact_path, :created_at)`) + .run({ + ":milestone_id": entry.milestoneId, + ":slice_id": entry.sliceId ?? null, + ":task_id": entry.taskId ?? null, + ":summary": entry.summary, + ":previous_artifact_path": entry.previousArtifactPath ?? null, + ":replacement_artifact_path": entry.replacementArtifactPath ?? null, + ":created_at": new Date().toISOString(), + }); +} + +export function getReplanHistory(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + if (sliceId) { + return currentDb + .prepare( + `SELECT * FROM replan_history WHERE milestone_id = :mid AND slice_id = :sid ORDER BY created_at DESC`, + ) + .all({ ":mid": milestoneId, ":sid": sliceId }); + } + return currentDb + .prepare( + `SELECT * FROM replan_history WHERE milestone_id = :mid ORDER BY created_at DESC`, + ) + .all({ ":mid": milestoneId }); +} diff --git a/src/resources/extensions/sf/sf-db/sf-db-gates.js b/src/resources/extensions/sf/sf-db/sf-db-gates.js new file mode 100644 index 000000000..d2ca3302c --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-gates.js @@ -0,0 +1,372 @@ +import { dirname } from 'node:path'; +import { _getAdapter, getDbPath, rowToGate, transaction } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; +import { getGateIdsForTurn } from '../gate-registry.js'; +import { readTraceEvents } from '../uok/trace-writer.js'; +import { logWarning } from '../workflow-logger.js'; + +export function insertGateRow(g) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO quality_gates (milestone_id, slice_id, gate_id, scope, task_id, status) + VALUES (:mid, :sid, :gid, :scope, :tid, :status)`) + .run({ + ":mid": g.milestoneId, + ":sid": g.sliceId, + ":gid": g.gateId, + ":scope": g.scope, + ":tid": g.taskId ?? "", + ":status": g.status ?? 
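+      // Illustrative gate lifecycle (sketch; ids and values are examples): rows
+      // are seeded as 'pending' here and resolved later via saveGateResult():
+      //
+      //   insertGateRow({ milestoneId: "m1", sliceId: "s1", gateId: "g-tests", scope: "slice" });
+      //   saveGateResult({ milestoneId: "m1", sliceId: "s1", gateId: "g-tests",
+      //     verdict: "pass", rationale: "all checks green", findings: "" });
+      //   const remaining = getPendingGates("m1", "s1", "slice");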
"pending", + }); +} + +export function saveGateResult(g) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE quality_gates + SET status = 'complete', verdict = :verdict, rationale = :rationale, + findings = :findings, evaluated_at = :evaluated_at + WHERE milestone_id = :mid AND slice_id = :sid AND gate_id = :gid + AND task_id = :tid`) + .run({ + ":mid": g.milestoneId, + ":sid": g.sliceId, + ":gid": g.gateId, + ":tid": g.taskId ?? "", + ":verdict": g.verdict, + ":rationale": g.rationale, + ":findings": g.findings, + ":evaluated_at": new Date().toISOString(), + }); + const outcome = + g.verdict === "pass" + ? "pass" + : g.verdict === "omitted" + ? "manual-attention" + : "fail"; + insertGateRun({ + traceId: `quality-gate:${g.milestoneId}:${g.sliceId}`, + turnId: `gate:${g.gateId}:${g.taskId ?? "slice"}`, + gateId: g.gateId, + gateType: "quality-gate", + milestoneId: g.milestoneId, + sliceId: g.sliceId, + taskId: g.taskId ?? undefined, + outcome, + failureClass: + outcome === "fail" + ? "verification" + : outcome === "manual-attention" + ? "manual-attention" + : "none", + rationale: g.rationale, + findings: g.findings, + attempt: 1, + maxAttempts: 1, + retryable: false, + evaluatedAt: new Date().toISOString(), + }); +} + +export function getPendingGates(milestoneId, sliceId, scope) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const sql = scope + ? `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND scope = :scope AND status = 'pending'` + : `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND status = 'pending'`; + const params = { + ":mid": milestoneId, + ":sid": sliceId, + }; + if (scope) params[":scope"] = scope; + return currentDb.prepare(sql).all(params).map(rowToGate); +} + +export function getGateResults(milestoneId, sliceId, scope) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const sql = scope + ? `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid AND scope = :scope` + : `SELECT * FROM quality_gates WHERE milestone_id = :mid AND slice_id = :sid`; + const params = { + ":mid": milestoneId, + ":sid": sliceId, + }; + if (scope) params[":scope"] = scope; + return currentDb.prepare(sql).all(params).map(rowToGate); +} + +export function markAllGatesOmitted(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return; + currentDb + .prepare(`UPDATE quality_gates SET status = 'omitted', verdict = 'omitted', evaluated_at = :now + WHERE milestone_id = :mid AND slice_id = :sid AND status = 'pending'`) + .run({ + ":mid": milestoneId, + ":sid": sliceId, + ":now": new Date().toISOString(), + }); +} + +export function getPendingSliceGateCount(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return 0; + const row = currentDb + .prepare(`SELECT COUNT(*) as cnt FROM quality_gates + WHERE milestone_id = :mid AND slice_id = :sid AND scope = 'slice' AND status = 'pending'`) + .get({ ":mid": milestoneId, ":sid": sliceId }); + return row ? 
row["cnt"] : 0; +} + +export function getPendingGatesForTurn(milestoneId, sliceId, turn, taskId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const ids = getGateIdsForTurn(turn); + if (ids.size === 0) return []; + const idList = [...ids]; + const placeholders = idList.map((_, i) => `:gid${i}`).join(","); + const params = { + ":mid": milestoneId, + ":sid": sliceId, + }; + idList.forEach((id, i) => { + params[`:gid${i}`] = id; + }); + let sql = `SELECT * FROM quality_gates + WHERE milestone_id = :mid AND slice_id = :sid + AND status = 'pending' + AND gate_id IN (${placeholders})`; + if (taskId !== undefined) { + sql += ` AND task_id = :tid`; + params[":tid"] = taskId; + } + return currentDb.prepare(sql).all(params).map(rowToGate); +} + +export function getPendingGateCountForTurn(milestoneId, sliceId, turn) { + return getPendingGatesForTurn(milestoneId, sliceId, turn).length; +} + +export function insertGateRun(_entry) { + // no-op: gate runs now written to JSONL trace files +} + +export function upsertTurnGitTransaction(_entry) { + // no-op: turn git transactions now written to JSONL audit events +} + +export function getGateRunStats(gateId, windowHours = 24) { + try { + const currentPath = getDbPath(); + const basePath = + currentPath && currentPath !== ":memory:" + ? dirname(dirname(currentPath)) + : process.cwd(); + const events = readTraceEvents(basePath, "gate_run", windowHours).filter( + (e) => e.gateId === gateId, + ); + const stats = { + total: events.length, + pass: 0, + fail: 0, + retry: 0, + manualAttention: 0, + lastEvaluatedAt: null, + }; + for (const e of events) { + if (e.outcome === "pass") stats.pass++; + else if (e.outcome === "fail") stats.fail++; + else if (e.outcome === "retry") stats.retry++; + else if (e.outcome === "manual-attention") stats.manualAttention++; + if ( + !stats.lastEvaluatedAt || + (e.evaluatedAt ?? e.ts) > stats.lastEvaluatedAt + ) + stats.lastEvaluatedAt = e.evaluatedAt ?? e.ts; + } + return stats; + } catch { + return { + total: 0, + pass: 0, + fail: 0, + retry: 0, + manualAttention: 0, + lastEvaluatedAt: null, + }; + } +} + +export function getGateCircuitBreaker(gateId) { + const currentDb = _getAdapter(); + if (!currentDb) { + return { + gateId, + state: "closed", + failureStreak: 0, + lastFailureAt: null, + openedAt: null, + halfOpenAttempts: 0, + updatedAt: null, + }; + } + try { + const row = currentDb + .prepare( + `SELECT gate_id, state, failure_streak, last_failure_at, opened_at, half_open_attempts, updated_at + FROM gate_circuit_breakers + WHERE gate_id = :gate_id`, + ) + .get({ ":gate_id": gateId }); + if (!row) { + return { + gateId, + state: "closed", + failureStreak: 0, + lastFailureAt: null, + openedAt: null, + halfOpenAttempts: 0, + updatedAt: null, + }; + } + return { + gateId: row.gate_id, + state: row.state, + failureStreak: row.failure_streak ?? 0, + lastFailureAt: row.last_failure_at ?? null, + openedAt: row.opened_at ?? null, + halfOpenAttempts: row.half_open_attempts ?? 0, + updatedAt: row.updated_at ?? 
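+      // Illustrative breaker transition (sketch; gate id and threshold are
+      // example values): a gate runner might open the breaker after a streak
+      // of failures using updateGateCircuitBreaker() below:
+      //
+      //   const cb = getGateCircuitBreaker("g-tests");
+      //   if (cb.failureStreak >= 3) {
+      //     updateGateCircuitBreaker("g-tests", {
+      //       state: "open",
+      //       failureStreak: cb.failureStreak,
+      //       lastFailureAt: new Date().toISOString(),
+      //       openedAt: new Date().toISOString(),
+      //     });
+      //   }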
null,
+    };
+  } catch {
+    return {
+      gateId,
+      state: "closed",
+      failureStreak: 0,
+      lastFailureAt: null,
+      openedAt: null,
+      halfOpenAttempts: 0,
+      updatedAt: null,
+    };
+  }
+}
+
+export function updateGateCircuitBreaker(gateId, updates) {
+  const currentDb = _getAdapter();
+  if (!currentDb) return;
+  currentDb
+    .prepare(
+      `INSERT INTO gate_circuit_breakers (
+        gate_id, state, failure_streak, last_failure_at, opened_at, half_open_attempts, updated_at
+      ) VALUES (
+        :gate_id, :state, :failure_streak, :last_failure_at, :opened_at, :half_open_attempts, :updated_at
+      )
+      ON CONFLICT(gate_id) DO UPDATE SET
+        state = excluded.state,
+        failure_streak = excluded.failure_streak,
+        last_failure_at = COALESCE(excluded.last_failure_at, gate_circuit_breakers.last_failure_at),
+        opened_at = COALESCE(excluded.opened_at, gate_circuit_breakers.opened_at),
+        half_open_attempts = excluded.half_open_attempts,
+        updated_at = excluded.updated_at`,
+    )
+    .run({
+      ":gate_id": gateId,
+      ":state": updates.state ?? "closed",
+      ":failure_streak": updates.failureStreak ?? 0,
+      ":last_failure_at": updates.lastFailureAt ?? null,
+      ":opened_at": updates.openedAt ?? null,
+      ":half_open_attempts": updates.halfOpenAttempts ?? 0,
+      ":updated_at": new Date().toISOString(),
+    });
+}
+
+export function getGateLatencyStats(gateId, windowHours = 24) {
+  try {
+    const currentPath = getDbPath();
+    const basePath =
+      currentPath && currentPath !== ":memory:"
+        ? dirname(dirname(currentPath))
+        : process.cwd();
+    const durations = readTraceEvents(basePath, "gate_run", windowHours)
+      .filter((e) => e.gateId === gateId && typeof e.durationMs === "number")
+      .map((e) => e.durationMs)
+      .sort((a, b) => a - b);
+    if (durations.length === 0)
+      return {
+        p50: null,
+        p95: null,
+        count: 0,
+        total: 0,
+        avgMs: 0,
+        p50Ms: 0,
+        p95Ms: 0,
+        maxMs: 0,
+      };
+    const p50Ms = durations[Math.floor(durations.length * 0.5)] ?? 0;
+    const p95Ms = durations[Math.floor(durations.length * 0.95)] ?? 0;
+    const maxMs = durations[durations.length - 1] ?? 0;
+    const avgMs = Math.round(
+      durations.reduce((s, v) => s + v, 0) / durations.length,
+    );
+    return {
+      p50: p50Ms,
+      p95: p95Ms,
+      count: durations.length,
+      total: durations.length,
+      avgMs,
+      p50Ms,
+      p95Ms,
+      maxMs,
+    };
+  } catch {
+    return {
+      p50: null,
+      p95: null,
+      count: 0,
+      total: 0,
+      avgMs: 0,
+      p50Ms: 0,
+      p95Ms: 0,
+      maxMs: 0,
+    };
+  }
+}
+
+export function getDistinctGateIds() {
+  try {
+    const currentPath = getDbPath();
+    const basePath =
+      currentPath && currentPath !== ":memory:"
+        ?
dirname(dirname(currentPath)) + : process.cwd(); + const events = readTraceEvents(basePath, "gate_run", 24 * 30); // 30 days + return [...new Set(events.map((e) => e.gateId).filter(Boolean))]; + } catch { + return []; + } +} + +export function upsertQualityGate(g) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR REPLACE INTO quality_gates + (milestone_id, slice_id, gate_id, scope, task_id, status, verdict, rationale, findings, evaluated_at) + VALUES (:mid, :sid, :gid, :scope, :tid, :status, :verdict, :rationale, :findings, :evaluated_at)`) + .run({ + ":mid": g.milestoneId, + ":sid": g.sliceId, + ":gid": g.gateId, + ":scope": g.scope, + ":tid": g.taskId, + ":status": g.status, + ":verdict": g.verdict, + ":rationale": g.rationale, + ":findings": g.findings, + ":evaluated_at": g.evaluatedAt, + }); +} diff --git a/src/resources/extensions/sf/sf-db/sf-db-learning.js b/src/resources/extensions/sf/sf-db/sf-db-learning.js new file mode 100644 index 000000000..81858c3ea --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-learning.js @@ -0,0 +1,543 @@ +import { _getAdapter, boolToInt, intBool, parseJsonObject, solverEvalRunFromRow, solverEvalCaseFromRow, headlessRunFromRow, transaction } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; +import { logError, logWarning } from '../workflow-logger.js'; + +export function insertLlmTaskOutcome(input) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + try { + currentDb + .prepare(`INSERT INTO llm_task_outcomes ( + model_id, + provider, + unit_type, + unit_id, + succeeded, + retries, + escalated, + verification_passed, + blocker_discovered, + duration_ms, + tokens_total, + cost_usd, + failure_mode, + recorded_at + ) VALUES ( + :model_id, + :provider, + :unit_type, + :unit_id, + :succeeded, + :retries, + :escalated, + :verification_passed, + :blocker_discovered, + :duration_ms, + :tokens_total, + :cost_usd, + :failure_mode, + :recorded_at + ) + ON CONFLICT(unit_type, unit_id, recorded_at) DO UPDATE SET + model_id = excluded.model_id, + provider = excluded.provider, + succeeded = excluded.succeeded, + retries = excluded.retries, + escalated = excluded.escalated, + verification_passed = excluded.verification_passed, + blocker_discovered = excluded.blocker_discovered, + duration_ms = excluded.duration_ms, + tokens_total = excluded.tokens_total, + cost_usd = excluded.cost_usd, + failure_mode = excluded.failure_mode`) + .run({ + ":model_id": input.modelId, + ":provider": input.provider, + ":unit_type": input.unitType, + ":unit_id": input.unitId, + ":succeeded": boolToInt(input.succeeded), + ":retries": input.retries ?? 0, + ":escalated": boolToInt(input.escalated ?? false), + ":verification_passed": boolToInt(input.verification_passed ?? null), + ":blocker_discovered": boolToInt(input.blocker_discovered ?? false), + ":duration_ms": input.duration_ms ?? null, + ":tokens_total": input.tokens_total ?? null, + ":cost_usd": input.cost_usd ?? null, + ":failure_mode": input.failure_mode ?? null, + ":recorded_at": input.recorded_at ?? 
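+      // Illustrative caller (sketch; field values are examples): a failed unit
+      // can be recorded with a failure_mode so demotion weighting can tell a
+      // transient rate limit apart from a hard failure (schema v59):
+      //
+      //   insertLlmTaskOutcome({
+      //     modelId: "example-model",
+      //     provider: "example-provider",
+      //     unitType: "task",
+      //     unitId: "m1/s1/t1",
+      //     succeeded: false,
+      //     failure_mode: "rate_limit",
+      //   });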
Date.now(), + }); + return true; + } catch { + return false; + } +} + +export function getLlmTaskOutcomesByUnit(unitType, unitId, limit = 20) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + try { + return currentDb + .prepare( + `SELECT + model_id, + provider, + unit_type, + unit_id, + succeeded, + retries, + escalated, + verification_passed, + blocker_discovered, + duration_ms, + tokens_total, + cost_usd, + recorded_at + FROM llm_task_outcomes + WHERE unit_type = :unit_type + AND unit_id = :unit_id + ORDER BY recorded_at DESC + LIMIT :limit`, + ) + .all({ + ":unit_type": unitType, + ":unit_id": unitId, + ":limit": limit, + }); + } catch { + return []; + } +} + +export function getLlmTaskOutcomesByModel(modelId, limit = 50) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + try { + return currentDb + .prepare( + `SELECT + model_id, + provider, + unit_type, + unit_id, + succeeded, + retries, + escalated, + verification_passed, + blocker_discovered, + duration_ms, + tokens_total, + cost_usd, + recorded_at + FROM llm_task_outcomes + WHERE model_id = :model_id + ORDER BY recorded_at DESC + LIMIT :limit`, + ) + .all({ + ":model_id": modelId, + ":limit": limit, + }); + } catch { + return []; + } +} + +export function getRecentLlmTaskOutcomes(hours = 24, limit = 100) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const cutoff = Date.now() - hours * 60 * 60 * 1000; + try { + return currentDb + .prepare( + `SELECT + model_id, + provider, + unit_type, + unit_id, + succeeded, + retries, + escalated, + verification_passed, + blocker_discovered, + duration_ms, + tokens_total, + cost_usd, + recorded_at + FROM llm_task_outcomes + WHERE recorded_at >= :cutoff + ORDER BY recorded_at DESC + LIMIT :limit`, + ) + .all({ + ":cutoff": cutoff, + ":limit": limit, + }); + } catch { + return []; + } +} + +export function getLlmTaskOutcomeStats(modelId, windowHours = 24) { + const currentDb = _getAdapter(); + if (!currentDb) { + return { + total: 0, + succeeded: 0, + failed: 0, + totalCostUsd: 0, + totalTokens: 0, + avgDurationMs: 0, + }; + } + const cutoff = Date.now() - windowHours * 60 * 60 * 1000; + try { + const row = currentDb + .prepare( + `SELECT + COUNT(*) AS total, + COALESCE(SUM(CASE WHEN succeeded = 1 THEN 1 ELSE 0 END), 0) AS succeeded, + COALESCE(SUM(CASE WHEN succeeded = 0 THEN 1 ELSE 0 END), 0) AS failed, + COALESCE(SUM(cost_usd), 0) AS totalCostUsd, + COALESCE(SUM(tokens_total), 0) AS totalTokens, + COALESCE(AVG(duration_ms), 0) AS avgDurationMs + FROM llm_task_outcomes + WHERE model_id = :model_id + AND recorded_at >= :cutoff`, + ) + .get({ ":model_id": modelId, ":cutoff": cutoff }); + if (!row) { + return { + total: 0, + succeeded: 0, + failed: 0, + totalCostUsd: 0, + totalTokens: 0, + avgDurationMs: 0, + }; + } + return { + total: row.total ?? 0, + succeeded: row.succeeded ?? 0, + failed: row.failed ?? 0, + totalCostUsd: row.totalCostUsd ?? 0, + totalTokens: row.totalTokens ?? 0, + avgDurationMs: row.avgDurationMs ?? 
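+      // Illustrative consumer (sketch; model id is an example value): a success
+      // rate over the last 24 hours can be derived from these aggregates:
+      //
+      //   const s = getLlmTaskOutcomeStats("example-model", 24);
+      //   const successRate = s.total > 0 ? s.succeeded / s.total : null;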
0, + }; + } catch { + return { + total: 0, + succeeded: 0, + failed: 0, + totalCostUsd: 0, + totalTokens: 0, + avgDurationMs: 0, + }; + } +} + +export function recordSolverEvalRun(report) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const now = new Date().toISOString(); + transaction(() => { + currentDb + .prepare(`INSERT INTO solver_eval_runs ( + run_id, suite_source, cases_count, summary_json, report_path, + results_path, db_recorded, created_at, updated_at + ) VALUES ( + :run_id, :suite_source, :cases_count, :summary_json, :report_path, + :results_path, 1, :created_at, :updated_at + ) + ON CONFLICT(run_id) DO UPDATE SET + suite_source = excluded.suite_source, + cases_count = excluded.cases_count, + summary_json = excluded.summary_json, + report_path = excluded.report_path, + results_path = excluded.results_path, + db_recorded = 1, + updated_at = excluded.updated_at`) + .run({ + ":run_id": report.runId, + ":suite_source": report.suiteSource ?? "", + ":cases_count": report.summary?.cases ?? report.results?.length ?? 0, + ":summary_json": JSON.stringify(report.summary ?? {}), + ":report_path": report.reportPath ?? "", + ":results_path": report.resultsPath ?? "", + ":created_at": report.createdAt ?? now, + ":updated_at": now, + }); + const stmt = currentDb.prepare(`INSERT INTO solver_eval_case_results ( + run_id, case_id, title, mode, passed, false_complete, duration_ms, + command_status, solver_outcome, pdd_complete, result_json, created_at + ) VALUES ( + :run_id, :case_id, :title, :mode, :passed, :false_complete, :duration_ms, + :command_status, :solver_outcome, :pdd_complete, :result_json, :created_at + ) + ON CONFLICT(run_id, case_id, mode) DO UPDATE SET + title = excluded.title, + passed = excluded.passed, + false_complete = excluded.false_complete, + duration_ms = excluded.duration_ms, + command_status = excluded.command_status, + solver_outcome = excluded.solver_outcome, + pdd_complete = excluded.pdd_complete, + result_json = excluded.result_json, + created_at = excluded.created_at`); + for (const result of report.results ?? []) { + stmt.run({ + ":run_id": report.runId, + ":case_id": result.caseId, + ":title": result.title ?? "", + ":mode": result.mode, + ":passed": intBool(result.passed), + ":false_complete": intBool(result.falseComplete), + ":duration_ms": result.command?.durationMs ?? null, + ":command_status": result.command?.status ?? null, + ":solver_outcome": result.solverSignals?.outcome ?? null, + ":pdd_complete": + result.solverSignals?.pddComplete === undefined + ? null + : intBool(result.solverSignals.pddComplete), + ":result_json": JSON.stringify(result), + ":created_at": report.createdAt ?? 
now, + }); + } + }); +} + +export function listSolverEvalRuns(limit = 10) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + return currentDb + .prepare(`SELECT run_id, suite_source, cases_count, summary_json, + report_path, results_path, db_recorded, created_at, updated_at + FROM solver_eval_runs + ORDER BY created_at DESC, run_id DESC + LIMIT :limit`) + .all({ ":limit": Math.max(1, Math.min(100, Number(limit) || 10)) }) + .map(solverEvalRunFromRow); +} + +export function getSolverEvalRun(runId) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const row = currentDb + .prepare(`SELECT run_id, suite_source, cases_count, summary_json, + report_path, results_path, db_recorded, created_at, updated_at + FROM solver_eval_runs + WHERE run_id = :run_id`) + .get({ ":run_id": runId }); + return row ? solverEvalRunFromRow(row) : null; +} + +export function getSolverEvalCaseResults(runId) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + return currentDb + .prepare(`SELECT run_id, case_id, title, mode, passed, false_complete, + duration_ms, command_status, solver_outcome, pdd_complete, + result_json, created_at + FROM solver_eval_case_results + WHERE run_id = :run_id + ORDER BY case_id ASC, mode ASC`) + .all({ ":run_id": runId }) + .map(solverEvalCaseFromRow); +} + +export function recordHeadlessRun(entry) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const now = new Date().toISOString(); + currentDb + .prepare(`INSERT INTO headless_runs ( + run_id, command, status, exit_code, timed_out, interrupted, + restart_count, max_restarts, duration_ms, total_events, tool_calls, + solver_eval_run_id, solver_eval_report_path, details_json, + created_at, updated_at + ) VALUES ( + :run_id, :command, :status, :exit_code, :timed_out, :interrupted, + :restart_count, :max_restarts, :duration_ms, :total_events, :tool_calls, + :solver_eval_run_id, :solver_eval_report_path, :details_json, + :created_at, :updated_at + ) + ON CONFLICT(run_id) DO UPDATE SET + command = excluded.command, + status = excluded.status, + exit_code = excluded.exit_code, + timed_out = excluded.timed_out, + interrupted = excluded.interrupted, + restart_count = excluded.restart_count, + max_restarts = excluded.max_restarts, + duration_ms = excluded.duration_ms, + total_events = excluded.total_events, + tool_calls = excluded.tool_calls, + solver_eval_run_id = excluded.solver_eval_run_id, + solver_eval_report_path = excluded.solver_eval_report_path, + details_json = excluded.details_json, + updated_at = excluded.updated_at`) + .run({ + ":run_id": entry.runId, + ":command": entry.command ?? "", + ":status": entry.status ?? "", + ":exit_code": Number(entry.exitCode ?? 0), + ":timed_out": intBool(entry.timedOut), + ":interrupted": intBool(entry.interrupted), + ":restart_count": Number(entry.restartCount ?? 0), + ":max_restarts": Number(entry.maxRestarts ?? 0), + ":duration_ms": Number(entry.durationMs ?? 0), + ":total_events": Number(entry.totalEvents ?? 0), + ":tool_calls": Number(entry.toolCalls ?? 0), + ":solver_eval_run_id": entry.solverEvalRunId ?? null, + ":solver_eval_report_path": entry.solverEvalReportPath ?? null, + ":details_json": JSON.stringify(entry.details ?? {}), + ":created_at": entry.createdAt ?? 
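+      // created_at keeps the caller-supplied value on upsert; only updated_at is refreshed by the ON CONFLICT clause.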
now, + ":updated_at": now, + }); +} + +export function listHeadlessRuns(limit = 20) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + return currentDb + .prepare(`SELECT run_id, command, status, exit_code, timed_out, + interrupted, restart_count, max_restarts, duration_ms, + total_events, tool_calls, solver_eval_run_id, + solver_eval_report_path, details_json, created_at, updated_at + FROM headless_runs + ORDER BY created_at DESC, run_id DESC + LIMIT :limit`) + .all({ ":limit": Math.max(1, Math.min(100, Number(limit) || 20)) }) + .map(headlessRunFromRow); +} + +export function upsertRoutingOutcome(db, pattern, tier, success) { + db.prepare( + `INSERT INTO routing_history (pattern, tier, success_count, fail_count, updated_at) + VALUES (:pattern, :tier, :success_count, :fail_count, :updated_at) + ON CONFLICT(pattern, tier) DO UPDATE SET + success_count = success_count + excluded.success_count, + fail_count = fail_count + excluded.fail_count, + updated_at = excluded.updated_at`, + ).run({ + ":pattern": pattern, + ":tier": tier, + ":success_count": success ? 1 : 0, + ":fail_count": success ? 0 : 1, + ":updated_at": new Date().toISOString(), + }); +} + +export function getAllRoutingHistory(db) { + return db + .prepare( + "SELECT pattern, tier, success_count, fail_count, updated_at FROM routing_history", + ) + .all(); +} + +export function getRoutingHistoryForPattern(db, pattern) { + return db + .prepare( + "SELECT tier, success_count, fail_count FROM routing_history WHERE pattern = ?", + ) + .all(pattern); +} + +export function insertRoutingFeedback(db, pattern, tier, feedback) { + db.prepare( + `INSERT INTO routing_feedback (pattern, tier, feedback, recorded_at) + VALUES (:pattern, :tier, :feedback, :recorded_at)`, + ).run({ + ":pattern": pattern, + ":tier": tier, + ":feedback": feedback, + ":recorded_at": new Date().toISOString(), + }); +} + +export function clearRoutingHistory(db) { + db.prepare("DELETE FROM routing_history").run(); + db.prepare("DELETE FROM routing_feedback").run(); +} + +export function insertTriageRun(id, sourceFile, createdAt) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO triage_runs (id, source_file, status, created_at) + VALUES (:id, :source_file, 'complete', :created_at) + ON CONFLICT(id) DO NOTHING`, + ) + .run({ + ":id": id, + ":source_file": sourceFile ?? null, + ":created_at": createdAt ?? new Date().toISOString(), + }); +} + +export function insertTriageEval(id, runId, data, createdAt) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO triage_evals (id, run_id, task_input, expected_behavior, evidence, failure_mode, status, created_at) + VALUES (:id, :run_id, :task_input, :expected_behavior, :evidence, :failure_mode, 'pending', :created_at) + ON CONFLICT(id) DO NOTHING`, + ) + .run({ + ":id": id, + ":run_id": runId, + ":task_input": data.task_input ?? "", + ":expected_behavior": data.expected_behavior ?? "", + ":evidence": data.evidence ?? null, + ":failure_mode": data.failure_mode ?? null, + ":created_at": createdAt ?? 
new Date().toISOString(), + }); +} + +export function insertTriageItem( + id, + runId, + kind, + content, + evidence, + createdAt, +) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO triage_items (id, run_id, kind, content, evidence, status, created_at) + VALUES (:id, :run_id, :kind, :content, :evidence, 'pending', :created_at) + ON CONFLICT(id) DO NOTHING`, + ) + .run({ + ":id": id, + ":run_id": runId, + ":kind": kind, + ":content": content, + ":evidence": evidence ?? null, + ":created_at": createdAt ?? new Date().toISOString(), + }); +} + +export function insertTriageSkill(id, runId, data, createdAt) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO triage_skills (id, run_id, name, description, trigger, raw_json, status, created_at) + VALUES (:id, :run_id, :name, :description, :trigger, :raw_json, 'pending', :created_at) + ON CONFLICT(id) DO NOTHING`, + ) + .run({ + ":id": id, + ":run_id": runId, + ":name": data.title ?? data.name ?? null, + ":description": data.description ?? null, + ":trigger": data.trigger_pattern ?? data.trigger ?? null, + ":raw_json": JSON.stringify(data), + ":created_at": createdAt ?? new Date().toISOString(), + }); +} diff --git a/src/resources/extensions/sf/sf-db/sf-db-memory.js b/src/resources/extensions/sf/sf-db/sf-db-memory.js new file mode 100644 index 000000000..bfa7fa536 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-memory.js @@ -0,0 +1,329 @@ +import { _getAdapter, intBool, parseJsonObject } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; +import { logWarning } from '../workflow-logger.js'; + +export function getActiveMemories({ category, limit = 200 } = {}) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const rows = category + ? currentDb + .prepare( + "SELECT * FROM active_memories WHERE category = ? ORDER BY updated_at DESC LIMIT ?", + ) + .all(category, limit) + : currentDb + .prepare( + "SELECT * FROM active_memories ORDER BY updated_at DESC LIMIT ?", + ) + .all(limit); + return rows.map((r) => ({ + id: r["id"], + category: r["category"], + content: r["content"], + confidence: r["confidence"], + sourceUnitId: r["source_unit_id"], + tags: (() => { + try { + return JSON.parse(r["tags"] ?? "[]"); + } catch { + return []; + } + })(), + createdAt: r["created_at"], + updatedAt: r["updated_at"], + })); +} + +export function insertMemoryRow(args) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO memories (id, category, content, confidence, source_unit_type, source_unit_id, created_at, updated_at, tags) + VALUES (:id, :category, :content, :confidence, :source_unit_type, :source_unit_id, :created_at, :updated_at, :tags)`) + .run({ + ":id": args.id, + ":category": args.category, + ":content": args.content, + ":confidence": args.confidence, + ":source_unit_type": args.sourceUnitType, + ":source_unit_id": args.sourceUnitId, + ":created_at": args.createdAt, + ":updated_at": args.updatedAt, + ":tags": JSON.stringify(args.tags ?? 
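+      // tags are persisted as a JSON-encoded array; default to an empty list when omitted.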
[]), + }); +} + +export function rewriteMemoryId(placeholderId, realId) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare("UPDATE memories SET id = :real_id WHERE id = :placeholder") + .run({ + ":real_id": realId, + ":placeholder": placeholderId, + }); +} + +export function updateMemoryContentRow(id, content, confidence, updatedAt) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + if (confidence != null) { + currentDb + .prepare( + "UPDATE memories SET content = :content, confidence = :confidence, updated_at = :updated_at WHERE id = :id", + ) + .run({ + ":content": content, + ":confidence": confidence, + ":updated_at": updatedAt, + ":id": id, + }); + } else { + currentDb + .prepare( + "UPDATE memories SET content = :content, updated_at = :updated_at WHERE id = :id", + ) + .run({ ":content": content, ":updated_at": updatedAt, ":id": id }); + } +} + +export function incrementMemoryHitCount(id, updatedAt) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + "UPDATE memories SET hit_count = hit_count + 1, updated_at = :updated_at WHERE id = :id", + ) + .run({ ":updated_at": updatedAt, ":id": id }); +} + +export function supersedeMemoryRow(oldId, newId, updatedAt) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + "UPDATE memories SET superseded_by = :new_id, updated_at = :updated_at WHERE id = :old_id", + ) + .run({ ":new_id": newId, ":updated_at": updatedAt, ":old_id": oldId }); +} + +export function markMemoryUnitProcessed(unitKey, activityFile, processedAt) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO memory_processed_units (unit_key, activity_file, processed_at) + VALUES (:key, :file, :at)`) + .run({ ":key": unitKey, ":file": activityFile, ":at": processedAt }); +} + +export function decayMemoriesBefore(cutoffTs, now) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE memories + SET confidence = MAX(0.1, confidence - 0.1), updated_at = :now + WHERE superseded_by IS NULL AND updated_at < :cutoff AND confidence > 0.1`) + .run({ ":now": now, ":cutoff": cutoffTs }); +} + +export function expireStaleMemories(unstartedTtlDays = 28, maxTtlDays = 90) { + const currentDb = _getAdapter(); + if (!currentDb) return 0; + const now = new Date().toISOString(); + const cutoffUnstarted = new Date( + Date.now() - unstartedTtlDays * 86_400_000, + ).toISOString(); + const cutoffMax = new Date( + Date.now() - maxTtlDays * 86_400_000, + ).toISOString(); + const result = currentDb + .prepare(`UPDATE memories SET superseded_by = 'ttl-expired', updated_at = :now + WHERE superseded_by IS NULL + AND ( + (hit_count = 0 AND updated_at < :cutoff_unstarted) + OR updated_at < :cutoff_max + )`) + .run({ + ":now": now, + ":cutoff_unstarted": cutoffUnstarted, + ":cutoff_max": cutoffMax, + }); + return result.changes ?? 
0; +} + +export function supersedeLowestRankedMemories(limit, now) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE memories SET superseded_by = 'CAP_EXCEEDED', updated_at = :now + WHERE id IN ( + SELECT id FROM memories + WHERE superseded_by IS NULL + ORDER BY (confidence * (1.0 + hit_count * 0.1)) ASC + LIMIT :limit + )`) + .run({ ":now": now, ":limit": limit }); +} + +export function insertMemorySourceRow(args) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO memory_sources (id, kind, uri, title, content, content_hash, imported_at, scope, tags) + VALUES (:id, :kind, :uri, :title, :content, :content_hash, :imported_at, :scope, :tags)`) + .run({ + ":id": args.id, + ":kind": args.kind, + ":uri": args.uri, + ":title": args.title, + ":content": args.content, + ":content_hash": args.contentHash, + ":imported_at": args.importedAt, + ":scope": args.scope ?? "project", + ":tags": JSON.stringify(args.tags ?? []), + }); +} + +export function deleteMemorySourceRow(id) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const res = currentDb + .prepare("DELETE FROM memory_sources WHERE id = :id") + .run({ ":id": id }); + return (res?.changes ?? 0) > 0; +} + +export function insertJudgment(entry) { + const currentDb = _getAdapter(); + if (!currentDb) return; + try { + currentDb + .prepare(`INSERT INTO judgments (unit_id, decision, alternatives_json, reasoning, confidence, ts) + VALUES (:unit_id, :decision, :alternatives_json, :reasoning, :confidence, :ts)`) + .run({ + ":unit_id": entry.unitId ?? "", + ":decision": entry.decision ?? "", + ":alternatives_json": JSON.stringify(entry.alternatives ?? []), + ":reasoning": entry.reasoning ?? "", + ":confidence": entry.confidence ?? "medium", + ":ts": entry.ts ?? new Date().toISOString(), + }); + } catch { + // Judgment logging is best-effort + } +} + +export function getJudgmentsForUnit(unitIdPrefix, limit = 1000) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + try { + const rows = currentDb + .prepare( + `SELECT id, unit_id AS unitId, decision, alternatives_json AS alternativesJson, reasoning, confidence, ts + FROM judgments + WHERE unit_id LIKE :prefix + ORDER BY ts DESC + LIMIT :limit`, + ) + .all({ + ":prefix": `${unitIdPrefix}%`, + ":limit": limit, + }); + return rows.map((r) => ({ + id: r.id, + unitId: r.unitId, + decision: r.decision, + alternatives: parseJsonObject(r.alternativesJson, []), + reasoning: r.reasoning, + confidence: r.confidence, + ts: r.ts, + })); + } catch { + return []; + } +} + +export function insertRetrievalEvidence(args) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const now = args.recordedAt ?? new Date().toISOString(); + currentDb + .prepare(`INSERT INTO retrieval_evidence ( + backend, source_kind, query, strategy, scope, project_root, + git_head, git_branch, worktree_dirty, freshness, status, + hit_count, elapsed_ms, cache_path, error, result_json, recorded_at + ) VALUES ( + :backend, :source_kind, :query, :strategy, :scope, :project_root, + :git_head, :git_branch, :worktree_dirty, :freshness, :status, + :hit_count, :elapsed_ms, :cache_path, :error, :result_json, :recorded_at + )`) + .run({ + ":backend": args.backend, + ":source_kind": args.sourceKind ?? 
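+      // source_kind defaults to "code" when the caller does not specify one.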
"code", + ":query": args.query ?? "", + ":strategy": args.strategy ?? "", + ":scope": args.scope ?? "", + ":project_root": args.projectRoot ?? "", + ":git_head": args.gitHead ?? null, + ":git_branch": args.gitBranch ?? null, + ":worktree_dirty": intBool(args.worktreeDirty), + ":freshness": args.freshness ?? "unknown", + ":status": args.status ?? "ok", + ":hit_count": args.hitCount ?? 0, + ":elapsed_ms": args.elapsedMs ?? 0, + ":cache_path": args.cachePath ?? null, + ":error": args.error ?? null, + ":result_json": JSON.stringify(args.result ?? {}), + ":recorded_at": now, + }); +} + +export function getRetrievalEvidence(limit = 100) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const rows = currentDb + .prepare(`SELECT + id, backend, source_kind AS sourceKind, query, strategy, scope, + project_root AS projectRoot, git_head AS gitHead, + git_branch AS gitBranch, worktree_dirty AS worktreeDirty, + freshness, status, hit_count AS hitCount, elapsed_ms AS elapsedMs, + cache_path AS cachePath, error, result_json AS resultJson, recorded_at AS recordedAt + FROM retrieval_evidence + ORDER BY recorded_at DESC, id DESC + LIMIT :limit`) + .all({ ":limit": limit }); + return rows.map((row) => ({ + ...row, + worktreeDirty: row.worktreeDirty === 1, + result: parseJsonObject(row.resultJson, {}), + })); +} + +export function upsertMemoryEmbedding(args) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO memory_embeddings (memory_id, model, dim, vector, updated_at) + VALUES (:memory_id, :model, :dim, :vector, :updated_at) + ON CONFLICT(memory_id) DO UPDATE SET + model = excluded.model, + dim = excluded.dim, + vector = excluded.vector, + updated_at = excluded.updated_at`) + .run({ + ":memory_id": args.memoryId, + ":model": args.model, + ":dim": args.dim, + ":vector": args.vector, + ":updated_at": args.updatedAt, + }); +} + +export function deleteMemoryEmbedding(memoryId) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const res = currentDb + .prepare("DELETE FROM memory_embeddings WHERE memory_id = :id") + .run({ ":id": memoryId }); + return (res?.changes ?? 
0) > 0; +} diff --git a/src/resources/extensions/sf/sf-db/sf-db-milestones.js b/src/resources/extensions/sf/sf-db/sf-db-milestones.js new file mode 100644 index 000000000..d0ebd2eb5 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-milestones.js @@ -0,0 +1,427 @@ +import { _getAdapter, hasPlanningPayload, isEmptyMilestoneSpec, parseJsonOrFallback, insertMilestoneSpecIfAbsent, rowToMilestone, parseVisionMeeting, parseProductResearch, transaction } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; +import { existsSync, readFileSync } from 'node:fs'; + +export function insertMilestone(m) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO milestones ( + id, title, status, depends_on, created_at, + vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json, sequence + ) VALUES ( + :id, :title, :status, :depends_on, :created_at, + :vision, :success_criteria, :key_risks, :proof_strategy, + :verification_contract, :verification_integration, :verification_operational, :verification_uat, + :definition_of_done, :requirement_coverage, :boundary_map_markdown, :vision_meeting_json, :product_research_json, :sequence + )`) + .run({ + ":id": m.id, + ":title": m.title ?? "", + // Default to "queued" — never auto-create milestones as "active" (#3380). + // Callers that need "active" must pass it explicitly. + ":status": m.status ?? "queued", + ":depends_on": JSON.stringify(m.depends_on ?? []), + ":created_at": new Date().toISOString(), + ":vision": m.planning?.vision ?? "", + ":success_criteria": JSON.stringify(m.planning?.successCriteria ?? []), + ":key_risks": JSON.stringify(m.planning?.keyRisks ?? []), + ":proof_strategy": JSON.stringify(m.planning?.proofStrategy ?? []), + ":verification_contract": m.planning?.verificationContract ?? "", + ":verification_integration": m.planning?.verificationIntegration ?? "", + ":verification_operational": m.planning?.verificationOperational ?? "", + ":verification_uat": m.planning?.verificationUat ?? "", + ":definition_of_done": JSON.stringify(m.planning?.definitionOfDone ?? []), + ":requirement_coverage": m.planning?.requirementCoverage ?? "", + ":boundary_map_markdown": m.planning?.boundaryMapMarkdown ?? "", + ":vision_meeting_json": m.planning?.visionMeeting + ? JSON.stringify(m.planning.visionMeeting) + : "", + ":product_research_json": m.planning?.productResearch + ? JSON.stringify(m.planning.productResearch) + : "", + ":sequence": m.sequence ?? 0, + }); + if (hasPlanningPayload(m.planning)) { + insertMilestoneSpecIfAbsent(m.id, m.planning ?? 
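+    // Mirror the planning payload into the spec table; insertMilestoneSpecIfAbsent is skipped when a spec already exists.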
{}); + } +} + +export function upsertMilestonePlanning(milestoneId, planning) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + insertMilestoneSpecIfAbsent(milestoneId, planning); + currentDb + .prepare(`UPDATE milestones SET + title = COALESCE(NULLIF(:title, ''), title), + status = COALESCE(NULLIF(:status, ''), status), + vision = COALESCE(:vision, vision), + success_criteria = COALESCE(:success_criteria, success_criteria), + key_risks = COALESCE(:key_risks, key_risks), + proof_strategy = COALESCE(:proof_strategy, proof_strategy), + verification_contract = COALESCE(:verification_contract, verification_contract), + verification_integration = COALESCE(:verification_integration, verification_integration), + verification_operational = COALESCE(:verification_operational, verification_operational), + verification_uat = COALESCE(:verification_uat, verification_uat), + definition_of_done = COALESCE(:definition_of_done, definition_of_done), + requirement_coverage = COALESCE(:requirement_coverage, requirement_coverage), + boundary_map_markdown = COALESCE(:boundary_map_markdown, boundary_map_markdown), + vision_meeting_json = COALESCE(:vision_meeting_json, vision_meeting_json), + product_research_json = COALESCE(:product_research_json, product_research_json) + WHERE id = :id`) + .run({ + ":id": milestoneId, + ":title": planning.title ?? "", + ":status": planning.status ?? "", + ":vision": planning.vision ?? null, + ":success_criteria": planning.successCriteria + ? JSON.stringify(planning.successCriteria) + : null, + ":key_risks": planning.keyRisks + ? JSON.stringify(planning.keyRisks) + : null, + ":proof_strategy": planning.proofStrategy + ? JSON.stringify(planning.proofStrategy) + : null, + ":verification_contract": planning.verificationContract ?? null, + ":verification_integration": planning.verificationIntegration ?? null, + ":verification_operational": planning.verificationOperational ?? null, + ":verification_uat": planning.verificationUat ?? null, + ":definition_of_done": planning.definitionOfDone + ? JSON.stringify(planning.definitionOfDone) + : null, + ":requirement_coverage": planning.requirementCoverage ?? null, + ":boundary_map_markdown": planning.boundaryMapMarkdown ?? null, + ":vision_meeting_json": planning.visionMeeting + ? JSON.stringify(planning.visionMeeting) + : null, + ":product_research_json": planning.productResearch + ? JSON.stringify(planning.productResearch) + : null, + }); +} + +export function getAllMilestones() { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const rows = currentDb + .prepare( + "SELECT * FROM milestones ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id", + ) + .all(); + return rows.map(rowToMilestone); +} + +export function getMilestone(id) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare("SELECT * FROM milestones WHERE id = :id") + .get({ ":id": id }); + if (!row) return null; + return rowToMilestone(row); +} + +export function updateMilestoneStatus(milestoneId, status, completedAt) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `UPDATE milestones SET status = :status, completed_at = :completed_at WHERE id = :id`, + ) + .run({ + ":status": status, + ":completed_at": completedAt ?? 
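+      // note: when completedAt is omitted this writes NULL, clearing any previously stored completion time.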
null, + ":id": milestoneId, + }); +} + +export function updateMilestoneQueueOrder(order) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + const stmt = currentDb.prepare( + "UPDATE milestones SET sequence = :sequence WHERE id = :id", + ); + for (let i = 0; i < order.length; i++) { + stmt.run({ ":sequence": i + 1, ":id": order[i] }); + } + }); +} + +export function getActiveMilestoneFromDb() { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare( + "SELECT * FROM milestones WHERE status NOT IN ('complete', 'parked') ORDER BY CASE WHEN sequence > 0 THEN 0 ELSE 1 END, sequence, id LIMIT 1", + ) + .get(); + if (!row) return null; + return rowToMilestone(row); +} + +export function getActiveMilestoneIdFromDb() { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare( + "SELECT id, status FROM milestones WHERE status NOT IN ('complete', 'parked') ORDER BY id LIMIT 1", + ) + .get(); + if (!row) return null; + return { id: row["id"], status: row["status"] }; +} + +export function deleteMilestone(milestoneId) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + currentDb + .prepare(`DELETE FROM verification_evidence WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM quality_gates WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM tasks WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM slice_dependencies WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM slices WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM replan_history WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM assessments WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM artifacts WHERE milestone_id = :mid`) + .run({ ":mid": milestoneId }); + currentDb + .prepare(`DELETE FROM milestones WHERE id = :mid`) + .run({ ":mid": milestoneId }); + }); +} + +export function bulkInsertLegacyHierarchy(payload) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const db = currentDb; + const { milestones, slices, tasks, clearMilestoneIds, createdAt } = payload; + if (clearMilestoneIds.length === 0) return; + const placeholders = clearMilestoneIds.map(() => "?").join(","); + transaction(() => { + db.prepare(`DELETE FROM tasks WHERE milestone_id IN (${placeholders})`).run( + ...clearMilestoneIds, + ); + db.prepare( + `DELETE FROM slices WHERE milestone_id IN (${placeholders})`, + ).run(...clearMilestoneIds); + db.prepare(`DELETE FROM milestones WHERE id IN (${placeholders})`).run( + ...clearMilestoneIds, + ); + const insertMilestone = db.prepare( + "INSERT INTO milestones (id, title, status, created_at) VALUES (?, ?, ?, ?)", + ); + for (const m of milestones) { + insertMilestone.run(m.id, m.title, m.status, createdAt); + } + const insertSliceStmt = db.prepare( + "INSERT INTO slices (id, milestone_id, title, status, risk, depends, sequence, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)", + ); + for (const s of slices) { + insertSliceStmt.run( + s.id, + s.milestoneId, + s.title, + s.status, + s.risk, 
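+        // legacy imports carry no slice dependency data; store an empty JSON array for the depends column.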
+ "[]", + s.sequence, + createdAt, + ); + } + const insertTaskStmt = db.prepare( + "INSERT INTO tasks (id, slice_id, milestone_id, title, description, status, estimate, files, sequence) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)", + ); + for (const t of tasks) { + insertTaskStmt.run( + t.id, + t.sliceId, + t.milestoneId, + t.title, + "", + t.status, + "", + "[]", + t.sequence, + ); + } + }); +} + +export function clearEngineHierarchy() { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + currentDb.exec("DELETE FROM tasks"); + currentDb.exec("DELETE FROM slices"); + currentDb.exec("DELETE FROM milestones"); + }); +} + +export function restoreManifest(manifest) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const db = currentDb; + transaction(() => { + // Clear engine tables (order matters for foreign-key-like consistency) + db.exec("DELETE FROM verification_evidence"); + db.exec("DELETE FROM tasks"); + db.exec("DELETE FROM slices"); + db.exec("DELETE FROM milestones"); + db.exec("DELETE FROM decisions WHERE 1=1"); + // Restore milestones + const msStmt = + db.prepare(`INSERT INTO milestones (id, title, status, depends_on, created_at, completed_at, + vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); + for (const m of manifest.milestones) { + msStmt.run( + m.id, + m.title, + m.status, + JSON.stringify(m.depends_on), + m.created_at, + m.completed_at, + m.vision, + JSON.stringify(m.success_criteria), + JSON.stringify(m.key_risks), + JSON.stringify(m.proof_strategy), + m.verification_contract, + m.verification_integration, + m.verification_operational, + m.verification_uat, + JSON.stringify(m.definition_of_done), + m.requirement_coverage, + m.boundary_map_markdown, + m.vision_meeting ? JSON.stringify(m.vision_meeting) : "", + m.product_research ? JSON.stringify(m.product_research) : "", + ); + } + // Restore slices + const slStmt = + db.prepare(`INSERT INTO slices (milestone_id, id, title, status, risk, depends, demo, + created_at, completed_at, full_summary_md, full_uat_md, + goal, success_criteria, proof_level, integration_closure, observability_impact, + adversarial_partner, adversarial_combatant, adversarial_architect, planning_meeting_json, + sequence, replan_triggered_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); + for (const s of manifest.slices) { + slStmt.run( + s.milestone_id, + s.id, + s.title, + s.status, + s.risk, + JSON.stringify(s.depends), + s.demo, + s.created_at, + s.completed_at, + s.full_summary_md, + s.full_uat_md, + s.goal, + s.success_criteria, + s.proof_level, + s.integration_closure, + s.observability_impact, + s.adversarial_partner ?? "", + s.adversarial_combatant ?? "", + s.adversarial_architect ?? "", + s.planning_meeting ? 
JSON.stringify(s.planning_meeting) : "", + s.sequence, + s.replan_triggered_at, + ); + } + // Restore tasks + const tkStmt = + db.prepare(`INSERT INTO tasks (milestone_id, slice_id, id, title, status, + one_liner, narrative, verification_result, duration, completed_at, + blocker_discovered, deviations, known_issues, key_files, key_decisions, + full_summary_md, description, estimate, files, verify, + inputs, expected_output, observability_impact, full_plan_md, sequence) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); + for (const t of manifest.tasks) { + tkStmt.run( + t.milestone_id, + t.slice_id, + t.id, + t.title, + t.status, + t.one_liner, + t.narrative, + t.verification_result, + t.duration, + t.completed_at, + t.blocker_discovered ? 1 : 0, + t.deviations, + t.known_issues, + JSON.stringify(t.key_files), + JSON.stringify(t.key_decisions), + t.full_summary_md, + t.description, + t.estimate, + JSON.stringify(t.files), + t.verify, + JSON.stringify(t.inputs), + JSON.stringify(t.expected_output), + t.observability_impact, + t.full_plan_md, + t.sequence, + ); + } + // Restore decisions + const dcStmt = + db.prepare(`INSERT INTO decisions (seq, id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`); + for (const d of manifest.decisions) { + dcStmt.run( + d.seq, + d.id, + d.when_context, + d.scope, + d.decision, + d.choice, + d.rationale, + d.revisable, + d.made_by, + d.superseded_by, + ); + } + // Restore verification evidence + const evStmt = + db.prepare(`INSERT INTO verification_evidence (task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at) + VALUES (?, ?, ?, ?, ?, ?, ?, ?)`); + for (const e of manifest.verification_evidence) { + evStmt.run( + e.task_id, + e.slice_id, + e.milestone_id, + e.command, + e.exit_code, + e.verdict, + e.duration_ms, + e.created_at, + ); + } + }); +} + diff --git a/src/resources/extensions/sf/sf-db/sf-db-mode-state.js b/src/resources/extensions/sf/sf-db/sf-db-mode-state.js new file mode 100644 index 000000000..dfdf36498 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-mode-state.js @@ -0,0 +1,49 @@ +import { _getAdapter } from './sf-db-core.js'; + +export function loadSessionModeState() { + const currentDb = _getAdapter(); + if (!currentDb) return null; + try { + const row = currentDb + .prepare("SELECT * FROM session_mode_state WHERE id = 1") + .get(); + if (!row) return null; + return { + workMode: row["work_mode"] ?? "chat", + runControl: row["run_control"] ?? "manual", + permissionProfile: row["permission_profile"] ?? "restricted", + modelMode: row["model_mode"] ?? "smart", + surface: row["surface"] ?? "tui", + updatedAt: row["updated_at"] ?? 
null, + }; + } catch { + return null; + } +} + +export function saveSessionModeState(mode) { + const currentDb = _getAdapter(); + if (!currentDb) return false; + currentDb + .prepare(` + INSERT INTO session_mode_state (id, work_mode, run_control, permission_profile, model_mode, surface, updated_at) + VALUES (1, :workMode, :runControl, :permissionProfile, :modelMode, :surface, :updatedAt) + ON CONFLICT(id) DO UPDATE SET + work_mode = excluded.work_mode, + run_control = excluded.run_control, + permission_profile = excluded.permission_profile, + model_mode = excluded.model_mode, + surface = excluded.surface, + updated_at = excluded.updated_at + `) + .run({ + ":workMode": mode.workMode, + ":runControl": mode.runControl, + ":permissionProfile": mode.permissionProfile, + ":modelMode": mode.modelMode, + ":surface": mode.surface ?? "tui", + ":updatedAt": mode.updatedAt ?? new Date().toISOString(), + }); + return true; +} + diff --git a/src/resources/extensions/sf/sf-db/sf-db-profile.js b/src/resources/extensions/sf/sf-db/sf-db-profile.js new file mode 100644 index 000000000..a8fb90f58 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-profile.js @@ -0,0 +1,347 @@ +import { _getAdapter, normalizeScheduleScope, scheduleEntryFromRow, asStringOrNull, transaction } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; +import { logWarning } from '../workflow-logger.js'; + +export function recordRepoProfile(profile) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + currentDb + .prepare(`INSERT OR REPLACE INTO repo_profiles ( + profile_id, project_hash, project_root, head, branch, remote_hash, + dirty, profile_json, created_at + ) VALUES ( + :profile_id, :project_hash, :project_root, :head, :branch, :remote_hash, + :dirty, :profile_json, :created_at + )`) + .run({ + ":profile_id": profile.profileId, + ":project_hash": profile.projectHash, + ":project_root": profile.projectRoot, + ":head": profile.git.head, + ":branch": profile.git.branch, + ":remote_hash": profile.git.remoteHash, + ":dirty": profile.git.dirty ? 
1 : 0, + ":profile_json": JSON.stringify(profile), + ":created_at": profile.createdAt, + }); + const stmt = currentDb.prepare(`INSERT INTO repo_file_observations ( + path, latest_profile_id, git_status, ownership, language, size_bytes, + content_hash, summary, first_seen_at, last_seen_at, adopted_at, + adoption_unit_id + ) VALUES ( + :path, :latest_profile_id, :git_status, :ownership, :language, :size_bytes, + :content_hash, :summary, :first_seen_at, :last_seen_at, :adopted_at, + :adoption_unit_id + ) + ON CONFLICT(path) DO UPDATE SET + latest_profile_id = excluded.latest_profile_id, + git_status = excluded.git_status, + ownership = CASE + WHEN repo_file_observations.ownership = 'sf_generated' + THEN repo_file_observations.ownership + WHEN repo_file_observations.ownership = 'candidate_harness' + THEN repo_file_observations.ownership + ELSE excluded.ownership + END, + language = excluded.language, + size_bytes = excluded.size_bytes, + content_hash = excluded.content_hash, + summary = excluded.summary, + first_seen_at = repo_file_observations.first_seen_at, + last_seen_at = excluded.last_seen_at, + adopted_at = COALESCE(repo_file_observations.adopted_at, excluded.adopted_at), + adoption_unit_id = COALESCE(repo_file_observations.adoption_unit_id, excluded.adoption_unit_id)`); + for (const file of profile.git.changedFiles) { + stmt.run({ + ":path": file.path, + ":latest_profile_id": profile.profileId, + ":git_status": file.gitStatus, + ":ownership": file.ownership, + ":language": file.language, + ":size_bytes": file.sizeBytes, + ":content_hash": file.contentHash, + ":summary": file.summary, + ":first_seen_at": file.firstSeenAt, + ":last_seen_at": file.lastSeenAt, + ":adopted_at": file.adoptedAt, + ":adoption_unit_id": file.adoptionUnitId, + }); + } + }); +} + +export function getLatestRepoProfile() { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const row = currentDb + .prepare(`SELECT profile_id, project_hash, project_root, head, branch, remote_hash, + dirty, profile_json, created_at + FROM repo_profiles + ORDER BY created_at DESC, profile_id DESC + LIMIT 1`) + .get(); + if (!row) return null; + return { + profileId: row["profile_id"], + projectHash: row["project_hash"], + projectRoot: row["project_root"], + head: asStringOrNull(row["head"]), + branch: asStringOrNull(row["branch"]), + remoteHash: asStringOrNull(row["remote_hash"]), + dirty: row["dirty"] === 1, + profileJson: row["profile_json"] ?? "{}", + createdAt: row["created_at"], + }; +} + +export function getRepoFileObservations() { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + return currentDb + .prepare(`SELECT path, latest_profile_id, git_status, ownership, language, + size_bytes, content_hash, summary, first_seen_at, last_seen_at, + adopted_at, adoption_unit_id + FROM repo_file_observations + ORDER BY path ASC`) + .all() + .map((row) => ({ + path: row["path"], + latestProfileId: row["latest_profile_id"], + gitStatus: row["git_status"], + ownership: row["ownership"], + language: asStringOrNull(row["language"]), + sizeBytes: row["size_bytes"] ?? 
0, + contentHash: asStringOrNull(row["content_hash"]), + summary: asStringOrNull(row["summary"]), + firstSeenAt: row["first_seen_at"], + lastSeenAt: row["last_seen_at"], + adoptedAt: asStringOrNull(row["adopted_at"]), + adoptionUnitId: asStringOrNull(row["adoption_unit_id"]), + })); +} + +export function insertScheduleEntry(scope, entry, importedFrom = null) { + const currentDb = _getAdapter(); + if (!currentDb) return; + const normalizedScope = normalizeScheduleScope(scope); + const schemaVersion = entry.schemaVersion ?? 1; + const full = { schemaVersion, ...entry }; + currentDb + .prepare( + `INSERT INTO schedule_entries ( + scope, id, schema_version, kind, status, due_at, created_at, + snoozed_at, payload_json, created_by, autonomous_dispatch, full_json, + imported_from + ) VALUES ( + :scope, :id, :schema_version, :kind, :status, :due_at, :created_at, + :snoozed_at, :payload_json, :created_by, :autonomous_dispatch, :full_json, + :imported_from + )`, + ) + .run({ + ":scope": normalizedScope, + ":id": entry.id, + ":schema_version": schemaVersion, + ":kind": entry.kind ?? "reminder", + ":status": entry.status ?? "pending", + ":due_at": entry.due_at ?? "", + ":created_at": entry.created_at ?? "", + ":snoozed_at": entry.snoozed_at ?? null, + ":payload_json": JSON.stringify(entry.payload ?? {}), + ":created_by": entry.created_by ?? "user", + ":autonomous_dispatch": entry.autonomous_dispatch ? 1 : 0, + ":full_json": JSON.stringify(full), + ":imported_from": importedFrom, + }); +} + +export function getScheduleEntries(scope) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const normalizedScope = normalizeScheduleScope(scope); + try { + const rows = currentDb + .prepare( + `SELECT s.* + FROM schedule_entries s + JOIN ( + SELECT id, MAX(seq) AS max_seq + FROM schedule_entries + WHERE scope = :scope + GROUP BY id + ) latest ON latest.id = s.id AND latest.max_seq = s.seq + WHERE s.scope = :scope + ORDER BY s.due_at ASC, s.created_at ASC, s.seq ASC`, + ) + .all({ ":scope": normalizedScope }); + return rows.map(scheduleEntryFromRow).filter(Boolean); + } catch { + return []; + } +} + +export function countScheduleEntries(scope) { + const currentDb = _getAdapter(); + if (!currentDb) return 0; + const normalizedScope = normalizeScheduleScope(scope); + try { + const row = currentDb + .prepare( + "SELECT COUNT(*) AS cnt FROM schedule_entries WHERE scope = :scope", + ) + .get({ ":scope": normalizedScope }); + return row?.cnt ?? 0; + } catch { + return 0; + } +} + +export function getRuntimeCounter(key) { + const currentDb = _getAdapter(); + if (!currentDb) return 0; + const row = currentDb + .prepare("SELECT value FROM runtime_counters WHERE key = ?") + .get(key); + return typeof row?.value === "number" ? 
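+    // missing or non-numeric counter values read as 0.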
row.value : 0; +} + +export function setRuntimeCounter(key, value) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO runtime_counters (key, value, updated_at) + VALUES (:key, :value, :updated_at) + ON CONFLICT(key) DO UPDATE SET value = excluded.value, updated_at = excluded.updated_at`, + ) + .run({ + ":key": key, + ":value": value, + ":updated_at": new Date().toISOString(), + }); +} + +export function incrementRuntimeCounter(key) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `INSERT INTO runtime_counters (key, value, updated_at) + VALUES (:key, 1, :updated_at) + ON CONFLICT(key) DO UPDATE SET value = value + 1, updated_at = excluded.updated_at`, + ) + .run({ ":key": key, ":updated_at": new Date().toISOString() }); + const row = currentDb + .prepare("SELECT value FROM runtime_counters WHERE key = ?") + .get(key); + return typeof row?.value === "number" ? row.value : 1; +} + +export function getProjectStartedAt(db) { + const row = db + .prepare( + "SELECT value FROM project_metrics_meta WHERE key = 'projectStartedAt'", + ) + .get(); + if (!row) return null; + const ts = Number(row["value"]); + return Number.isFinite(ts) ? ts : null; +} + +export function setProjectStartedAt(db, ts) { + db.prepare( + `INSERT INTO project_metrics_meta (key, value) VALUES ('projectStartedAt', :value) + ON CONFLICT(key) DO UPDATE SET value = excluded.value`, + ).run({ ":value": String(ts) }); +} + +export function openIntentChapter({ + id, + unitType, + unitId, + milestoneId, + sliceId, + taskId, + intent, + metadata, +}) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const now = new Date().toISOString(); + currentDb + .prepare( + `INSERT INTO intent_chapters + (id, unit_type, unit_id, milestone_id, slice_id, task_id, intent, opened_at, metadata_json) + VALUES + (:id, :unitType, :unitId, :milestoneId, :sliceId, :taskId, :intent, :openedAt, :metadataJson) + ON CONFLICT(id) DO NOTHING`, + ) + .run({ + ":id": id, + ":unitType": unitType, + ":unitId": unitId, + ":milestoneId": milestoneId ?? null, + ":sliceId": sliceId ?? null, + ":taskId": taskId ?? null, + ":intent": intent, + ":openedAt": now, + ":metadataJson": metadata ? JSON.stringify(metadata) : null, + }); + return id; +} + +export function closeIntentChapter(id, outcome = "done") { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const res = currentDb + .prepare( + `UPDATE intent_chapters + SET closed_at = :closedAt, outcome = :outcome + WHERE id = :id AND closed_at IS NULL`, + ) + .run({ + ":id": id, + ":closedAt": new Date().toISOString(), + ":outcome": outcome, + }); + return (res?.changes ?? 
0) > 0; +} + +export function getOpenIntentChapters({ limit = 5 } = {}) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare( + `SELECT id, unit_type as unitType, unit_id as unitId, + milestone_id as milestoneId, slice_id as sliceId, task_id as taskId, + intent, opened_at as openedAt, metadata_json as metadataJson + FROM intent_chapters + WHERE closed_at IS NULL + ORDER BY opened_at DESC + LIMIT :limit`, + ) + .all({ ":limit": limit }); +} + +export function closeIntentChaptersForUnit( + unitType, + unitId, + outcome = "cancelled", +) { + const currentDb = _getAdapter(); + if (!currentDb) return 0; + const res = currentDb + .prepare( + `UPDATE intent_chapters + SET closed_at = :closedAt, outcome = :outcome + WHERE unit_type = :unitType AND unit_id = :unitId AND closed_at IS NULL`, + ) + .run({ + ":closedAt": new Date().toISOString(), + ":outcome": outcome, + ":unitType": unitType, + ":unitId": unitId, + }); + return res?.changes ?? 0; +} diff --git a/src/resources/extensions/sf/sf-db/sf-db-self-feedback.js b/src/resources/extensions/sf/sf-db/sf-db-self-feedback.js new file mode 100644 index 000000000..84655ae63 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-self-feedback.js @@ -0,0 +1,98 @@ +import { _getAdapter, rowToSelfFeedback } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; +import { logWarning } from '../workflow-logger.js'; + +export function insertSelfFeedbackEntry(entry) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const occurred = entry.occurredIn ?? {}; + currentDb + .prepare(`INSERT INTO self_feedback ( + id, ts, kind, severity, blocking, repo_identity, sf_version, base_path, + unit_type, milestone_id, slice_id, task_id, summary, evidence, suggested_fix, full_json, + resolved_at, resolved_reason, resolved_by_sf_version, resolved_evidence_json, resolved_criteria_json + ) VALUES ( + :id, :ts, :kind, :severity, :blocking, :repo_identity, :sf_version, :base_path, + :unit_type, :milestone_id, :slice_id, :task_id, :summary, :evidence, :suggested_fix, :full_json, + :resolved_at, :resolved_reason, :resolved_by_sf_version, :resolved_evidence_json, :resolved_criteria_json + ) + ON CONFLICT(id) DO NOTHING`) + .run({ + ":id": entry.id, + ":ts": entry.ts, + ":kind": entry.kind, + ":severity": entry.severity, + ":blocking": entry.blocking ? 1 : 0, + ":repo_identity": entry.repoIdentity ?? "", + ":sf_version": entry.sfVersion ?? "", + ":base_path": entry.basePath ?? "", + ":unit_type": occurred.unitType ?? null, + ":milestone_id": occurred.milestone ?? null, + ":slice_id": occurred.slice ?? null, + ":task_id": occurred.task ?? null, + ":summary": entry.summary ?? "", + ":evidence": entry.evidence ?? "", + ":suggested_fix": entry.suggestedFix ?? "", + ":full_json": JSON.stringify(entry), + ":resolved_at": entry.resolvedAt ?? null, + ":resolved_reason": entry.resolvedReason ?? null, + ":resolved_by_sf_version": entry.resolvedBySfVersion ?? null, + ":resolved_evidence_json": entry.resolvedEvidence + ? JSON.stringify(entry.resolvedEvidence) + : null, + ":resolved_criteria_json": entry.resolvedCriteriaMet + ? 
JSON.stringify(entry.resolvedCriteriaMet) + : null, + }); +} + +export function listSelfFeedbackEntries() { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const rows = currentDb + .prepare("SELECT * FROM self_feedback ORDER BY ts ASC, id ASC") + .all(); + return rows.map(rowToSelfFeedback); +} + +export function resolveSelfFeedbackEntry(entryId, resolution) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const existing = currentDb + .prepare("SELECT * FROM self_feedback WHERE id = :id") + .get({ ":id": entryId }); + if (!existing || existing["resolved_at"]) return false; + const resolvedAt = resolution.resolvedAt ?? new Date().toISOString(); + const entry = { + ...rowToSelfFeedback(existing), + resolvedAt, + resolvedReason: resolution.reason, + resolvedBySfVersion: resolution.resolvedBySfVersion ?? "", + resolvedEvidence: resolution.evidence, + }; + if (resolution.criteriaMet) + entry.resolvedCriteriaMet = resolution.criteriaMet; + const result = currentDb + .prepare(`UPDATE self_feedback SET + full_json = :full_json, + resolved_at = :resolved_at, + resolved_reason = :resolved_reason, + resolved_by_sf_version = :resolved_by_sf_version, + resolved_evidence_json = :resolved_evidence_json, + resolved_criteria_json = :resolved_criteria_json + WHERE id = :id AND resolved_at IS NULL`) + .run({ + ":id": entryId, + ":full_json": JSON.stringify(entry), + ":resolved_at": resolvedAt, + ":resolved_reason": resolution.reason ?? "", + ":resolved_by_sf_version": resolution.resolvedBySfVersion ?? "", + ":resolved_evidence_json": resolution.evidence + ? JSON.stringify(resolution.evidence) + : null, + ":resolved_criteria_json": resolution.criteriaMet + ? JSON.stringify(resolution.criteriaMet) + : null, + }); + return result.changes > 0; +} diff --git a/src/resources/extensions/sf/sf-db/sf-db-session-store.js b/src/resources/extensions/sf/sf-db/sf-db-session-store.js new file mode 100644 index 000000000..a8fd14eb8 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-session-store.js @@ -0,0 +1,191 @@ +import { _getAdapter } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; +import { logWarning, logError } from '../workflow-logger.js'; + +export function upsertSession(entry) { + const currentDb = _getAdapter(); + if (!currentDb) return; + const now = new Date().toISOString(); + currentDb + .prepare(`INSERT INTO sessions + (session_id, trace_id, mode, cwd, repo, branch, summary, summary_count, created_at, updated_at) + VALUES (:session_id, :trace_id, :mode, :cwd, :repo, :branch, :summary, 0, :now, :now) + ON CONFLICT(session_id) DO UPDATE SET + trace_id = COALESCE(excluded.trace_id, sessions.trace_id), + repo = COALESCE(excluded.repo, sessions.repo), + branch = COALESCE(excluded.branch, sessions.branch), + summary = COALESCE(excluded.summary, sessions.summary), + summary_count = CASE WHEN excluded.summary IS NOT NULL + THEN sessions.summary_count + 1 + ELSE sessions.summary_count END, + updated_at = excluded.updated_at`) + .run({ + ":session_id": entry.sessionId, + ":trace_id": entry.traceId ?? null, + ":mode": entry.mode ?? "interactive", + ":cwd": entry.cwd ?? "", + ":repo": entry.repo ?? null, + ":branch": entry.branch ?? null, + ":summary": entry.summary ?? 
null, + ":now": now, + }); +} + +export function archiveSession(sessionId) { + const currentDb = _getAdapter(); + if (!currentDb) return; + currentDb + .prepare( + `UPDATE sessions SET archived_at = :now, updated_at = :now WHERE session_id = :session_id`, + ) + .run({ ":session_id": sessionId, ":now": new Date().toISOString() }); +} + +export function unarchiveSession(sessionId) { + const currentDb = _getAdapter(); + if (!currentDb) return; + currentDb + .prepare( + `UPDATE sessions SET archived_at = NULL, updated_at = :now WHERE session_id = :session_id`, + ) + .run({ ":session_id": sessionId, ":now": new Date().toISOString() }); +} + +export function insertSessionTurn(entry) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const result = currentDb + .prepare(`INSERT INTO turns + (session_id, turn_index, user_message, assistant_response, ts) + VALUES (:session_id, :turn_index, :user_message, :assistant_response, :ts) + ON CONFLICT(session_id, turn_index) DO UPDATE SET + user_message = COALESCE(excluded.user_message, turns.user_message), + assistant_response = COALESCE(excluded.assistant_response, turns.assistant_response)`) + .run({ + ":session_id": entry.sessionId, + ":turn_index": entry.turnIndex, + ":user_message": entry.userMessage ?? null, + ":assistant_response": entry.assistantResponse ?? null, + ":ts": entry.ts ?? new Date().toISOString(), + }); + return result.lastInsertRowid ?? null; +} + +export function patchTurnResponse(sessionId, turnIndex, assistantResponse) { + const currentDb = _getAdapter(); + if (!currentDb) return; + currentDb + .prepare(`UPDATE turns SET assistant_response = :resp + WHERE session_id = :sid AND turn_index = :idx AND assistant_response IS NULL`) + .run({ + ":resp": assistantResponse, + ":sid": sessionId, + ":idx": turnIndex, + }); +} + +export function recordSessionFileTouch(entry) { + const currentDb = _getAdapter(); + if (!currentDb) return; + currentDb + .prepare(`INSERT OR IGNORE INTO session_file_touches + (session_id, path, tool_name, turn_id, first_seen_at) + VALUES (:session_id, :path, :tool_name, :turn_id, :first_seen_at)`) + .run({ + ":session_id": entry.sessionId, + ":path": entry.path, + ":tool_name": entry.toolName ?? null, + ":turn_id": entry.turnId ?? null, + ":first_seen_at": entry.firstSeenAt ?? new Date().toISOString(), + }); +} + +export function recordSessionRef(entry) { + const currentDb = _getAdapter(); + if (!currentDb) return; + currentDb + .prepare(`INSERT OR IGNORE INTO session_refs + (session_id, ref_type, ref_value, turn_id, created_at) + VALUES (:session_id, :ref_type, :ref_value, :turn_id, :created_at)`) + .run({ + ":session_id": entry.sessionId, + ":ref_type": entry.refType, + ":ref_value": entry.refValue, + ":turn_id": entry.turnId ?? null, + ":created_at": entry.createdAt ?? 
new Date().toISOString(), + }); +} + +export function searchSessionTurns(query, limit = 20) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare(`SELECT t.id, t.session_id, t.turn_index, t.ts, + t.user_message, t.assistant_response, + s.mode, s.cwd, s.repo, s.branch + FROM turns_fts + JOIN turns t ON turns_fts.rowid = t.id + JOIN sessions s ON t.session_id = s.session_id + WHERE turns_fts MATCH :query + ORDER BY rank + LIMIT :limit`) + .all({ ":query": query, ":limit": Math.max(1, Math.min(100, limit)) }); +} + +export function listRecentSessions(limit = 20) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare(`SELECT s.session_id, s.mode, s.cwd, s.repo, s.branch, + s.summary, s.created_at, s.updated_at, + COUNT(DISTINCT t.id) AS turn_count, + COUNT(DISTINCT f.id) AS file_count + FROM sessions s + LEFT JOIN turns t ON t.session_id = s.session_id + LEFT JOIN session_file_touches f ON f.session_id = s.session_id + GROUP BY s.session_id + ORDER BY s.updated_at DESC + LIMIT :limit`) + .all({ ":limit": Math.max(1, Math.min(100, limit)) }); +} + +export function insertSessionSnapshot(args) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const nextIndex = (() => { + const row = currentDb + .prepare( + "SELECT COALESCE(MAX(snapshot_index), -1) + 1 AS nxt FROM session_snapshots WHERE session_id = :sid", + ) + .get({ ":sid": args.sessionId }); + return row ? Number(row["nxt"]) : 0; + })(); + currentDb + .prepare(`INSERT INTO session_snapshots + (session_id, snapshot_index, git_stash_ref, label, ts) + VALUES (:sid, :idx, :ref, :label, :ts)`) + .run({ + ":sid": args.sessionId, + ":idx": nextIndex, + ":ref": args.gitStashRef ?? null, + ":label": args.label ?? null, + ":ts": args.ts ?? new Date().toISOString(), + }); + const row = currentDb + .prepare( + "SELECT id FROM session_snapshots WHERE session_id = :sid AND snapshot_index = :idx", + ) + .get({ ":sid": args.sessionId, ":idx": nextIndex }); + return row ? 
Number(row["id"]) : 0; +} + +export function listSessionSnapshots(sessionId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare( + "SELECT * FROM session_snapshots WHERE session_id = :sid ORDER BY snapshot_index ASC", + ) + .all({ ":sid": sessionId }); +} + diff --git a/src/resources/extensions/sf/sf-db/sf-db-slices.js b/src/resources/extensions/sf/sf-db/sf-db-slices.js new file mode 100644 index 000000000..56b4b8177 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-slices.js @@ -0,0 +1,464 @@ +import { _getAdapter, insertSliceSpecIfAbsent, parsePlanningMeeting, rowToSlice, safeParseJsonArray, rowToTask, transaction } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; +import { existsSync, readdirSync, readFileSync } from 'node:fs'; +import { join } from 'node:path'; + +export function insertSlice(s) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO slices ( + milestone_id, id, title, status, risk, depends, demo, created_at, + goal, success_criteria, proof_level, integration_closure, observability_impact, + adversarial_partner, adversarial_combatant, adversarial_architect, planning_meeting_json, sequence, + is_sketch, sketch_scope + ) VALUES ( + :milestone_id, :id, :title, :status, :risk, :depends, :demo, :created_at, + :goal, :success_criteria, :proof_level, :integration_closure, :observability_impact, + :adversarial_partner, :adversarial_combatant, :adversarial_architect, :planning_meeting_json, :sequence, + :is_sketch, :sketch_scope + ) + ON CONFLICT (milestone_id, id) DO UPDATE SET + title = CASE WHEN :raw_title IS NOT NULL THEN excluded.title ELSE slices.title END, + status = CASE WHEN slices.status IN ('complete', 'done') THEN slices.status ELSE excluded.status END, + risk = CASE WHEN :raw_risk IS NOT NULL THEN excluded.risk ELSE slices.risk END, + depends = excluded.depends, + demo = CASE WHEN :raw_demo IS NOT NULL THEN excluded.demo ELSE slices.demo END, + goal = CASE WHEN :raw_goal IS NOT NULL THEN excluded.goal ELSE slices.goal END, + success_criteria = CASE WHEN :raw_success_criteria IS NOT NULL THEN excluded.success_criteria ELSE slices.success_criteria END, + proof_level = CASE WHEN :raw_proof_level IS NOT NULL THEN excluded.proof_level ELSE slices.proof_level END, + integration_closure = CASE WHEN :raw_integration_closure IS NOT NULL THEN excluded.integration_closure ELSE slices.integration_closure END, + observability_impact = CASE WHEN :raw_observability_impact IS NOT NULL THEN excluded.observability_impact ELSE slices.observability_impact END, + adversarial_partner = CASE WHEN :raw_adversarial_partner IS NOT NULL THEN excluded.adversarial_partner ELSE slices.adversarial_partner END, + adversarial_combatant = CASE WHEN :raw_adversarial_combatant IS NOT NULL THEN excluded.adversarial_combatant ELSE slices.adversarial_combatant END, + adversarial_architect = CASE WHEN :raw_adversarial_architect IS NOT NULL THEN excluded.adversarial_architect ELSE slices.adversarial_architect END, + planning_meeting_json = CASE WHEN :raw_planning_meeting_json IS NOT NULL THEN excluded.planning_meeting_json ELSE slices.planning_meeting_json END, + sequence = CASE WHEN :raw_sequence IS NOT NULL THEN excluded.sequence ELSE slices.sequence END, + is_sketch = CASE WHEN :raw_is_sketch IS NOT NULL THEN excluded.is_sketch ELSE slices.is_sketch END, + sketch_scope = CASE WHEN :raw_sketch_scope IS NOT NULL THEN 
excluded.sketch_scope ELSE slices.sketch_scope END`) + .run({ + ":milestone_id": s.milestoneId, + ":id": s.id, + ":title": s.title ?? "", + ":status": s.status ?? "pending", + ":risk": s.risk ?? "medium", + ":depends": JSON.stringify(s.depends ?? []), + ":demo": s.demo ?? "", + ":created_at": new Date().toISOString(), + ":goal": s.planning?.goal ?? "", + ":success_criteria": s.planning?.successCriteria ?? "", + ":proof_level": s.planning?.proofLevel ?? "", + ":integration_closure": s.planning?.integrationClosure ?? "", + ":observability_impact": s.planning?.observabilityImpact ?? "", + ":adversarial_partner": s.planning?.adversarialReview?.partner ?? "", + ":adversarial_combatant": s.planning?.adversarialReview?.combatant ?? "", + ":adversarial_architect": s.planning?.adversarialReview?.architect ?? "", + ":planning_meeting_json": s.planning?.planningMeeting + ? JSON.stringify(s.planning.planningMeeting) + : "", + ":sequence": s.sequence ?? 0, + ":is_sketch": s.isSketch === true ? 1 : 0, + ":sketch_scope": s.sketchScope ?? "", + // Raw sentinel params: NULL when caller omitted the field, used in ON CONFLICT guards + ":raw_title": s.title ?? null, + ":raw_risk": s.risk ?? null, + ":raw_demo": s.demo ?? null, + ":raw_goal": s.planning?.goal ?? null, + ":raw_success_criteria": s.planning?.successCriteria ?? null, + ":raw_proof_level": s.planning?.proofLevel ?? null, + ":raw_integration_closure": s.planning?.integrationClosure ?? null, + ":raw_observability_impact": s.planning?.observabilityImpact ?? null, + ":raw_adversarial_partner": + s.planning?.adversarialReview?.partner ?? null, + ":raw_adversarial_combatant": + s.planning?.adversarialReview?.combatant ?? null, + ":raw_adversarial_architect": + s.planning?.adversarialReview?.architect ?? null, + ":raw_planning_meeting_json": s.planning?.planningMeeting + ? JSON.stringify(s.planning.planningMeeting) + : null, + ":raw_sequence": s.sequence ?? null, + ":raw_is_sketch": s.isSketch === undefined ? null : s.isSketch ? 1 : 0, + ":raw_sketch_scope": s.sketchScope === undefined ? null : s.sketchScope, + }); + insertSliceSpecIfAbsent(s.milestoneId, s.id, s.planning ?? {}); +} + +export function insertOrIgnoreSlice(args) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO slices (milestone_id, id, title, status, created_at) + VALUES (:mid, :sid, :title, 'pending', :ts)`) + .run({ + ":mid": args.milestoneId, + ":sid": args.sliceId, + ":title": args.title, + ":ts": args.createdAt, + }); +} + +export function clearSliceSketch(milestoneId, sliceId) { + setSliceSketchFlag(milestoneId, sliceId, false); +} + +export function setSliceSketchFlag(milestoneId, sliceId, isSketch) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `UPDATE slices SET is_sketch = :is_sketch WHERE milestone_id = :mid AND id = :sid`, + ) + .run({ + ":is_sketch": isSketch ? 
1 : 0, + ":mid": milestoneId, + ":sid": sliceId, + }); +} + +export function autoHealSketchFlags(milestoneId, hasPlanFile) { + const currentDb = _getAdapter(); + if (!currentDb) return; + const rows = currentDb + .prepare( + `SELECT id FROM slices WHERE milestone_id = :mid AND is_sketch = 1`, + ) + .all({ ":mid": milestoneId }); + for (const row of rows) { + if (hasPlanFile(row.id)) { + setSliceSketchFlag(milestoneId, row.id, false); + } + } +} + +export function upsertSlicePlanning(milestoneId, sliceId, planning) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + insertSliceSpecIfAbsent(milestoneId, sliceId, planning); + currentDb + .prepare(`UPDATE slices SET + goal = COALESCE(:goal, goal), + success_criteria = COALESCE(:success_criteria, success_criteria), + proof_level = COALESCE(:proof_level, proof_level), + integration_closure = COALESCE(:integration_closure, integration_closure), + observability_impact = COALESCE(:observability_impact, observability_impact), + adversarial_partner = COALESCE(:adversarial_partner, adversarial_partner), + adversarial_combatant = COALESCE(:adversarial_combatant, adversarial_combatant), + adversarial_architect = COALESCE(:adversarial_architect, adversarial_architect), + planning_meeting_json = COALESCE(:planning_meeting_json, planning_meeting_json) + WHERE milestone_id = :milestone_id AND id = :id`) + .run({ + ":milestone_id": milestoneId, + ":id": sliceId, + ":goal": planning.goal ?? null, + ":success_criteria": planning.successCriteria ?? null, + ":proof_level": planning.proofLevel ?? null, + ":integration_closure": planning.integrationClosure ?? null, + ":observability_impact": planning.observabilityImpact ?? null, + ":adversarial_partner": planning.adversarialReview?.partner ?? null, + ":adversarial_combatant": planning.adversarialReview?.combatant ?? null, + ":adversarial_architect": planning.adversarialReview?.architect ?? null, + ":planning_meeting_json": planning.planningMeeting + ? JSON.stringify(planning.planningMeeting) + : null, + }); +} + +export function getSlice(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare("SELECT * FROM slices WHERE milestone_id = :mid AND id = :sid") + .get({ ":mid": milestoneId, ":sid": sliceId }); + if (!row) return null; + return rowToSlice(row); +} + +export function updateSliceStatus(milestoneId, sliceId, status, completedAt) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE slices SET status = :status, completed_at = :completed_at + WHERE milestone_id = :milestone_id AND id = :id`) + .run({ + ":status": status, + ":completed_at": completedAt ?? 
null, + ":milestone_id": milestoneId, + ":id": sliceId, + }); +} + +export function setSliceUatVerdict(milestoneId, sliceId, verdict) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `UPDATE slices SET uat_verdict = :verdict WHERE milestone_id = :mid AND id = :sid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":verdict": verdict }); +} + +export function getSliceUatVerdict(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare( + `SELECT uat_verdict FROM slices WHERE milestone_id = :mid AND id = :sid`, + ) + .get({ ":mid": milestoneId, ":sid": sliceId }); + return row?.uat_verdict ?? null; +} + +export function backfillUatVerdicts(basePath) { + const currentDb = _getAdapter(); + if (!currentDb) return; + // Find all slices that have no verdict yet + const rows = currentDb + .prepare(`SELECT milestone_id, id FROM slices WHERE uat_verdict IS NULL`) + .all(); + if (!rows.length) return; + // Extract verdict from content — inline to avoid cross-module import at db layer + function parseVerdictFromContent(content) { + const fmMatch = content.match(/^---\n([\s\S]*?)\n---/); + if (fmMatch) { + const m = fmMatch[1].match(/verdict:\s*([\w-]+)/i); + if (m) { + let v = m[1].toLowerCase(); + if (v === "passed") v = "pass"; + return v; + } + return null; + } + const bodyMatch = content.match( + /\*\*Verdict:?\*\*\s*(?:✅\s*)?(\w[\w-]*)/i, + ); + if (bodyMatch) { + let v = bodyMatch[1].toLowerCase(); + if (v === "passed") v = "pass"; + return v; + } + return null; + } + const stmt = currentDb.prepare( + `UPDATE slices SET uat_verdict = :verdict WHERE milestone_id = :mid AND id = :sid`, + ); + for (const row of rows) { + const mid = row["milestone_id"]; + const sid = row["id"]; + const sliceDir = join(basePath, ".sf", "milestones", mid, "slices", sid); + const candidates = [ + join(sliceDir, `${sid}-ASSESSMENT.md`), + join(sliceDir, `${sid}-UAT_RESULT.md`), + ]; + for (const candidatePath of candidates) { + if (!existsSync(candidatePath)) continue; + try { + const content = readFileSync(candidatePath, "utf8"); + const verdict = parseVerdictFromContent(content); + if (verdict) { + stmt.run({ ":mid": mid, ":sid": sid, ":verdict": verdict }); + break; + } + } catch { + // Skip unreadable files + } + } + } +} + +export function setSliceSummaryMd(milestoneId, sliceId, summaryMd, uatMd) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `UPDATE slices SET full_summary_md = :summary_md, full_uat_md = :uat_md WHERE milestone_id = :mid AND id = :sid`, + ) + .run({ + ":mid": milestoneId, + ":sid": sliceId, + ":summary_md": summaryMd, + ":uat_md": uatMd, + }); +} + +export function getMilestoneSlices(milestoneId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const rows = currentDb + .prepare( + "SELECT * FROM slices WHERE milestone_id = :mid ORDER BY sequence, id", + ) + .all({ ":mid": milestoneId }); + return rows.map(rowToSlice); +} + +export function getSliceStatusSummary(milestoneId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare( + "SELECT id, status FROM slices WHERE milestone_id = :mid ORDER BY sequence, id", + ) + .all({ ":mid": milestoneId }) + .map((r) => ({ id: r["id"], status: r["status"] })); +} + +export function getSliceTaskCounts(milestoneId, sliceId) { + const currentDb = 
_getAdapter(); + if (!currentDb) return { total: 0, done: 0, pending: 0 }; + const row = currentDb + .prepare(`SELECT + COUNT(*) as total, + SUM(CASE WHEN status IN ('complete', 'done') THEN 1 ELSE 0 END) as done, + SUM(CASE WHEN status NOT IN ('complete', 'done') THEN 1 ELSE 0 END) as pending + FROM tasks WHERE milestone_id = :mid AND slice_id = :sid`) + .get({ ":mid": milestoneId, ":sid": sliceId }); + if (!row) return { total: 0, done: 0, pending: 0 }; + return { + total: row["total"] ?? 0, + done: row["done"] ?? 0, + pending: row["pending"] ?? 0, + }; +} + +export function syncSliceDependencies(milestoneId, sliceId, depends) { + const currentDb = _getAdapter(); + if (!currentDb) return; + currentDb + .prepare( + "DELETE FROM slice_dependencies WHERE milestone_id = :mid AND slice_id = :sid", + ) + .run({ ":mid": milestoneId, ":sid": sliceId }); + for (const dep of depends) { + currentDb + .prepare( + "INSERT OR IGNORE INTO slice_dependencies (milestone_id, slice_id, depends_on_slice_id) VALUES (:mid, :sid, :dep)", + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":dep": dep }); + } +} + +export function getDependentSlices(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare( + "SELECT slice_id FROM slice_dependencies WHERE milestone_id = :mid AND depends_on_slice_id = :sid", + ) + .all({ ":mid": milestoneId, ":sid": sliceId }) + .map((r) => r["slice_id"]); +} + +export function getActiveSliceFromDb(milestoneId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + // Find the first non-complete slice whose dependencies are all satisfied. + // Primary: uses the slice_dependencies junction table (kept in sync by syncSliceDependencies). + // Fallback: for slices with no junction rows, check the `depends` JSON column directly + // to handle legacy data or rows that were written before syncSliceDependencies ran. + const candidates = currentDb + .prepare(`SELECT s.* FROM slices s + WHERE s.milestone_id = :mid + AND s.status NOT IN ('complete', 'done', 'skipped') + AND NOT EXISTS ( + SELECT 1 FROM slice_dependencies d + WHERE d.milestone_id = :mid + AND d.slice_id = s.id + AND d.depends_on_slice_id NOT IN ( + SELECT id FROM slices WHERE milestone_id = :mid AND status IN ('complete', 'done', 'skipped') + ) + ) + ORDER BY s.sequence, s.id`) + .all({ ":mid": milestoneId }); + if (candidates.length === 0) return null; + // Collect completed slice IDs for JSON-dep fallback check. + const completedIds = new Set( + currentDb + .prepare( + "SELECT id FROM slices WHERE milestone_id = :mid AND status IN ('complete', 'done', 'skipped')", + ) + .all({ ":mid": milestoneId }) + .map((r) => r["id"]), + ); + for (const candidate of candidates) { + const hasSyncedDeps = + (currentDb + .prepare( + "SELECT COUNT(*) as c FROM slice_dependencies WHERE milestone_id = :mid AND slice_id = :sid", + ) + .get({ ":mid": milestoneId, ":sid": candidate["id"] })?.c ?? 0) > 0; + if (hasSyncedDeps) { + // Junction table is authoritative and candidate already passed the NOT EXISTS check. + return rowToSlice(candidate); + } + // No junction rows for this slice — fall back to JSON depends column. + const jsonDeps = safeParseJsonArray(candidate["depends"]); + if (jsonDeps.length === 0 || jsonDeps.every((d) => completedIds.has(d))) { + return rowToSlice(candidate); + } + // JSON deps not yet satisfied — continue to next candidate. 
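+    // Worked example (hypothetical data, for illustration only): if candidate "c"
+    // has depends = ["b"] stored only in the JSON column and "b" is still pending,
+    // completedIds will not contain "b", so "c" is skipped here and the loop moves
+    // on to the next candidate in sequence order.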
+ } + return null; +} + +export function updateSliceFields(milestoneId, sliceId, fields) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE slices SET + title = COALESCE(:title, title), + risk = COALESCE(:risk, risk), + depends = COALESCE(:depends, depends), + demo = COALESCE(:demo, demo) + WHERE milestone_id = :milestone_id AND id = :id`) + .run({ + ":milestone_id": milestoneId, + ":id": sliceId, + ":title": fields.title ?? null, + ":risk": fields.risk ?? null, + ":depends": fields.depends ? JSON.stringify(fields.depends) : null, + ":demo": fields.demo ?? null, + }); +} + +export function setSliceReplanTriggeredAt(milestoneId, sliceId, ts) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + "UPDATE slices SET replan_triggered_at = :ts WHERE milestone_id = :mid AND id = :sid", + ) + .run({ ":ts": ts, ":mid": milestoneId, ":sid": sliceId }); +} + +export function deleteSlice(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + // Cascade-style manual deletion: evidence → tasks → dependencies → slice + currentDb + .prepare( + `DELETE FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId }); + currentDb + .prepare( + `DELETE FROM tasks WHERE milestone_id = :mid AND slice_id = :sid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId }); + currentDb + .prepare( + `DELETE FROM slice_dependencies WHERE milestone_id = :mid AND slice_id = :sid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId }); + currentDb + .prepare( + `DELETE FROM slice_dependencies WHERE milestone_id = :mid AND depends_on_slice_id = :sid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId }); + currentDb + .prepare(`DELETE FROM slices WHERE milestone_id = :mid AND id = :sid`) + .run({ ":mid": milestoneId, ":sid": sliceId }); + }); +} + diff --git a/src/resources/extensions/sf/sf-db/sf-db-spec.js b/src/resources/extensions/sf/sf-db/sf-db-spec.js new file mode 100644 index 000000000..74f641e65 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-spec.js @@ -0,0 +1,163 @@ +import { _getAdapter } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; + +export function getMilestoneSpec(milestoneId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + return currentDb + .prepare("SELECT * FROM milestone_specs WHERE id = ?") + .get(milestoneId); +} + +export function getSliceSpec(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + return currentDb + .prepare( + "SELECT * FROM slice_specs WHERE milestone_id = ? AND slice_id = ?", + ) + .get(milestoneId, sliceId); +} + +export function getTaskSpec(milestoneId, sliceId, taskId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + return currentDb + .prepare( + "SELECT * FROM task_specs WHERE milestone_id = ? AND slice_id = ? 
AND task_id = ?", + ) + .get(milestoneId, sliceId, taskId); +} + +export function startValidationRun({ milestoneId, sliceId, taskId, contract }) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const runId = crypto.randomUUID(); + currentDb + .prepare( + `INSERT INTO validation_runs + (run_id, milestone_id, slice_id, task_id, contract, status, started_at, created_at) + VALUES (:run_id, :milestone_id, :slice_id, :task_id, :contract, 'running', datetime('now'), datetime('now'))`, + ) + .run({ + ":run_id": runId, + ":milestone_id": milestoneId, + ":slice_id": sliceId ?? null, + ":task_id": taskId ?? null, + ":contract": contract ?? "", + }); + return runId; +} + +export function completeValidationRun({ + runId, + verdict, + rationale = "", + findings = "", +}) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const status = + verdict === "pass" ? "pass" : verdict === "fail" ? "fail" : "error"; + const result = currentDb + .prepare( + `UPDATE validation_runs SET + status = :status, + verdict = :verdict, + rationale = :rationale, + findings = :findings, + completed_at = datetime('now') + WHERE run_id = :run_id AND status = 'running'`, + ) + .run({ + ":run_id": runId, + ":status": status, + ":verdict": verdict ?? "", + ":rationale": rationale ?? "", + ":findings": findings ?? "", + }); + if (result.changes === 0) { + throw new SFError( + SF_STALE_STATE, + `sf-db: completeValidationRun: no running validation run found for run_id=${runId}`, + ); + } +} + +export function getLatestValidationState(milestoneId, sliceId, taskId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const rows = currentDb + .prepare( + `SELECT * FROM validation_runs + WHERE milestone_id = :milestone_id + AND slice_id IS :slice_id + AND task_id IS :task_id + ORDER BY created_at DESC, run_id DESC + LIMIT 1`, + ) + .all({ + ":milestone_id": milestoneId, + ":slice_id": sliceId ?? null, + ":task_id": taskId ?? null, + }); + return rows[0] ?? null; +} + +export function getValidationHistory(milestoneId, sliceId, taskId, limit = 20) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare( + `SELECT * FROM validation_runs + WHERE milestone_id = :milestone_id + AND slice_id IS :slice_id + AND task_id IS :task_id + ORDER BY created_at DESC, run_id DESC + LIMIT :limit`, + ) + .all({ + ":milestone_id": milestoneId, + ":slice_id": sliceId ?? null, + ":task_id": taskId ?? null, + ":limit": limit, + }); +} + +export function getValidationAttentionMarker(milestoneId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + return ( + currentDb + .prepare( + "SELECT * FROM validation_attention_markers WHERE milestone_id = ?", + ) + .get(milestoneId) ?? 
null + ); +} + +export function upsertValidationAttentionMarker(milestoneId, marker) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const now = new Date().toISOString(); + currentDb + .prepare( + `INSERT INTO validation_attention_markers + (milestone_id, created_at, source, remediation_round, revalidation_round, revalidation_requested_at) + VALUES (:milestone_id, :created_at, :source, :remediation_round, :revalidation_round, :revalidation_requested_at) + ON CONFLICT(milestone_id) DO UPDATE SET + source = excluded.source, + remediation_round = excluded.remediation_round, + revalidation_round = excluded.revalidation_round, + revalidation_requested_at = excluded.revalidation_requested_at`, + ) + .run({ + ":milestone_id": milestoneId, + ":created_at": marker.createdAt ?? now, + ":source": marker.source ?? null, + ":remediation_round": marker.remediationRound ?? null, + ":revalidation_round": marker.revalidationRound ?? null, + ":revalidation_requested_at": marker.revalidationRequestedAt ?? null, + }); +} + diff --git a/src/resources/extensions/sf/sf-db/sf-db-tasks.js b/src/resources/extensions/sf/sf-db/sf-db-tasks.js new file mode 100644 index 000000000..2d8ac9661 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-tasks.js @@ -0,0 +1,455 @@ +import { _getAdapter, hasTaskSpecIntent, insertTaskSpecIfAbsent, rowToTask, safeParseJsonArray, transaction } from './sf-db-core.js'; +import { SF_STALE_STATE, SFError } from '../errors.js'; +import { normalizeSchedulerStatus, normalizeTaskStatus, taskFrontmatterFromRecord, withTaskFrontmatter } from '../task-frontmatter.js'; +import { logWarning } from '../workflow-logger.js'; + +export function insertTask(t) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT INTO tasks ( + milestone_id, slice_id, id, title, status, task_status, one_liner, narrative, + verification_result, verification_status, duration, completed_at, blocker_discovered, + deviations, known_issues, key_files, key_decisions, full_summary_md, + description, estimate, files, verify, inputs, expected_output, observability_impact, sequence + ) VALUES ( + :milestone_id, :slice_id, :id, :title, :status, :task_status, :one_liner, :narrative, + :verification_result, :verification_status, :duration, :completed_at, :blocker_discovered, + :deviations, :known_issues, :key_files, :key_decisions, :full_summary_md, + :description, :estimate, :files, :verify, :inputs, :expected_output, :observability_impact, :sequence + ) + ON CONFLICT(milestone_id, slice_id, id) DO UPDATE SET + title = CASE WHEN NULLIF(:title, '') IS NOT NULL THEN :title ELSE tasks.title END, + status = :status, + task_status = :task_status, + one_liner = :one_liner, + narrative = :narrative, + verification_result = :verification_result, + verification_status = :verification_status, + duration = :duration, + completed_at = :completed_at, + blocker_discovered = :blocker_discovered, + deviations = :deviations, + known_issues = :known_issues, + key_files = :key_files, + key_decisions = :key_decisions, + full_summary_md = :full_summary_md, + description = CASE WHEN NULLIF(:description, '') IS NOT NULL THEN :description ELSE tasks.description END, + estimate = CASE WHEN NULLIF(:estimate, '') IS NOT NULL THEN :estimate ELSE tasks.estimate END, + files = CASE WHEN NULLIF(:files, '[]') IS NOT NULL THEN :files ELSE tasks.files END, + verify = CASE WHEN NULLIF(:verify, '') 
IS NOT NULL THEN :verify ELSE tasks.verify END, + inputs = CASE WHEN NULLIF(:inputs, '[]') IS NOT NULL THEN :inputs ELSE tasks.inputs END, + expected_output = CASE WHEN NULLIF(:expected_output, '[]') IS NOT NULL THEN :expected_output ELSE tasks.expected_output END, + observability_impact = CASE WHEN NULLIF(:observability_impact, '') IS NOT NULL THEN :observability_impact ELSE tasks.observability_impact END, + sequence = :sequence`) + .run({ + ":milestone_id": t.milestoneId, + ":slice_id": t.sliceId, + ":id": t.id, + ":title": t.title ?? "", + ":status": t.status ?? "pending", + ":task_status": normalizeTaskStatus(t.taskStatus ?? t.status) ?? "todo", + ":one_liner": t.oneLiner ?? "", + ":narrative": t.narrative ?? "", + ":verification_result": t.verificationResult ?? "", + ":verification_status": t.verificationStatus ?? "", + ":duration": t.duration ?? "", + ":completed_at": + t.status === "done" || t.status === "complete" + ? new Date().toISOString() + : null, + ":blocker_discovered": t.blockerDiscovered ? 1 : 0, + ":deviations": t.deviations ?? "", + ":known_issues": t.knownIssues ?? "", + ":key_files": JSON.stringify(t.keyFiles ?? []), + ":key_decisions": JSON.stringify(t.keyDecisions ?? []), + ":full_summary_md": t.fullSummaryMd ?? "", + ":description": t.planning?.description ?? "", + ":estimate": t.planning?.estimate ?? "", + ":files": JSON.stringify(t.planning?.files ?? []), + ":verify": t.planning?.verify ?? "", + ":inputs": JSON.stringify(t.planning?.inputs ?? []), + ":expected_output": JSON.stringify(t.planning?.expectedOutput ?? []), + ":observability_impact": t.planning?.observabilityImpact ?? "", + ":sequence": t.sequence ?? 0, + }); + if (hasTaskSpecIntent(t.planning)) { + insertTaskSpecIfAbsent(t.milestoneId, t.sliceId, t.id, t.planning ?? {}); + } + upsertTaskSchedulerStatus(t.milestoneId, t.sliceId, t.id, "queued", { + onlyIfAbsent: true, + }); +} + +export function insertOrIgnoreTask(args) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`INSERT OR IGNORE INTO tasks (milestone_id, slice_id, id, title, status, created_at) + VALUES (:mid, :sid, :tid, :title, 'pending', :ts)`) + .run({ + ":mid": args.milestoneId, + ":sid": args.sliceId, + ":tid": args.taskId, + ":title": args.title, + ":ts": args.createdAt, + }); +} + +export function upsertTaskSchedulerStatus( + milestoneId, + sliceId, + taskId, + status = "queued", + { onlyIfAbsent = false } = {}, +) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const schedulerStatus = normalizeSchedulerStatus(status) ?? "queued"; + const sql = onlyIfAbsent + ? 
`INSERT OR IGNORE INTO task_scheduler ( + milestone_id, slice_id, task_id, status, updated_at + ) VALUES ( + :milestone_id, :slice_id, :task_id, :status, :updated_at + )` + : `INSERT INTO task_scheduler ( + milestone_id, slice_id, task_id, status, updated_at + ) VALUES ( + :milestone_id, :slice_id, :task_id, :status, :updated_at + ) + ON CONFLICT(milestone_id, slice_id, task_id) DO UPDATE SET + status = excluded.status, + updated_at = excluded.updated_at`; + currentDb.prepare(sql).run({ + ":milestone_id": milestoneId, + ":slice_id": sliceId, + ":task_id": taskId, + ":status": schedulerStatus, + ":updated_at": new Date().toISOString(), + }); +} + +export function updateTaskStatus( + milestoneId, + sliceId, + taskId, + status, + completedAt, +) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + const taskStatus = normalizeTaskStatus(status) ?? "todo"; + currentDb + .prepare(`UPDATE tasks SET + status = :status, + completed_at = :completed_at, + task_status = :task_status + WHERE milestone_id = :milestone_id AND slice_id = :slice_id AND id = :id`) + .run({ + ":status": status, + ":completed_at": completedAt ?? null, + ":task_status": taskStatus, + ":milestone_id": milestoneId, + ":slice_id": sliceId, + ":id": taskId, + }); +} + +export function setTaskEscalationPending( + milestoneId, + sliceId, + taskId, + artifactPath, +) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE tasks + SET escalation_pending = 1, + escalation_awaiting_review = 0, + escalation_artifact_path = :path + WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`) + .run({ + ":path": artifactPath, + ":mid": milestoneId, + ":sid": sliceId, + ":tid": taskId, + }); +} + +export function setTaskEscalationAwaitingReview( + milestoneId, + sliceId, + taskId, + artifactPath, +) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE tasks + SET escalation_awaiting_review = 1, + escalation_pending = 0, + escalation_artifact_path = :path + WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`) + .run({ + ":path": artifactPath, + ":mid": milestoneId, + ":sid": sliceId, + ":tid": taskId, + }); +} + +export function clearTaskEscalationFlags(milestoneId, sliceId, taskId) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare(`UPDATE tasks + SET escalation_pending = 0, + escalation_awaiting_review = 0 + WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); +} + +export function findUnappliedEscalationOverride(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare(`SELECT id, escalation_artifact_path + FROM tasks + WHERE milestone_id = :mid + AND slice_id = :sid + AND escalation_artifact_path IS NOT NULL + AND escalation_pending = 0 + AND escalation_awaiting_review = 0 + AND escalation_override_applied = 0 + ORDER BY sequence ASC, id ASC + LIMIT 1`) + .get({ ":mid": milestoneId, ":sid": sliceId }); + if (!row || !row.escalation_artifact_path) return null; + return { taskId: row.id, artifactPath: row.escalation_artifact_path }; +} + +export function claimEscalationOverride(milestoneId, sliceId, taskId) { + const currentDb = _getAdapter(); + if 
(!currentDb) return false; + const result = currentDb + .prepare(`UPDATE tasks + SET escalation_override_applied = 1 + WHERE milestone_id = :mid + AND slice_id = :sid + AND id = :tid + AND escalation_override_applied = 0`) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); + return (result?.changes ?? 0) > 0; +} + +export function setTaskBlockerDiscovered( + milestoneId, + sliceId, + taskId, + discovered, +) { + const currentDb = _getAdapter(); + if (!currentDb) return; + currentDb + .prepare( + `UPDATE tasks SET blocker_discovered = :discovered WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`, + ) + .run({ + ":discovered": discovered ? 1 : 0, + ":mid": milestoneId, + ":sid": sliceId, + ":tid": taskId, + }); +} + +export function upsertTaskPlanning(milestoneId, sliceId, taskId, planning) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + insertTaskSpecIfAbsent(milestoneId, sliceId, taskId, planning); + const { normalized: frontmatter, errors: fmErrors } = + taskFrontmatterFromRecord(planning); + if (fmErrors?.length) + logWarning( + "sf-db:upsertTaskPlanning", + `frontmatter validation errors for ${milestoneId}/${sliceId}/${taskId}: ${fmErrors.join(", ")}`, + ); + const hasTaskStatus = + planning.taskStatus !== undefined || + planning.task_status !== undefined || + planning.status !== undefined; + currentDb + .prepare(`UPDATE tasks SET + title = COALESCE(:title, title), + description = COALESCE(:description, description), + estimate = COALESCE(:estimate, estimate), + files = COALESCE(:files, files), + verify = COALESCE(:verify, verify), + inputs = COALESCE(:inputs, inputs), + expected_output = COALESCE(:expected_output, expected_output), + observability_impact = COALESCE(:observability_impact, observability_impact), + full_plan_md = COALESCE(:full_plan_md, full_plan_md), + risk = :risk, + mutation_scope = :mutation_scope, + verification_type = :verification_type, + plan_approval = :plan_approval, + task_status = CASE WHEN :has_task_status = 1 THEN :task_status ELSE task_status END, + estimated_effort = :estimated_effort, + dependencies = :dependencies, + blocks_parallel = :blocks_parallel, + requires_user_input = :requires_user_input, + auto_retry = :auto_retry, + max_retries = :max_retries, + frontmatter_version = :frontmatter_version + WHERE milestone_id = :milestone_id AND slice_id = :slice_id AND id = :id`) + .run({ + ":milestone_id": milestoneId, + ":slice_id": sliceId, + ":id": taskId, + ":title": planning.title ?? null, + ":description": planning.description ?? null, + ":estimate": planning.estimate ?? null, + ":files": planning.files ? JSON.stringify(planning.files) : null, + ":verify": planning.verify ?? null, + ":inputs": planning.inputs ? JSON.stringify(planning.inputs) : null, + ":expected_output": planning.expectedOutput + ? JSON.stringify(planning.expectedOutput) + : null, + ":observability_impact": planning.observabilityImpact ?? null, + ":full_plan_md": planning.fullPlanMd ?? null, + ":risk": frontmatter.risk, + ":mutation_scope": frontmatter.mutationScope, + ":verification_type": frontmatter.verification, + ":plan_approval": frontmatter.planApproval, + ":task_status": frontmatter.taskStatus, + ":has_task_status": hasTaskStatus ? 1 : 0, + ":estimated_effort": frontmatter.estimatedEffort, + ":dependencies": JSON.stringify(frontmatter.dependencies), + ":blocks_parallel": frontmatter.blocksParallel ? 1 : 0, + ":requires_user_input": frontmatter.requiresUserInput ? 
1 : 0, + ":auto_retry": frontmatter.autoRetry ? 1 : 0, + ":max_retries": frontmatter.maxRetries, + ":frontmatter_version": frontmatter.frontmatterVersion, + }); + if ( + planning.schedulerStatus !== undefined || + planning.scheduler_status !== undefined + ) { + upsertTaskSchedulerStatus( + milestoneId, + sliceId, + taskId, + frontmatter.schedulerStatus, + ); + } else { + upsertTaskSchedulerStatus(milestoneId, sliceId, taskId, "queued", { + onlyIfAbsent: true, + }); + } + } + +export function getTask(milestoneId, sliceId, taskId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare( + `SELECT t.*, ts.status AS scheduler_status + FROM tasks t + LEFT JOIN task_scheduler ts + ON t.milestone_id = ts.milestone_id + AND t.slice_id = ts.slice_id + AND t.id = ts.task_id + WHERE t.milestone_id = :mid AND t.slice_id = :sid AND t.id = :tid`, + ) + .get({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); + if (!row) return null; + return rowToTask(row); +} + +export function setTaskSummaryMd(milestoneId, sliceId, taskId, md) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + currentDb + .prepare( + `UPDATE tasks SET full_summary_md = :md WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId, ":md": md }); +} + +export function getActiveTaskFromDb(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare( + "SELECT * FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND status NOT IN ('complete', 'done') ORDER BY sequence, id LIMIT 1", + ) + .get({ ":mid": milestoneId, ":sid": sliceId }); + if (!row) return null; + return rowToTask(row); +} + +export function getActiveTaskIdFromDb(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return null; + const row = currentDb + .prepare( + "SELECT id, status, title FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND status NOT IN ('complete', 'done') ORDER BY sequence, id LIMIT 1", + ) + .get({ ":mid": milestoneId, ":sid": sliceId }); + if (!row) return null; + return { + id: row["id"], + status: row["status"], + title: row["title"], + }; +} + +export function deleteTask(milestoneId, sliceId, taskId) { + const currentDb = _getAdapter(); + if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); + transaction(() => { + // Must delete verification_evidence first (FK constraint) + currentDb + .prepare( + `DELETE FROM verification_evidence WHERE milestone_id = :mid AND slice_id = :sid AND task_id = :tid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); + currentDb + .prepare( + `DELETE FROM tasks WHERE milestone_id = :mid AND slice_id = :sid AND id = :tid`, + ) + .run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId }); + }); +} + +export function listEscalationArtifacts(milestoneId, includeResolved = false) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const filter = includeResolved + ? 
"escalation_artifact_path IS NOT NULL" + : "(escalation_pending = 1 OR escalation_awaiting_review = 1) AND escalation_artifact_path IS NOT NULL"; + const rows = currentDb + .prepare( + `SELECT * FROM tasks WHERE milestone_id = :mid AND ${filter} ORDER BY slice_id, sequence, id`, + ) + .all({ ":mid": milestoneId }); + return rows.map(rowToTask); +} + +export function getSliceTasks(milestoneId, sliceId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + const rows = currentDb + .prepare( + `SELECT t.*, ts.status AS scheduler_status + FROM tasks t + LEFT JOIN task_scheduler ts + ON t.milestone_id = ts.milestone_id + AND t.slice_id = ts.slice_id + AND t.id = ts.task_id + WHERE t.milestone_id = :mid AND t.slice_id = :sid + ORDER BY t.sequence, t.id`, + ) + .all({ ":mid": milestoneId, ":sid": sliceId }); + return rows.map(rowToTask); +} diff --git a/src/resources/extensions/sf/sf-db/sf-db-uok.js b/src/resources/extensions/sf/sf-db/sf-db-uok.js new file mode 100644 index 000000000..956695361 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-uok.js @@ -0,0 +1,366 @@ +import { _getAdapter, capErrorForStorage, parseJsonObject, rowToUnitMetrics } from './sf-db-core.js'; +import { logWarning } from '../workflow-logger.js'; +import { readTraceEvents } from '../uok/trace-writer.js'; + +export function recordUokRunStart(entry) { + const currentDb = _getAdapter(); + if (!currentDb) return; + const now = entry.startedAt ?? new Date().toISOString(); + currentDb + .prepare(`INSERT INTO uok_runs ( + run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at + ) VALUES ( + :run_id, :session_id, :path, 'started', :started_at, NULL, NULL, :flags_json, :updated_at + ) + ON CONFLICT(run_id) DO UPDATE SET + session_id = excluded.session_id, + path = excluded.path, + status = 'started', + started_at = excluded.started_at, + ended_at = NULL, + error = NULL, + flags_json = excluded.flags_json, + updated_at = excluded.updated_at`) + .run({ + ":run_id": entry.runId, + ":session_id": entry.sessionId ?? null, + ":path": entry.path ?? "", + ":started_at": now, + ":flags_json": JSON.stringify(entry.flags ?? {}), + ":updated_at": now, + }); +} + +export function recordUokRunExit(entry) { + const currentDb = _getAdapter(); + if (!currentDb) return; + const now = entry.endedAt ?? new Date().toISOString(); + currentDb + .prepare(`INSERT INTO uok_runs ( + run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at + ) VALUES ( + :run_id, :session_id, :path, :status, :started_at, :ended_at, :error, :flags_json, :updated_at + ) + ON CONFLICT(run_id) DO UPDATE SET + session_id = COALESCE(excluded.session_id, uok_runs.session_id), + path = CASE WHEN excluded.path = '' THEN uok_runs.path ELSE excluded.path END, + status = excluded.status, + ended_at = excluded.ended_at, + error = excluded.error, + flags_json = CASE WHEN excluded.flags_json = '{}' THEN uok_runs.flags_json ELSE excluded.flags_json END, + updated_at = excluded.updated_at`) + .run({ + ":run_id": entry.runId, + ":session_id": entry.sessionId ?? null, + ":path": entry.path ?? "", + ":status": entry.status ?? "ok", + ":started_at": entry.startedAt ?? now, + ":ended_at": now, + ":error": entry.error + ? capErrorForStorage(entry.error, entry.runId) + : null, + ":flags_json": JSON.stringify(entry.flags ?? 
{}), + ":updated_at": now, + }); +} + +export function getUokRuns(limit = 500) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + return currentDb + .prepare( + `SELECT run_id, session_id, path, status, started_at, ended_at, error, flags_json, updated_at + FROM uok_runs + ORDER BY started_at DESC + LIMIT :limit`, + ) + .all({ ":limit": limit }) + .map((row) => ({ + runId: row.run_id, + sessionId: row.session_id, + path: row.path, + status: row.status, + startedAt: row.started_at, + endedAt: row.ended_at, + error: row.error, + flags: (() => { + try { + return JSON.parse(row.flags_json || "{}"); + } catch { + return {}; + } + })(), + updatedAt: row.updated_at, + })); +} + +export function insertAuditEvent(_entry) { + // no-op: audit events now written exclusively to JSONL files +} + +export function insertUokMessage(msg) { + const currentDb = _getAdapter(); + if (!currentDb) return; + currentDb + .prepare( + `INSERT OR IGNORE INTO uok_messages (id, from_agent, to_agent, body, metadata_json, sent_at, delivered_at) + VALUES (:id, :from_agent, :to_agent, :body, :metadata_json, :sent_at, :delivered_at)`, + ) + .run({ + ":id": msg.id, + ":from_agent": msg.from, + ":to_agent": msg.to, + ":body": msg.body ?? "", + ":metadata_json": JSON.stringify(msg.metadata ?? {}), + ":sent_at": msg.sentAt, + ":delivered_at": msg.deliveredAt ?? null, + }); +} + +export function getUokMessagesForAgent( + agentId, + limit = 1000, + unreadOnly = false, +) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + try { + let sql = `SELECT m.id, m.from_agent AS "from", m.to_agent AS "to", m.body, m.metadata_json AS metadataJson, m.sent_at AS sentAt, m.delivered_at AS deliveredAt, + CASE WHEN r.agent_id IS NOT NULL THEN 1 ELSE 0 END AS read + FROM uok_messages m + LEFT JOIN uok_message_reads r ON r.message_id = m.id AND r.agent_id = :agent_id + WHERE m.to_agent = :agent_id`; + if (unreadOnly) { + sql += " AND r.agent_id IS NULL"; + } + sql += " ORDER BY m.sent_at ASC LIMIT :limit"; + const rows = currentDb.prepare(sql).all({ + ":agent_id": agentId, + ":limit": Math.max(1, Math.min(10_000, Number(limit) || 1000)), + }); + return rows.map((r) => ({ + id: r.id, + from: r.from, + to: r.to, + body: r.body, + metadata: parseJsonObject(r.metadataJson, {}), + sentAt: r.sentAt, + deliveredAt: r.deliveredAt, + read: !!r.read, + })); + } catch { + return []; + } +} + +export function getUokConversation(agentA, agentB, limit = 1000) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + try { + const rows = currentDb + .prepare( + `SELECT id, from_agent AS "from", to_agent AS "to", body, metadata_json AS metadataJson, sent_at AS sentAt, delivered_at AS deliveredAt + FROM uok_messages + WHERE (from_agent = :a AND to_agent = :b) OR (from_agent = :b AND to_agent = :a) + ORDER BY sent_at DESC + LIMIT :limit`, + ) + .all({ ":a": agentA, ":b": agentB, ":limit": limit }); + return rows.map((r) => ({ + id: r.id, + from: r.from, + to: r.to, + body: r.body, + metadata: parseJsonObject(r.metadataJson, {}), + sentAt: r.sentAt, + deliveredAt: r.deliveredAt, + })); + } catch { + return []; + } +} + +export function markUokMessageRead(messageId, agentId) { + const currentDb = _getAdapter(); + if (!currentDb) return false; + try { + currentDb + .prepare( + `INSERT OR IGNORE INTO uok_message_reads (message_id, agent_id, read_at) VALUES (:message_id, :agent_id, :read_at)`, + ) + .run({ + ":message_id": messageId, + ":agent_id": agentId, + ":read_at": new Date().toISOString(), + }); + return true; + } catch { 
+ return false; + } +} + +export function getUokMessageUnreadCount(agentId) { + const currentDb = _getAdapter(); + if (!currentDb) return 0; + try { + const row = currentDb + .prepare( + `SELECT COUNT(*) AS cnt FROM uok_messages m + WHERE m.to_agent = :agent_id + AND NOT EXISTS ( + SELECT 1 FROM uok_message_reads r + WHERE r.message_id = m.id AND r.agent_id = :agent_id + )`, + ) + .get({ ":agent_id": agentId }); + return row?.cnt ?? 0; + } catch { + return 0; + } +} + +export function compactUokMessages(retentionDays) { + const currentDb = _getAdapter(); + if (!currentDb) return { before: 0, after: 0 }; + try { + const cutoff = new Date( + Date.now() - retentionDays * 24 * 60 * 60 * 1000, + ).toISOString(); + const beforeRow = currentDb + .prepare("SELECT COUNT(*) AS cnt FROM uok_messages") + .get(); + currentDb + .prepare("DELETE FROM uok_messages WHERE sent_at < :cutoff") + .run({ ":cutoff": cutoff }); + const afterRow = currentDb + .prepare("SELECT COUNT(*) AS cnt FROM uok_messages") + .get(); + return { before: beforeRow?.cnt ?? 0, after: afterRow?.cnt ?? 0 }; + } catch { + return { before: 0, after: 0 }; + } +} + +export function getUokMessageReadIds(agentId) { + const currentDb = _getAdapter(); + if (!currentDb) return []; + try { + const rows = currentDb + .prepare( + "SELECT message_id FROM uok_message_reads WHERE agent_id = :agent_id", + ) + .all({ ":agent_id": agentId }); + return rows.map((r) => r.message_id); + } catch { + return []; + } +} + +export function getUokMessageBusMetrics() { + const currentDb = _getAdapter(); + if (!currentDb) { + return { + totalMessages: 0, + totalUnread: 0, + uniqueAgents: 0, + uniqueConversations: 0, + }; + } + try { + const totalRow = currentDb + .prepare("SELECT COUNT(*) AS cnt FROM uok_messages") + .get(); + const unreadRow = currentDb + .prepare( + `SELECT COUNT(*) AS cnt FROM uok_messages m + WHERE NOT EXISTS ( + SELECT 1 FROM uok_message_reads r + WHERE r.message_id = m.id + AND r.agent_id = m.to_agent + )`, + ) + .get(); + const agentsRow = currentDb + .prepare(`SELECT COUNT(DISTINCT to_agent) AS cnt FROM uok_messages`) + .get(); + const convRow = currentDb + .prepare( + `SELECT COUNT(DISTINCT from_agent || ':' || to_agent) AS cnt FROM uok_messages`, + ) + .get(); + return { + totalMessages: totalRow?.cnt ?? 0, + totalUnread: unreadRow?.cnt ?? 0, + uniqueAgents: agentsRow?.cnt ?? 0, + uniqueConversations: convRow?.cnt ?? 
0, + }; + } catch { + return { + totalMessages: 0, + totalUnread: 0, + uniqueAgents: 0, + uniqueConversations: 0, + }; + } +} + +export function upsertUnitMetrics(db, unit) { + db.prepare( + `INSERT OR REPLACE INTO unit_metrics ( + type, id, started_at, finished_at, model, auto_session_key, + tokens_input, tokens_output, tokens_cache_read, tokens_cache_write, tokens_total, + cost, tool_calls, assistant_messages, user_messages, api_requests, + tier, model_downgraded, context_window_tokens, truncation_sections, + continue_here_fired, prompt_char_count, baseline_char_count, cache_hit_rate, skills + ) VALUES ( + :type, :id, :started_at, :finished_at, :model, :auto_session_key, + :tokens_input, :tokens_output, :tokens_cache_read, :tokens_cache_write, :tokens_total, + :cost, :tool_calls, :assistant_messages, :user_messages, :api_requests, + :tier, :model_downgraded, :context_window_tokens, :truncation_sections, + :continue_here_fired, :prompt_char_count, :baseline_char_count, :cache_hit_rate, :skills + )`, + ).run({ + ":type": unit.type, + ":id": unit.id, + ":started_at": unit.startedAt, + ":finished_at": unit.finishedAt, + ":model": unit.model, + ":auto_session_key": unit.autoSessionKey ?? null, + ":tokens_input": unit.tokens.input, + ":tokens_output": unit.tokens.output, + ":tokens_cache_read": unit.tokens.cacheRead, + ":tokens_cache_write": unit.tokens.cacheWrite, + ":tokens_total": unit.tokens.total, + ":cost": unit.cost, + ":tool_calls": unit.toolCalls, + ":assistant_messages": unit.assistantMessages, + ":user_messages": unit.userMessages, + ":api_requests": unit.apiRequests ?? unit.assistantMessages, + ":tier": unit.tier ?? null, + ":model_downgraded": + unit.modelDowngraded != null ? (unit.modelDowngraded ? 1 : 0) : null, + ":context_window_tokens": unit.contextWindowTokens ?? null, + ":truncation_sections": unit.truncationSections ?? null, + ":continue_here_fired": + unit.continueHereFired != null ? (unit.continueHereFired ? 1 : 0) : null, + ":prompt_char_count": unit.promptCharCount ?? null, + ":baseline_char_count": unit.baselineCharCount ?? null, + ":cache_hit_rate": unit.cacheHitRate ?? null, + ":skills": unit.skills != null ? 
JSON.stringify(unit.skills) : null, + }); +} + +export function getAllUnitMetrics(db) { + return db + .prepare("SELECT * FROM unit_metrics ORDER BY started_at ASC") + .all() + .map(rowToUnitMetrics); +} + +export function pruneUnitMetrics(db, keepCount) { + db.prepare( + `DELETE FROM unit_metrics WHERE rowid NOT IN ( + SELECT rowid FROM unit_metrics ORDER BY finished_at DESC LIMIT :keepCount + )`, + ).run({ ":keepCount": keepCount }); +} diff --git a/src/resources/extensions/sf/sf-db/sf-db-worktree.js b/src/resources/extensions/sf/sf-db/sf-db-worktree.js new file mode 100644 index 000000000..67adbb941 --- /dev/null +++ b/src/resources/extensions/sf/sf-db/sf-db-worktree.js @@ -0,0 +1,265 @@ +import { _getAdapter, openDatabase } from './sf-db-core.js'; +import { copyFileSync, existsSync, mkdirSync, realpathSync } from 'node:fs'; +import { dirname } from 'node:path'; +import { logError, logWarning } from '../workflow-logger.js'; + +export function copyWorktreeDb(srcDbPath, destDbPath) { + try { + if (!existsSync(srcDbPath)) return false; + const destDir = dirname(destDbPath); + mkdirSync(destDir, { recursive: true }); + copyFileSync(srcDbPath, destDbPath); + return true; + } catch (err) { + logError("db", "failed to copy DB to worktree", { + error: err.message, + }); + return false; + } +} + +export function reconcileWorktreeDb(mainDbPath, worktreeDbPath) { + const zero = { + decisions: 0, + requirements: 0, + artifacts: 0, + milestones: 0, + slices: 0, + tasks: 0, + memories: 0, + verification_evidence: 0, + conflicts: [], + }; + if (!existsSync(worktreeDbPath)) return zero; + // Guard: bail when both paths resolve to the same physical file. + // ATTACHing a WAL-mode DB to itself corrupts the WAL (#2823). + try { + if (realpathSync(mainDbPath) === realpathSync(worktreeDbPath)) return zero; + } catch (e) { + logWarning("db", `realpathSync failed: ${e.message}`); + } + // Sanitize path: reject any characters that could break ATTACH syntax. + // ATTACH DATABASE doesn't support parameterized paths in all providers, + // so we use strict allowlist validation instead. + if (/['";\x00]/.test(worktreeDbPath)) { + logError( + "db", + "worktree DB reconciliation failed: path contains unsafe characters", + ); + return zero; + } + if (!_getAdapter()) { + const opened = openDatabase(mainDbPath); + if (!opened) { + logError("db", "worktree DB reconciliation failed: cannot open main DB"); + return zero; + } + } + const adapter = _getAdapter(); + const conflicts = []; + try { + adapter.exec(`ATTACH DATABASE '${worktreeDbPath}' AS wt`); + try { + const wtInfo = adapter.prepare("PRAGMA wt.table_info('decisions')").all(); + const hasMadeBy = wtInfo.some((col) => col["name"] === "made_by"); + const wtMilestoneInfo = adapter + .prepare("PRAGMA wt.table_info('milestones')") + .all(); + const hasProductResearch = wtMilestoneInfo.some( + (col) => col["name"] === "product_research_json", + ); + const decConf = adapter + .prepare( + `SELECT m.id FROM decisions m INNER JOIN wt.decisions w ON m.id = w.id WHERE m.decision != w.decision OR m.choice != w.choice OR m.rationale != w.rationale OR ${hasMadeBy ? 
"m.made_by != w.made_by" : "'agent' != 'agent'"} OR m.superseded_by IS NOT w.superseded_by`, + ) + .all(); + for (const row of decConf) + conflicts.push(`decision ${row["id"]}: modified in both`); + const reqConf = adapter + .prepare( + `SELECT m.id FROM requirements m INNER JOIN wt.requirements w ON m.id = w.id WHERE m.description != w.description OR m.status != w.status OR m.notes != w.notes OR m.superseded_by IS NOT w.superseded_by`, + ) + .all(); + for (const row of reqConf) + conflicts.push(`requirement ${row["id"]}: modified in both`); + const merged = { + decisions: 0, + requirements: 0, + artifacts: 0, + milestones: 0, + slices: 0, + tasks: 0, + memories: 0, + verification_evidence: 0, + }; + function countChanges(result) { + return typeof result === "object" && result !== null + ? (result.changes ?? 0) + : 0; + } + adapter.exec("BEGIN"); + try { + merged.decisions = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO decisions ( + id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by + ) + SELECT id, when_context, scope, decision, choice, rationale, revisable, ${hasMadeBy ? "made_by" : "'agent'"}, superseded_by FROM wt.decisions + `) + .run(), + ); + merged.requirements = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO requirements ( + id, class, status, description, why, source, primary_owner, + supporting_slices, validation, notes, full_content, superseded_by + ) + SELECT id, class, status, description, why, source, primary_owner, + supporting_slices, validation, notes, full_content, superseded_by + FROM wt.requirements + `) + .run(), + ); + merged.artifacts = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO artifacts ( + path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at + ) + SELECT path, artifact_type, milestone_id, slice_id, task_id, full_content, imported_at + FROM wt.artifacts + `) + .run(), + ); + // Merge milestones — worktree may have updated status/planning fields + merged.milestones = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO milestones ( + id, title, status, depends_on, created_at, completed_at, + vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, product_research_json + ) + SELECT id, title, status, depends_on, created_at, completed_at, + vision, success_criteria, key_risks, proof_strategy, + verification_contract, verification_integration, verification_operational, verification_uat, + definition_of_done, requirement_coverage, boundary_map_markdown, vision_meeting_json, ${hasProductResearch ? "product_research_json" : "''"} + FROM wt.milestones + `) + .run(), + ); + // Merge slices — preserve worktree progress but never downgrade completed status (#2558). + // Uses INSERT OR REPLACE with a subquery that picks the best status — if the main DB + // already has a completed slice, keep that status even if the worktree copy is stale. 
+ merged.slices = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO slices ( + milestone_id, id, title, status, risk, depends, demo, created_at, completed_at, + full_summary_md, full_uat_md, goal, success_criteria, proof_level, + integration_closure, observability_impact, adversarial_partner, adversarial_combatant, + adversarial_architect, planning_meeting_json, sequence, replan_triggered_at + ) + SELECT w.milestone_id, w.id, w.title, + CASE + WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') + THEN m.status ELSE w.status + END, + w.risk, w.depends, w.demo, w.created_at, + CASE + WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') + THEN m.completed_at ELSE w.completed_at + END, + w.full_summary_md, w.full_uat_md, w.goal, w.success_criteria, w.proof_level, + w.integration_closure, w.observability_impact, w.adversarial_partner, w.adversarial_combatant, + w.adversarial_architect, w.planning_meeting_json, w.sequence, w.replan_triggered_at + FROM wt.slices w + LEFT JOIN slices m ON m.milestone_id = w.milestone_id AND m.id = w.id + `) + .run(), + ); + // Merge tasks — preserve execution results, never downgrade completed status (#2558) + merged.tasks = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO tasks ( + milestone_id, slice_id, id, title, status, one_liner, narrative, + verification_result, duration, completed_at, blocker_discovered, + deviations, known_issues, key_files, key_decisions, full_summary_md, + description, estimate, files, verify, inputs, expected_output, + observability_impact, full_plan_md, sequence + ) + SELECT w.milestone_id, w.slice_id, w.id, w.title, + CASE + WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') + THEN m.status ELSE w.status + END, + w.one_liner, w.narrative, + w.verification_result, w.duration, + CASE + WHEN m.status IN ('complete', 'done') AND w.status NOT IN ('complete', 'done') + THEN m.completed_at ELSE w.completed_at + END, + w.blocker_discovered, + w.deviations, w.known_issues, w.key_files, w.key_decisions, w.full_summary_md, + w.description, w.estimate, w.files, w.verify, w.inputs, w.expected_output, + w.observability_impact, w.full_plan_md, w.sequence + FROM wt.tasks w + LEFT JOIN tasks m ON m.milestone_id = w.milestone_id AND m.slice_id = w.slice_id AND m.id = w.id + `) + .run(), + ); + // Merge memories — keep worktree-learned insights + merged.memories = countChanges( + adapter + .prepare(` + INSERT OR REPLACE INTO memories ( + seq, id, category, content, confidence, source_unit_type, source_unit_id, + created_at, updated_at, superseded_by, hit_count + ) + SELECT seq, id, category, content, confidence, source_unit_type, source_unit_id, + created_at, updated_at, superseded_by, hit_count + FROM wt.memories + `) + .run(), + ); + // Merge verification evidence — append-only, use INSERT OR IGNORE to avoid duplicates + merged.verification_evidence = countChanges( + adapter + .prepare(` + INSERT OR IGNORE INTO verification_evidence ( + task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at + ) + SELECT task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at + FROM wt.verification_evidence + `) + .run(), + ); + adapter.exec("COMMIT"); + } catch (txErr) { + try { + adapter.exec("ROLLBACK"); + } catch (e) { + logWarning("db", `rollback failed: ${e.message}`); + } + throw txErr; + } + return { ...merged, conflicts }; + } finally { + try { + adapter.exec("DETACH DATABASE wt"); + } catch (e) 
{ + logWarning("db", `detach worktree DB failed: ${e.message}`); + } + } + } catch (err) { + logError("db", "worktree DB reconciliation failed", { + error: err.message, + }); + return { ...zero, conflicts }; + } +} +
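For reviewers, a minimal sketch of the copy/reconcile cycle around the two exports above. The call site, paths, and error handling here are illustrative assumptions and not part of the patch; only copyWorktreeDb and reconcileWorktreeDb (with the signatures shown in sf-db-worktree.js) are taken from it.

// Hypothetical caller — paths below are assumptions for illustration only.
import { copyWorktreeDb, reconcileWorktreeDb } from "./sf-db/sf-db-worktree.js";

const mainDbPath = "/repo/.sf/main.db";        // assumed main engine DB location
const worktreeDbPath = "/repo-wt/.sf/main.db"; // assumed per-worktree copy

// 1) Seed the worktree with a snapshot of the main DB before isolated work starts.
if (!copyWorktreeDb(mainDbPath, worktreeDbPath)) {
  throw new Error("could not seed worktree DB");
}

// ... isolated work writes to the worktree copy ...

// 2) Merge the worktree's rows back into the main DB. The return value carries
//    per-table merge counts plus a conflicts list for decisions/requirements
//    that were modified on both sides.
const result = reconcileWorktreeDb(mainDbPath, worktreeDbPath);
if (result.conflicts.length > 0) {
  console.warn("needs manual review:", result.conflicts);
}
console.log(`merged ${result.tasks} tasks, ${result.slices} slices`);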