fix(web/inspect): read live .sf/sf.db SQLite instead of obsolete sf-db.json
Some checks are pending
CI / detect-changes (push) Waiting to run
CI / docs-check (push) Blocked by required conditions
CI / lint (push) Blocked by required conditions
CI / build (push) Blocked by required conditions
CI / integration-tests (push) Blocked by required conditions
CI / windows-portability (push) Blocked by required conditions
CI / rtk-portability (linux, blacksmith-4vcpu-ubuntu-2404) (push) Blocked by required conditions
CI / rtk-portability (macos, macos-15) (push) Blocked by required conditions
CI / rtk-portability (windows, blacksmith-4vcpu-windows-2025) (push) Blocked by required conditions

The earlier collectInspectData read .sf/sf-db.json, a JSON projection
file that SF stopped generating after the DB-first runtime landed.
.sf/sf-db.json no longer exists in any modern repo (verified absent
in this checkout), so /api/inspect was returning an empty payload
every time.

Replace with a read-only node:sqlite query against the live database:
  - schemaVersion via MAX(version) FROM schema_version
  - counts from COUNT(*) FROM {decisions,requirements,artifacts}
  - recentDecisions ordered by decisions.seq DESC LIMIT 5
  - recentRequirements ordered by requirements.id DESC LIMIT 5

The DB is opened readOnly so the autonomous loop's writer lock isn't
contested, and any failure (corrupt / locked / schema-drift) returns
an empty payload instead of 500-ing so the operator endpoint stays
available.

This is the small surgical half of the broader web-sf-information-
drift gap: web has no API surfaces for self-feedback, memories,
reflection reports, or uok_messages bus state. That broader integration
work is filed as a separate self-feedback entry for slice planning.

Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
Mikael Hugo 2026-05-17 00:18:46 +02:00
parent bde55dfc87
commit 6481e54fec

View file

@ -1,11 +1,20 @@
import { existsSync, readFileSync } from "node:fs";
import { existsSync } from "node:fs";
import { join } from "node:path";
import { DatabaseSync } from "node:sqlite";
import type { InspectData } from "../../web/lib/remaining-command-types.ts";
import { resolveBridgeRuntimeConfig } from "./bridge-service.ts";
/**
* Collects project inspection data by reading sf-db.json directly.
* No child process needed — sf-db.json is plain JSON with no .js imports.
* Collects project inspection data by reading the live .sf/sf.db SQLite
* database (decisions, requirements, artifacts, schema_version). The
* earlier implementation read a now-obsolete .sf/sf-db.json projection
* file that SF stopped generating when the DB-first runtime landed —
* the endpoint returned an empty payload on every modern repo because
* the JSON file no longer exists.
*
* Read-only mode; never writes to the DB. The autonomous loop holds the
* WAL writer; this query path uses readOnly so concurrent reads are
* safe without contention.
*/
export async function collectInspectData(
projectCwdOverride?: string,
@ -13,50 +22,80 @@ export async function collectInspectData(
const config = resolveBridgeRuntimeConfig(undefined, projectCwdOverride);
const { projectCwd } = config;
const sfDir = join(projectCwd, ".sf");
const dbPath = join(sfDir, "sf-db.json");
const dbPath = join(projectCwd, ".sf", "sf.db");
let schemaVersion: number | null = null;
let decisions: Array<{
id: string;
decision: string;
choice: string;
[k: string]: unknown;
}> = [];
let requirements: Array<{
id: string;
status: string;
description: string;
[k: string]: unknown;
}> = [];
let artifacts: unknown[] = [];
if (existsSync(dbPath)) {
try {
const db = JSON.parse(readFileSync(dbPath, "utf-8"));
schemaVersion = db.schema_version ?? null;
decisions = db.decisions || [];
requirements = db.requirements || [];
artifacts = db.artifacts || [];
} catch {
// Corrupt or unreadable — return empty state
}
if (!existsSync(dbPath)) {
return {
schemaVersion: null,
counts: { decisions: 0, requirements: 0, artifacts: 0 },
recentDecisions: [],
recentRequirements: [],
};
}
return {
schemaVersion,
counts: {
decisions: decisions.length,
requirements: requirements.length,
artifacts: artifacts.length,
},
recentDecisions: decisions
.slice(-5)
.reverse()
.map((d) => ({ id: d.id, decision: d.decision, choice: d.choice })),
recentRequirements: requirements
.slice(-5)
.reverse()
.map((r) => ({ id: r.id, status: r.status, description: r.description })),
};
let db: DatabaseSync | null = null;
try {
db = new DatabaseSync(dbPath, { readOnly: true });
const schemaRow = db
.prepare("SELECT MAX(version) AS v FROM schema_version")
.get() as { v: number | null } | undefined;
const schemaVersion = schemaRow?.v ?? null;
const decisionsCount = (
db.prepare("SELECT COUNT(*) AS n FROM decisions").get() as {
n: number;
}
).n;
const requirementsCount = (
db.prepare("SELECT COUNT(*) AS n FROM requirements").get() as {
n: number;
}
).n;
const artifactsCount = (
db.prepare("SELECT COUNT(*) AS n FROM artifacts").get() as {
n: number;
}
).n;
const recentDecisions = db
.prepare(
"SELECT id, decision, choice FROM decisions ORDER BY seq DESC LIMIT 5",
)
.all() as Array<{ id: string; decision: string; choice: string }>;
// requirements has no monotonic seq column — order by id descending,
// which matches the R001/R002/.../R0NN naming so newest first.
const recentRequirements = db
.prepare(
"SELECT id, status, description FROM requirements ORDER BY id DESC LIMIT 5",
)
.all() as Array<{ id: string; status: string; description: string }>;
return {
schemaVersion,
counts: {
decisions: decisionsCount,
requirements: requirementsCount,
artifacts: artifactsCount,
},
recentDecisions,
recentRequirements,
};
} catch {
// Corrupt / locked / schema-drift — return empty payload rather than
// 500 so the operator-facing endpoint stays available.
return {
schemaVersion: null,
counts: { decisions: 0, requirements: 0, artifacts: 0 },
recentDecisions: [],
recentRequirements: [],
};
} finally {
try {
db?.close();
} catch {
// best-effort
}
}
}