refactor(gsd): enforce single-writer invariant for engine DB
Route every INSERT/UPDATE/DELETE/REPLACE against .gsd/gsd.db through typed wrappers in gsd-db.ts, and add a structural test that fails CI if a new bypass appears. Previously 13 call sites across 10 modules reached into _getAdapter() and issued raw write SQL, making the "single writer" architecture unenforceable in-process.

New wrappers in gsd-db.ts: deleteDecisionById, deleteRequirementById, deleteArtifactByPath, clearEngineHierarchy, insertOrIgnoreSlice, insertOrIgnoreTask, setSliceReplanTriggeredAt, upsertQualityGate, restoreManifest, bulkInsertLegacyHierarchy, readTransaction, and eight memory-store helpers (insertMemoryRow, rewriteMemoryId, etc.).

workflow-manifest.restore() is lifted verbatim into gsd-db.restoreManifest with a type-only import of StateManifest to avoid a circular runtime dependency. tools/workflow-tool-executors and workflow-manifest.snapshotState swap their manual BEGIN DEFERRED/COMMIT/ROLLBACK dance for readTransaction(). unit-ownership.ts stays outside the invariant: it writes to a separate .gsd/unit-claims.db by design.

tests/single-writer-invariant.test.ts walks every .ts file under gsd/ (excluding tests/ and the allowlist) and fails with a grouped violations list on any regex match for raw .prepare/.exec writes, plus a positive assertion that gsd-db.ts still exports each expected wrapper so the structural test can't silently become a no-op.

https://claude.ai/code/session_01FZgXD3bjcddoFYsTEY6JhC
This commit is contained in:
parent 2e53b3cbad
commit 5e9196e5c9

12 changed files with 744 additions and 258 deletions
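A minimal sketch of the calling convention this commit enforces. The wrapper name comes from the diff below; the surrounding function and its argument are hypothetical:

```ts
import { deleteDecisionById } from "./gsd-db.js";

// Hypothetical call site. Before this commit, a module could reach through
// the escape hatch and issue raw SQL:
//   _getAdapter()?.prepare("DELETE FROM decisions WHERE id = :id").run({ ":id": id });
// Under the invariant, the only sanctioned path is the typed wrapper:
export function rollbackDecision(id: string): void {
  deleteDecisionById(id);
}
```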
@@ -488,7 +488,7 @@ export async function handleCleanupProjects(args: string, ctx: ExtensionCommandContext)
  * Prints counts of recovered items and the resulting project phase.
  */
 export async function handleRecover(ctx: ExtensionCommandContext, basePath: string): Promise<void> {
-  const { isDbAvailable: dbAvailable, _getAdapter, transaction: dbTransaction } = await import("./gsd-db.js");
+  const { isDbAvailable: dbAvailable, clearEngineHierarchy, transaction: dbTransaction } = await import("./gsd-db.js");
   const { migrateHierarchyToDb } = await import("./md-importer.js");
   const { invalidateStateCache } = await import("./state.js");
@@ -498,12 +498,12 @@ export async function handleRecover(ctx: ExtensionCommandContext, basePath: string): Promise<void> {
   }

   try {
-    // 1. Delete + re-populate inside a single transaction for atomicity
-    const db = _getAdapter()!;
+    // 1. Delete + re-populate inside a single transaction for atomicity.
+    // clearEngineHierarchy() uses transaction() internally but transaction()
+    // is re-entrant, so wrapping in dbTransaction() keeps the whole
+    // clear+repopulate atomic.
     const counts = dbTransaction(() => {
-      db.exec("DELETE FROM tasks");
-      db.exec("DELETE FROM slices");
-      db.exec("DELETE FROM milestones");
+      clearEngineHierarchy();
       return migrateHierarchyToDb(basePath);
     });
@@ -346,8 +346,7 @@ export async function saveRequirementToDb(
   } catch (diskErr) {
     logError('manifest', 'disk write failed, rolling back DB row', { fn: 'saveRequirementToDb', error: String((diskErr as Error).message) });
     try {
-      const rollbackAdapter = db._getAdapter();
-      rollbackAdapter?.prepare('DELETE FROM requirements WHERE id = :id').run({ ':id': id });
+      db.deleteRequirementById(id);
     } catch (rollbackErr) {
       logError('manifest', 'SPLIT BRAIN: disk write failed AND DB rollback failed — DB has orphaned row', { fn: 'saveRequirementToDb', id, error: String((rollbackErr as Error).message) });
     }
@@ -471,7 +470,7 @@ export async function saveDecisionToDb(
   } catch (diskErr) {
     logError('manifest', 'disk write failed, rolling back DB row', { fn: 'saveDecisionToDb', error: String((diskErr as Error).message) });
     try {
-      adapter?.prepare('DELETE FROM decisions WHERE id = :id').run({ ':id': id });
+      db.deleteDecisionById(id);
     } catch (rollbackErr) {
       logError('manifest', 'SPLIT BRAIN: disk write failed AND DB rollback failed — DB has orphaned row', { fn: 'saveDecisionToDb', id, error: String((rollbackErr as Error).message) });
     }
@@ -714,8 +713,7 @@ export async function saveArtifactToDb(
     await saveFile(fullPath, opts.content);
   } catch (diskErr) {
     logError('manifest', 'disk write failed, rolling back DB row', { fn: 'saveArtifactToDb', error: String((diskErr as Error).message) });
-    const rollbackAdapter = db._getAdapter();
-    rollbackAdapter?.prepare('DELETE FROM artifacts WHERE path = :path').run({ ':path': opts.path });
+    db.deleteArtifactByPath(opts.path);
     throw diskErr;
   }
 }
@@ -4,6 +4,21 @@
 //
 // Exposes a unified sync API for decisions and requirements storage.
 // Schema is initialized on first open with WAL mode for file-backed DBs.
 //
+// ─── Single-writer invariant ─────────────────────────────────────────────
+// This file is the ONLY place in the codebase that issues write SQL
+// (INSERT / UPDATE / DELETE / REPLACE / BEGIN-COMMIT transactions) against
+// the engine database at `.gsd/gsd.db`. All other modules must call the
+// typed wrappers exported here. The structural test
+// `tests/single-writer-invariant.test.ts` fails CI if a new bypass appears.
+//
+// `_getAdapter()` is retained for read-only SELECTs in query modules
+// (context-store, memory-store queries, doctor checks, projections).
+// Do NOT use it for writes — add a wrapper here instead.
+//
+// The separate `.gsd/unit-claims.db` managed by `unit-ownership.ts` is an
+// intentionally independent store for cross-worktree claim races and is
+// excluded from this invariant.
+
 import { createRequire } from "node:module";
 import { existsSync, copyFileSync, mkdirSync, realpathSync } from "node:fs";
@@ -12,6 +27,10 @@ import type { Decision, Requirement, GateRow, GateId, GateScope, GateStatus, Gat
 import { GSDError, GSD_STALE_STATE } from "./errors.js";
 import { getGateIdsForTurn, type OwnerTurn } from "./gate-registry.js";
 import { logError, logWarning } from "./workflow-logger.js";
+// Type-only import to avoid a circular runtime dep. The runtime side of
+// workflow-manifest.ts depends on this file, but the StateManifest type is
+// pure structure with no runtime coupling.
+import type { StateManifest } from "./workflow-manifest.js";

 const _require = createRequire(import.meta.url);
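The type-only import above is what breaks the would-be cycle: `import type` is erased at compile time, so it adds no runtime module edge. A minimal sketch of the distinction, using hypothetical modules:

```ts
// b.ts (hypothetical)
export interface Manifest { version: number; }
export function loadManifest(): Manifest { return { version: 1 }; }

// a.ts (hypothetical)
// A runtime import creates a real module edge and can close a cycle:
//   import { loadManifest } from "./b.js";
// A type-only import is erased by the TypeScript compiler, so even if b.ts
// imports a.ts at runtime, no runtime cycle forms:
import type { Manifest } from "./b.js";

export function describe(m: Manifest): string {
  return `manifest v${m.version}`;
}
```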
@@ -922,6 +941,39 @@ export function transaction<T>(fn: () => T): T {
   }
 }

+/**
+ * Wrap a block of reads in a DEFERRED transaction so that all SELECTs observe
+ * a consistent snapshot of the DB even if a concurrent writer commits between
+ * them. Use this for multi-query read flows (e.g. tool executors that query
+ * milestone + slices + counts and want one snapshot). Re-entrant — if already
+ * inside a transaction, runs fn() without starting a nested one.
+ */
+export function readTransaction<T>(fn: () => T): T {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+
+  if (_txDepth > 0) {
+    _txDepth++;
+    try {
+      return fn();
+    } finally {
+      _txDepth--;
+    }
+  }
+
+  _txDepth++;
+  currentDb.exec("BEGIN DEFERRED");
+  try {
+    const result = fn();
+    currentDb.exec("COMMIT");
+    return result;
+  } catch (err) {
+    try { currentDb.exec("ROLLBACK"); } catch { /* swallow */ }
+    throw err;
+  } finally {
+    _txDepth--;
+  }
+}
+
 export function insertDecision(d: Omit<Decision, "seq">): void {
   if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
   currentDb.prepare(
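A usage sketch of the new helper (the milestone id is hypothetical; getMilestone and getMilestoneSlices are existing read helpers visible elsewhere in this diff):

```ts
import { readTransaction, getMilestone, getMilestoneSlices } from "./gsd-db.js";

// Both SELECTs observe one snapshot: a writer committing between the two
// reads cannot pair a milestone with slices from a newer DB state.
const status = readTransaction(() => {
  const milestone = getMilestone("M1"); // "M1" is a hypothetical id
  const slices = milestone ? getMilestoneSlices("M1") : [];
  return { milestone, sliceCount: slices.length };
});
```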
@@ -2451,3 +2503,409 @@ export function getPendingGateCountForTurn(
 ): number {
   return getPendingGatesForTurn(milestoneId, sliceId, turn).length;
 }
+
+// ─── Single-writer bypass wrappers ───────────────────────────────────────
+// These wrappers exist so modules outside this file never need to call
+// `_getAdapter()` for writes. Each one is a byte-equivalent replacement for
+// a raw prepare/run previously issued from another module. Keep them
+// minimal and direct — they exist to hold SQL text in one place, not to
+// add new behavior.
+
+/** Delete a decision row by id. Used by db-writer.ts rollback on disk-write failure. */
+export function deleteDecisionById(id: string): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare("DELETE FROM decisions WHERE id = :id").run({ ":id": id });
+}
+
+/** Delete a requirement row by id. Used by db-writer.ts rollback on disk-write failure. */
+export function deleteRequirementById(id: string): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare("DELETE FROM requirements WHERE id = :id").run({ ":id": id });
+}
+
+/** Delete an artifact row by path. Used by db-writer.ts rollback on disk-write failure. */
+export function deleteArtifactByPath(path: string): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare("DELETE FROM artifacts WHERE path = :path").run({ ":path": path });
+}
+
+/**
+ * Drop all rows from tasks/slices/milestones in dependency order inside a
+ * transaction. Used by `gsd recover` to rebuild engine state from markdown.
+ */
+export function clearEngineHierarchy(): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  transaction(() => {
+    currentDb!.exec("DELETE FROM tasks");
+    currentDb!.exec("DELETE FROM slices");
+    currentDb!.exec("DELETE FROM milestones");
+  });
+}
+
+/**
+ * INSERT OR IGNORE a slice during event replay (workflow-reconcile.ts).
+ * Strict insert-or-ignore semantics are required here to avoid the
+ * `insertSlice` ON CONFLICT path that could downgrade an already-completed
+ * slice back to 'pending'.
+ */
+export function insertOrIgnoreSlice(args: {
+  milestoneId: string;
+  sliceId: string;
+  title: string;
+  createdAt: string;
+}): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare(
+    `INSERT OR IGNORE INTO slices (milestone_id, id, title, status, created_at)
+     VALUES (:mid, :sid, :title, 'pending', :ts)`,
+  ).run({
+    ":mid": args.milestoneId,
+    ":sid": args.sliceId,
+    ":title": args.title,
+    ":ts": args.createdAt,
+  });
+}
+
+/**
+ * INSERT OR IGNORE a task during event replay (workflow-reconcile.ts).
+ * Same rationale as `insertOrIgnoreSlice`.
+ */
+export function insertOrIgnoreTask(args: {
+  milestoneId: string;
+  sliceId: string;
+  taskId: string;
+  title: string;
+  createdAt: string;
+}): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare(
+    `INSERT OR IGNORE INTO tasks (milestone_id, slice_id, id, title, status, created_at)
+     VALUES (:mid, :sid, :tid, :title, 'pending', :ts)`,
+  ).run({
+    ":mid": args.milestoneId,
+    ":sid": args.sliceId,
+    ":tid": args.taskId,
+    ":title": args.title,
+    ":ts": args.createdAt,
+  });
+}
+
+/**
+ * Stamp the `replan_triggered_at` column on a slice. Used by triage-resolution
+ * when a user capture requests a replan so the dispatcher can detect the
+ * trigger via DB in addition to the on-disk REPLAN-TRIGGER.md marker.
+ */
+export function setSliceReplanTriggeredAt(milestoneId: string, sliceId: string, ts: string): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare(
+    "UPDATE slices SET replan_triggered_at = :ts WHERE milestone_id = :mid AND id = :sid",
+  ).run({ ":ts": ts, ":mid": milestoneId, ":sid": sliceId });
+}
+
+/**
+ * INSERT OR REPLACE a quality_gates row. Used by milestone-validation-gates.ts
+ * to persist milestone-level (MV*) gate outcomes after validate-milestone runs.
+ */
+export function upsertQualityGate(g: {
+  milestoneId: string;
+  sliceId: string;
+  gateId: string;
+  scope: string;
+  taskId: string;
+  status: string;
+  verdict: string;
+  rationale: string;
+  findings: string;
+  evaluatedAt: string;
+}): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare(
+    `INSERT OR REPLACE INTO quality_gates
+     (milestone_id, slice_id, gate_id, scope, task_id, status, verdict, rationale, findings, evaluated_at)
+     VALUES (:mid, :sid, :gid, :scope, :tid, :status, :verdict, :rationale, :findings, :evaluated_at)`,
+  ).run({
+    ":mid": g.milestoneId,
+    ":sid": g.sliceId,
+    ":gid": g.gateId,
+    ":scope": g.scope,
+    ":tid": g.taskId,
+    ":status": g.status,
+    ":verdict": g.verdict,
+    ":rationale": g.rationale,
+    ":findings": g.findings,
+    ":evaluated_at": g.evaluatedAt,
+  });
+}
+
+/**
+ * Atomically replace all workflow state from a manifest. Lifted verbatim from
+ * workflow-manifest.ts so the single-writer invariant holds. Only touches
+ * engine tables + decisions. Does NOT modify artifacts or memories.
+ */
+export function restoreManifest(manifest: StateManifest): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  const db = currentDb;
+
+  transaction(() => {
+    // Clear engine tables (order matters for foreign-key-like consistency)
+    db.exec("DELETE FROM verification_evidence");
+    db.exec("DELETE FROM tasks");
+    db.exec("DELETE FROM slices");
+    db.exec("DELETE FROM milestones");
+    db.exec("DELETE FROM decisions WHERE 1=1");
+
+    // Restore milestones
+    const msStmt = db.prepare(
+      `INSERT INTO milestones (id, title, status, depends_on, created_at, completed_at,
+        vision, success_criteria, key_risks, proof_strategy,
+        verification_contract, verification_integration, verification_operational, verification_uat,
+        definition_of_done, requirement_coverage, boundary_map_markdown)
+       VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+    );
+    for (const m of manifest.milestones) {
+      msStmt.run(
+        m.id, m.title, m.status,
+        JSON.stringify(m.depends_on), m.created_at, m.completed_at,
+        m.vision, JSON.stringify(m.success_criteria), JSON.stringify(m.key_risks),
+        JSON.stringify(m.proof_strategy),
+        m.verification_contract, m.verification_integration, m.verification_operational, m.verification_uat,
+        JSON.stringify(m.definition_of_done), m.requirement_coverage, m.boundary_map_markdown,
+      );
+    }
+
+    // Restore slices
+    const slStmt = db.prepare(
+      `INSERT INTO slices (milestone_id, id, title, status, risk, depends, demo,
+        created_at, completed_at, full_summary_md, full_uat_md,
+        goal, success_criteria, proof_level, integration_closure, observability_impact,
+        sequence, replan_triggered_at)
+       VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+    );
+    for (const s of manifest.slices) {
+      slStmt.run(
+        s.milestone_id, s.id, s.title, s.status, s.risk,
+        JSON.stringify(s.depends), s.demo,
+        s.created_at, s.completed_at, s.full_summary_md, s.full_uat_md,
+        s.goal, s.success_criteria, s.proof_level, s.integration_closure, s.observability_impact,
+        s.sequence, s.replan_triggered_at,
+      );
+    }
+
+    // Restore tasks
+    const tkStmt = db.prepare(
+      `INSERT INTO tasks (milestone_id, slice_id, id, title, status,
+        one_liner, narrative, verification_result, duration, completed_at,
+        blocker_discovered, deviations, known_issues, key_files, key_decisions,
+        full_summary_md, description, estimate, files, verify,
+        inputs, expected_output, observability_impact, sequence)
+       VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+    );
+    for (const t of manifest.tasks) {
+      tkStmt.run(
+        t.milestone_id, t.slice_id, t.id, t.title, t.status,
+        t.one_liner, t.narrative, t.verification_result, t.duration, t.completed_at,
+        t.blocker_discovered ? 1 : 0, t.deviations, t.known_issues,
+        JSON.stringify(t.key_files), JSON.stringify(t.key_decisions),
+        t.full_summary_md, t.description, t.estimate, JSON.stringify(t.files), t.verify,
+        JSON.stringify(t.inputs), JSON.stringify(t.expected_output),
+        t.observability_impact, t.sequence,
+      );
+    }
+
+    // Restore decisions
+    const dcStmt = db.prepare(
+      `INSERT INTO decisions (seq, id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by)
+       VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+    );
+    for (const d of manifest.decisions) {
+      dcStmt.run(d.seq, d.id, d.when_context, d.scope, d.decision, d.choice, d.rationale, d.revisable, d.made_by, d.superseded_by);
+    }
+
+    // Restore verification evidence
+    const evStmt = db.prepare(
+      `INSERT INTO verification_evidence (task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at)
+       VALUES (?, ?, ?, ?, ?, ?, ?, ?)`,
+    );
+    for (const e of manifest.verification_evidence) {
+      evStmt.run(e.task_id, e.slice_id, e.milestone_id, e.command, e.exit_code, e.verdict, e.duration_ms, e.created_at);
+    }
+  });
+}
+
+// ─── Legacy markdown → DB bulk migration ─────────────────────────────────
+
+export interface LegacyMilestoneInsert {
+  id: string;
+  title: string;
+  status: string;
+}
+
+export interface LegacySliceInsert {
+  id: string;
+  milestoneId: string;
+  title: string;
+  status: string;
+  risk: string;
+  sequence: number;
+}
+
+export interface LegacyTaskInsert {
+  id: string;
+  sliceId: string;
+  milestoneId: string;
+  title: string;
+  status: string;
+  sequence: number;
+}
+
+/**
+ * Bulk delete + insert a legacy milestone hierarchy for markdown → DB migration.
+ * Used by workflow-migration.ts to populate engine tables from parsed ROADMAP/PLAN
+ * files. All operations run inside a single transaction.
+ */
+export function bulkInsertLegacyHierarchy(payload: {
+  milestones: LegacyMilestoneInsert[];
+  slices: LegacySliceInsert[];
+  tasks: LegacyTaskInsert[];
+  clearMilestoneIds: string[];
+  createdAt: string;
+}): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  const db = currentDb;
+  const { milestones, slices, tasks, clearMilestoneIds, createdAt } = payload;
+
+  if (clearMilestoneIds.length === 0) return;
+  const placeholders = clearMilestoneIds.map(() => "?").join(",");
+
+  transaction(() => {
+    db.prepare(`DELETE FROM tasks WHERE milestone_id IN (${placeholders})`).run(...clearMilestoneIds);
+    db.prepare(`DELETE FROM slices WHERE milestone_id IN (${placeholders})`).run(...clearMilestoneIds);
+    db.prepare(`DELETE FROM milestones WHERE id IN (${placeholders})`).run(...clearMilestoneIds);
+
+    const insertMilestone = db.prepare(
+      "INSERT INTO milestones (id, title, status, created_at) VALUES (?, ?, ?, ?)",
+    );
+    for (const m of milestones) {
+      insertMilestone.run(m.id, m.title, m.status, createdAt);
+    }
+
+    const insertSliceStmt = db.prepare(
+      "INSERT INTO slices (id, milestone_id, title, status, risk, depends, sequence, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
+    );
+    for (const s of slices) {
+      insertSliceStmt.run(s.id, s.milestoneId, s.title, s.status, s.risk, "[]", s.sequence, createdAt);
+    }
+
+    const insertTaskStmt = db.prepare(
+      "INSERT INTO tasks (id, slice_id, milestone_id, title, description, status, estimate, files, sequence) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
+    );
+    for (const t of tasks) {
+      insertTaskStmt.run(t.id, t.sliceId, t.milestoneId, t.title, "", t.status, "", "[]", t.sequence);
+    }
+  });
+}
+
+// ─── Memory store writers ────────────────────────────────────────────────
+// All memory writes go through gsd-db.ts so the single-writer invariant
+// holds. These are direct pass-throughs to the SQL previously in
+// memory-store.ts — same bindings, same behavior.
+
+export function insertMemoryRow(args: {
+  id: string;
+  category: string;
+  content: string;
+  confidence: number;
+  sourceUnitType: string | null;
+  sourceUnitId: string | null;
+  createdAt: string;
+  updatedAt: string;
+}): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare(
+    `INSERT INTO memories (id, category, content, confidence, source_unit_type, source_unit_id, created_at, updated_at)
+     VALUES (:id, :category, :content, :confidence, :source_unit_type, :source_unit_id, :created_at, :updated_at)`,
+  ).run({
+    ":id": args.id,
+    ":category": args.category,
+    ":content": args.content,
+    ":confidence": args.confidence,
+    ":source_unit_type": args.sourceUnitType,
+    ":source_unit_id": args.sourceUnitId,
+    ":created_at": args.createdAt,
+    ":updated_at": args.updatedAt,
+  });
+}
+
+export function rewriteMemoryId(placeholderId: string, realId: string): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare("UPDATE memories SET id = :real_id WHERE id = :placeholder").run({
+    ":real_id": realId,
+    ":placeholder": placeholderId,
+  });
+}
+
+export function updateMemoryContentRow(
+  id: string,
+  content: string,
+  confidence: number | undefined,
+  updatedAt: string,
+): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  if (confidence != null) {
+    currentDb.prepare(
+      "UPDATE memories SET content = :content, confidence = :confidence, updated_at = :updated_at WHERE id = :id",
+    ).run({ ":content": content, ":confidence": confidence, ":updated_at": updatedAt, ":id": id });
+  } else {
+    currentDb.prepare(
+      "UPDATE memories SET content = :content, updated_at = :updated_at WHERE id = :id",
+    ).run({ ":content": content, ":updated_at": updatedAt, ":id": id });
+  }
+}
+
+export function incrementMemoryHitCount(id: string, updatedAt: string): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare(
+    "UPDATE memories SET hit_count = hit_count + 1, updated_at = :updated_at WHERE id = :id",
+  ).run({ ":updated_at": updatedAt, ":id": id });
+}
+
+export function supersedeMemoryRow(oldId: string, newId: string, updatedAt: string): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare(
+    "UPDATE memories SET superseded_by = :new_id, updated_at = :updated_at WHERE id = :old_id",
+  ).run({ ":new_id": newId, ":updated_at": updatedAt, ":old_id": oldId });
+}
+
+export function markMemoryUnitProcessed(
+  unitKey: string,
+  activityFile: string,
+  processedAt: string,
+): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare(
+    `INSERT OR IGNORE INTO memory_processed_units (unit_key, activity_file, processed_at)
+     VALUES (:key, :file, :at)`,
+  ).run({ ":key": unitKey, ":file": activityFile, ":at": processedAt });
+}
+
+export function decayMemoriesBefore(cutoffTs: string, now: string): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare(
+    `UPDATE memories
+     SET confidence = MAX(0.1, confidence - 0.1), updated_at = :now
+     WHERE superseded_by IS NULL AND updated_at < :cutoff AND confidence > 0.1`,
+  ).run({ ":now": now, ":cutoff": cutoffTs });
+}
+
+export function supersedeLowestRankedMemories(limit: number, now: string): void {
+  if (!currentDb) throw new GSDError(GSD_STALE_STATE, "gsd-db: No database open");
+  currentDb.prepare(
+    `UPDATE memories SET superseded_by = 'CAP_EXCEEDED', updated_at = :now
+     WHERE id IN (
+       SELECT id FROM memories
+       WHERE superseded_by IS NULL
+       ORDER BY (confidence * (1.0 + hit_count * 0.1)) ASC
+       LIMIT :limit
+     )`,
+  ).run({ ":now": now, ":limit": limit });
+}
@@ -16,6 +16,7 @@ import {
   insertTask,
   openDatabase,
   transaction,
+  updateSliceStatus,
   _getAdapter,
 } from './gsd-db.js';
 import {
@@ -672,11 +673,8 @@ export function migrateHierarchyToDb(basePath: string): {
       return t.done && existsSync(summaryFile);
     });
     if (allTasksDone && hasSliceSummary) {
-      const adapter = _getAdapter();
-      if (adapter) {
-        adapter.prepare(
-          `UPDATE slices SET status = 'complete' WHERE id = :sid AND milestone_id = :mid`,
-        ).run({ ':sid': sliceEntry.id, ':mid': milestoneId });
+      if (_getAdapter()) {
+        updateSliceStatus(milestoneId, sliceEntry.id, 'complete');
         process.stderr.write(
           `gsd-migrate: ${milestoneId}/${sliceEntry.id} all tasks + slice summary complete — upgrading slice to complete\n`,
         );
@@ -3,7 +3,19 @@
 // Storage layer for auto-learned project memories. Follows context-store.ts patterns.
 // All functions degrade gracefully: return empty results when DB unavailable, never throw.

-import { isDbAvailable, _getAdapter, transaction } from './gsd-db.js';
+import {
+  isDbAvailable,
+  _getAdapter,
+  transaction,
+  insertMemoryRow,
+  rewriteMemoryId,
+  updateMemoryContentRow,
+  incrementMemoryHitCount,
+  supersedeMemoryRow,
+  markMemoryUnitProcessed,
+  decayMemoriesBefore,
+  supersedeLowestRankedMemories,
+} from './gsd-db.js';

 // ─── Types ──────────────────────────────────────────────────────────────────
@@ -170,28 +182,22 @@ export function createMemory(fields: {
     const now = new Date().toISOString();
     // Insert with a temporary placeholder ID — seq is auto-assigned
     const placeholder = `_TMP_${Date.now()}_${Math.random().toString(36).slice(2, 8)}`;
-    adapter.prepare(
-      `INSERT INTO memories (id, category, content, confidence, source_unit_type, source_unit_id, created_at, updated_at)
-       VALUES (:id, :category, :content, :confidence, :source_unit_type, :source_unit_id, :created_at, :updated_at)`,
-    ).run({
-      ':id': placeholder,
-      ':category': fields.category,
-      ':content': fields.content,
-      ':confidence': fields.confidence ?? 0.8,
-      ':source_unit_type': fields.source_unit_type ?? null,
-      ':source_unit_id': fields.source_unit_id ?? null,
-      ':created_at': now,
-      ':updated_at': now,
+    insertMemoryRow({
+      id: placeholder,
+      category: fields.category,
+      content: fields.content,
+      confidence: fields.confidence ?? 0.8,
+      sourceUnitType: fields.source_unit_type ?? null,
+      sourceUnitId: fields.source_unit_id ?? null,
+      createdAt: now,
+      updatedAt: now,
     });
-    // Derive the real ID from the assigned seq
+    // Derive the real ID from the assigned seq (SELECT is still fine via adapter)
     const row = adapter.prepare('SELECT seq FROM memories WHERE id = :id').get({ ':id': placeholder });
     if (!row) return placeholder; // fallback — should not happen
     const seq = row['seq'] as number;
     const realId = `MEM${String(seq).padStart(3, '0')}`;
-    adapter.prepare('UPDATE memories SET id = :real_id WHERE id = :placeholder').run({
-      ':real_id': realId,
-      ':placeholder': placeholder,
-    });
+    rewriteMemoryId(placeholder, realId);
     return realId;
   } catch {
     return null;
@@ -203,20 +209,9 @@ export function createMemory(fields: {
  */
 export function updateMemoryContent(id: string, content: string, confidence?: number): boolean {
   if (!isDbAvailable()) return false;
-  const adapter = _getAdapter();
-  if (!adapter) return false;

   try {
-    const now = new Date().toISOString();
-    if (confidence != null) {
-      adapter.prepare(
-        'UPDATE memories SET content = :content, confidence = :confidence, updated_at = :updated_at WHERE id = :id',
-      ).run({ ':content': content, ':confidence': confidence, ':updated_at': now, ':id': id });
-    } else {
-      adapter.prepare(
-        'UPDATE memories SET content = :content, updated_at = :updated_at WHERE id = :id',
-      ).run({ ':content': content, ':updated_at': now, ':id': id });
-    }
+    updateMemoryContentRow(id, content, confidence, new Date().toISOString());
     return true;
   } catch {
     return false;
@@ -228,13 +223,9 @@ export function updateMemoryContent(id: string, content: string, confidence?: number): boolean {
  */
 export function reinforceMemory(id: string): boolean {
   if (!isDbAvailable()) return false;
-  const adapter = _getAdapter();
-  if (!adapter) return false;

   try {
-    adapter.prepare(
-      'UPDATE memories SET hit_count = hit_count + 1, updated_at = :updated_at WHERE id = :id',
-    ).run({ ':updated_at': new Date().toISOString(), ':id': id });
+    incrementMemoryHitCount(id, new Date().toISOString());
     return true;
   } catch {
     return false;
@@ -246,13 +237,9 @@ export function reinforceMemory(id: string): boolean {
  */
 export function supersedeMemory(oldId: string, newId: string): boolean {
   if (!isDbAvailable()) return false;
-  const adapter = _getAdapter();
-  if (!adapter) return false;

   try {
-    adapter.prepare(
-      'UPDATE memories SET superseded_by = :new_id, updated_at = :updated_at WHERE id = :old_id',
-    ).run({ ':new_id': newId, ':updated_at': new Date().toISOString(), ':old_id': oldId });
+    supersedeMemoryRow(oldId, newId, new Date().toISOString());
     return true;
   } catch {
     return false;
@@ -284,14 +271,9 @@ export function isUnitProcessed(unitKey: string): boolean {
  */
 export function markUnitProcessed(unitKey: string, activityFile: string): boolean {
   if (!isDbAvailable()) return false;
-  const adapter = _getAdapter();
-  if (!adapter) return false;

   try {
-    adapter.prepare(
-      `INSERT OR IGNORE INTO memory_processed_units (unit_key, activity_file, processed_at)
-       VALUES (:key, :file, :at)`,
-    ).run({ ':key': unitKey, ':file': activityFile, ':at': new Date().toISOString() });
+    markMemoryUnitProcessed(unitKey, activityFile, new Date().toISOString());
     return true;
   } catch {
     return false;
@@ -310,7 +292,7 @@ export function decayStaleMemories(thresholdUnits = 20): void {
   if (!adapter) return;

   try {
-    // Find the timestamp of the Nth most recent processed unit
+    // Find the timestamp of the Nth most recent processed unit (read-only SELECT)
     const row = adapter.prepare(
       `SELECT processed_at FROM memory_processed_units
        ORDER BY processed_at DESC
@@ -320,11 +302,7 @@ export function decayStaleMemories(thresholdUnits = 20): void {
     if (!row) return; // not enough processed units yet

     const cutoff = row['processed_at'] as string;
-    adapter.prepare(
-      `UPDATE memories
-       SET confidence = MAX(0.1, confidence - 0.1), updated_at = :now
-       WHERE superseded_by IS NULL AND updated_at < :cutoff AND confidence > 0.1`,
-    ).run({ ':now': new Date().toISOString(), ':cutoff': cutoff });
+    decayMemoriesBefore(cutoff, new Date().toISOString());
   } catch {
     // non-fatal
   }
@@ -346,16 +324,7 @@ export function enforceMemoryCap(max = 50): void {
     if (count <= max) return;

     const excess = count - max;
-    // Batch update: supersede lowest-ranked active memories in a single statement
-    adapter.prepare(
-      `UPDATE memories SET superseded_by = 'CAP_EXCEEDED', updated_at = :now
-       WHERE id IN (
-         SELECT id FROM memories
-         WHERE superseded_by IS NULL
-         ORDER BY (confidence * (1.0 + hit_count * 0.1)) ASC
-         LIMIT :limit
-       )`,
-    ).run({ ':now': new Date().toISOString(), ':limit': excess });
+    supersedeLowestRankedMemories(excess, new Date().toISOString());
   } catch {
     // non-fatal
   }
@@ -11,7 +11,7 @@
  * dispatch rules, and state derivation. See gate-registry.ts.
  */

-import { _getAdapter } from "./gsd-db.js";
+import { isDbAvailable, upsertQualityGate } from "./gsd-db.js";
 import { getGatesForTurn } from "./gate-registry.js";

 /**
@@ -31,24 +31,23 @@ export function insertMilestoneValidationGates(
   verdict: string,
   evaluatedAt: string,
 ): void {
-  const db = _getAdapter();
-  if (!db) return;
+  if (!isDbAvailable()) return;

   const gateVerdict = verdict === "pass" ? "pass" : "flag";
   const milestoneGates = getGatesForTurn("validate-milestone");

   for (const def of milestoneGates) {
-    db.prepare(
-      `INSERT OR REPLACE INTO quality_gates
-       (milestone_id, slice_id, gate_id, scope, task_id, status, verdict, rationale, findings, evaluated_at)
-       VALUES (:mid, :sid, :gid, 'milestone', '', 'complete', :verdict, :rationale, '', :evaluated_at)`,
-    ).run({
-      ":mid": milestoneId,
-      ":sid": sliceId,
-      ":gid": def.id,
-      ":verdict": gateVerdict,
-      ":rationale": `${def.promptSection} — milestone validation verdict: ${verdict}`,
-      ":evaluated_at": evaluatedAt,
+    upsertQualityGate({
+      milestoneId,
+      sliceId,
+      gateId: def.id,
+      scope: "milestone",
+      taskId: "",
+      status: "complete",
+      verdict: gateVerdict,
+      rationale: `${def.promptSection} — milestone validation verdict: ${verdict}`,
+      findings: "",
+      evaluatedAt,
     });
   }
 }
@@ -0,0 +1,180 @@
+// Structural invariant: gsd-db.ts is the single writer for .gsd/gsd.db.
+//
+// No file under src/resources/extensions/gsd/ may issue raw write SQL
+// (INSERT/UPDATE/DELETE/REPLACE) or raw transaction control (BEGIN/COMMIT/
+// ROLLBACK via `.exec(...)`) against the engine database. Every bypass must
+// route through a typed wrapper exported from gsd-db.ts.
+//
+// Allowlist:
+//   - gsd-db.ts itself — the single writer
+//   - unit-ownership.ts — manages a separate .gsd/unit-claims.db for
+//     cross-worktree claim races; intentionally outside this invariant
+//   - tests/** — fixtures and direct DB inspection are fair game
+//
+// When this test fails, do not add a new suppression. Instead:
+//   1. Add a typed wrapper to gsd-db.ts that captures the SQL
+//   2. Switch the flagged site to call the wrapper
+//
+// See `.claude/plans/joyful-doodling-pony.md` for the full rationale.
+
+import test from "node:test";
+import assert from "node:assert/strict";
+import { readFileSync, readdirSync } from "node:fs";
+import { join, relative } from "node:path";
+
+const gsdDir = join(process.cwd(), "src/resources/extensions/gsd");
+
+const ALLOWLIST = new Set([
+  "gsd-db.ts",
+  "unit-ownership.ts",
+]);
+
+/** Walk the gsd extension dir and return all .ts files outside tests/. */
+function walkTsFiles(root: string): string[] {
+  const out: string[] = [];
+  const stack: string[] = [root];
+
+  while (stack.length > 0) {
+    const dir = stack.pop()!;
+    let entries;
+    try {
+      entries = readdirSync(dir, { withFileTypes: true });
+    } catch {
+      continue;
+    }
+
+    for (const ent of entries) {
+      const full = join(dir, ent.name);
+      if (ent.isDirectory()) {
+        // Skip tests/ — fixtures and direct DB inspection are expected there
+        if (ent.name === "tests") continue;
+        stack.push(full);
+        continue;
+      }
+      if (!ent.isFile()) continue;
+      if (!ent.name.endsWith(".ts")) continue;
+      // Skip dotfiles and backup/generated files
+      if (ent.name.startsWith(".")) continue;
+      out.push(full);
+    }
+  }
+
+  return out;
+}
+
+interface Violation {
+  file: string;
+  line: number;
+  snippet: string;
+  kind: string;
+}
+
+// Match .prepare("... INSERT|UPDATE|DELETE|REPLACE ...") in any quoting style.
+const PREPARE_WRITE_RE = /\.prepare\s*\(\s*[`'"][^`'"]*\b(INSERT|UPDATE|DELETE|REPLACE)\b/i;
+
+// Match .exec("... INSERT|UPDATE|DELETE|REPLACE ...") or raw BEGIN/COMMIT/ROLLBACK.
+const EXEC_WRITE_RE = /\.exec\s*\(\s*[`'"][^`'"]*\b(INSERT|UPDATE|DELETE|REPLACE|BEGIN|COMMIT|ROLLBACK)\b/i;
+
+test("no module outside gsd-db.ts issues raw write SQL against the engine DB", () => {
+  const files = walkTsFiles(gsdDir);
+  assert.ok(files.length >= 20, `Expected at least 20 .ts files under gsd/, found ${files.length}`);
+
+  const violations: Violation[] = [];
+
+  for (const abs of files) {
+    const rel = relative(gsdDir, abs);
+    const base = rel.split("/").pop()!;
+    if (ALLOWLIST.has(base)) continue;
+
+    let content: string;
+    try {
+      content = readFileSync(abs, "utf-8");
+    } catch {
+      continue;
+    }
+
+    const lines = content.split("\n");
+    for (let i = 0; i < lines.length; i++) {
+      const line = lines[i];
+
+      const prepareMatch = PREPARE_WRITE_RE.exec(line);
+      if (prepareMatch) {
+        violations.push({
+          file: rel,
+          line: i + 1,
+          snippet: line.trim(),
+          kind: `prepare(${prepareMatch[1].toUpperCase()})`,
+        });
+      }
+
+      const execMatch = EXEC_WRITE_RE.exec(line);
+      if (execMatch) {
+        violations.push({
+          file: rel,
+          line: i + 1,
+          snippet: line.trim(),
+          kind: `exec(${execMatch[1].toUpperCase()})`,
+        });
+      }
+    }
+  }
+
+  if (violations.length > 0) {
+    const lines = violations.map(
+      (v) => `  ${v.file}:${v.line} [${v.kind}] — ${v.snippet}`,
+    );
+    assert.fail(
+      `Found ${violations.length} raw write SQL bypass(es) outside gsd-db.ts:\n` +
+        lines.join("\n") +
+        "\n\nEach of these must be replaced with a typed wrapper exported from gsd-db.ts.",
+    );
+  }
+});
+
+test("gsd-db.ts exports the expected single-writer wrappers", async () => {
+  // Positive assertion — fail loudly if the module layout changes so this
+  // structural test can't silently become a no-op.
+  const db = await import("../gsd-db.js");
+
+  const expected = [
+    "deleteDecisionById",
+    "deleteRequirementById",
+    "deleteArtifactByPath",
+    "clearEngineHierarchy",
+    "insertOrIgnoreSlice",
+    "insertOrIgnoreTask",
+    "setSliceReplanTriggeredAt",
+    "upsertQualityGate",
+    "restoreManifest",
+    "bulkInsertLegacyHierarchy",
+    "readTransaction",
+    "insertMemoryRow",
+    "rewriteMemoryId",
+    "updateMemoryContentRow",
+    "incrementMemoryHitCount",
+    "supersedeMemoryRow",
+    "markMemoryUnitProcessed",
+    "decayMemoriesBefore",
+    "supersedeLowestRankedMemories",
+  ];
+
+  for (const name of expected) {
+    assert.ok(
+      typeof (db as Record<string, unknown>)[name] === "function",
+      `gsd-db.ts must export ${name} as a function`,
+    );
+  }
+});
+
+test("the invariant test touches every .ts module under gsd/ (sanity check)", () => {
+  const files = walkTsFiles(gsdDir);
+  // Rough sanity: ensure we're not accidentally walking an empty tree
+  assert.ok(files.length >= 30, `Expected to scan at least 30 .ts files, scanned ${files.length}`);
+
+  // Spot-check a couple of known files that must be included
+  const rels = files.map((f) => relative(gsdDir, f));
+  assert.ok(rels.includes("gsd-db.ts"), "walker must include gsd-db.ts");
+  assert.ok(rels.includes("memory-store.ts"), "walker must include memory-store.ts");
+  assert.ok(rels.includes("workflow-manifest.ts"), "walker must include workflow-manifest.ts");
+});
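A quick sketch of what the two patterns above do and don't flag, using illustrative inputs rather than lines from the repo:

```ts
const PREPARE_WRITE_RE = /\.prepare\s*\(\s*[`'"][^`'"]*\b(INSERT|UPDATE|DELETE|REPLACE)\b/i;
const EXEC_WRITE_RE = /\.exec\s*\(\s*[`'"][^`'"]*\b(INSERT|UPDATE|DELETE|REPLACE|BEGIN|COMMIT|ROLLBACK)\b/i;

// Flagged: write SQL through prepare()
console.log(PREPARE_WRITE_RE.test('adapter.prepare("DELETE FROM decisions WHERE id = :id")')); // true
// Flagged: raw transaction control through exec()
console.log(EXEC_WRITE_RE.test('db.exec("BEGIN DEFERRED")')); // true
// Not flagged: a read-only SELECT through the retained escape hatch
console.log(PREPARE_WRITE_RE.test('adapter.prepare("SELECT seq FROM memories WHERE id = :id")')); // false
```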
@@ -5,7 +5,7 @@ import {
   getMilestone,
   getSliceStatusSummary,
   getSliceTaskCounts,
-  _getAdapter,
+  readTransaction,
   saveGateResult,
 } from "../gsd-db.js";
 import { GATE_REGISTRY } from "../gate-registry.js";
@@ -616,12 +616,9 @@ export async function executeMilestoneStatus(
     };
   }

-  const adapter = _getAdapter()!;
-  adapter.exec("BEGIN");
   try {
+    return readTransaction(() => {
       const milestone = getMilestone(params.milestoneId);
       if (!milestone) {
-        adapter.exec("COMMIT");
         return {
           content: [{ type: "text", text: `Milestone ${params.milestoneId} not found in database.` }],
           details: { operation: "milestone_status", milestoneId: params.milestoneId, found: false },
@@ -635,8 +632,6 @@ export async function executeMilestoneStatus(
         taskCounts: getSliceTaskCounts(params.milestoneId, s.id),
       }));

-      adapter.exec("COMMIT");
-
       const result = {
         milestoneId: milestone.id,
         title: milestone.title,
@@ -651,10 +646,7 @@ export async function executeMilestoneStatus(
         content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
         details: { operation: "milestone_status", milestoneId: milestone.id, sliceCount: slices.length },
       };
-  } catch (txErr) {
-    try { adapter.exec("ROLLBACK"); } catch { /* swallow */ }
-    throw txErr;
-  }
+    });
   } catch (err) {
     const msg = err instanceof Error ? err.message : String(err);
     logWarning("tool", `gsd_milestone_status tool failed: ${msg}`);
@@ -111,14 +111,9 @@ export function executeReplan(
   // Also write replan_triggered_at column for DB-backed detection
   try {
     const req = createRequire(import.meta.url);
-    const { isDbAvailable, _getAdapter } = req("./gsd-db.js");
+    const { isDbAvailable, setSliceReplanTriggeredAt } = req("./gsd-db.js");
     if (isDbAvailable()) {
-      const adapter = _getAdapter();
-      if (adapter) {
-        adapter.prepare(
-          "UPDATE slices SET replan_triggered_at = :ts WHERE milestone_id = :mid AND id = :sid",
-        ).run({ ":ts": ts, ":mid": mid, ":sid": sid });
-      }
+      setSliceReplanTriggeredAt(mid, sid, ts);
     }
   } catch {
     // DB write is best-effort — disk file is the primary trigger for fallback path
@@ -1,6 +1,7 @@
 import {
   _getAdapter,
-  transaction,
+  readTransaction,
+  restoreManifest,
   type MilestoneRow,
   type SliceRow,
   type TaskRow,
@@ -74,9 +75,7 @@ export function snapshotState(): StateManifest {

   // Wrap all reads in a deferred transaction so the snapshot is consistent
   // (all SELECTs see the same DB state even if a concurrent write lands between them).
-  db.exec("BEGIN DEFERRED");
-
-  try {
+  return readTransaction(() => {
     const rawMilestones = db.prepare("SELECT * FROM milestones ORDER BY id").all() as Record<string, unknown>[];
     const milestones: MilestoneRow[] = rawMilestones.map((r) => ({
       id: r["id"] as string,
@@ -186,109 +185,15 @@ export function snapshotState(): StateManifest {
       verification_evidence,
     };

-    db.exec("COMMIT");
     return result;
-  } catch (err) {
-    try { db.exec("ROLLBACK"); } catch { /* ignore rollback failure */ }
-    throw err;
-  }
+  });
 }

 // ─── restore ─────────────────────────────────────────────────────────────

-/**
- * Atomically replace all workflow state from a manifest.
- * Runs inside a transaction — if any insert fails, no tables are modified.
- * Only touches engine tables + decisions. Does NOT modify artifacts or memories.
- */
-function restore(manifest: StateManifest): void {
-  const db = requireDb();
-
-  transaction(() => {
-    // Clear engine tables (order matters for foreign-key-like consistency)
-    db.exec("DELETE FROM verification_evidence");
-    db.exec("DELETE FROM tasks");
-    db.exec("DELETE FROM slices");
-    db.exec("DELETE FROM milestones");
-    db.exec("DELETE FROM decisions WHERE 1=1");
-
-    // Restore milestones
-    const msStmt = db.prepare(
-      `INSERT INTO milestones (id, title, status, depends_on, created_at, completed_at,
-        vision, success_criteria, key_risks, proof_strategy,
-        verification_contract, verification_integration, verification_operational, verification_uat,
-        definition_of_done, requirement_coverage, boundary_map_markdown)
-       VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-    );
-    for (const m of manifest.milestones) {
-      msStmt.run(
-        m.id, m.title, m.status,
-        JSON.stringify(m.depends_on), m.created_at, m.completed_at,
-        m.vision, JSON.stringify(m.success_criteria), JSON.stringify(m.key_risks),
-        JSON.stringify(m.proof_strategy),
-        m.verification_contract, m.verification_integration, m.verification_operational, m.verification_uat,
-        JSON.stringify(m.definition_of_done), m.requirement_coverage, m.boundary_map_markdown,
-      );
-    }
-
-    // Restore slices
-    const slStmt = db.prepare(
-      `INSERT INTO slices (milestone_id, id, title, status, risk, depends, demo,
-        created_at, completed_at, full_summary_md, full_uat_md,
-        goal, success_criteria, proof_level, integration_closure, observability_impact,
-        sequence, replan_triggered_at)
-       VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-    );
-    for (const s of manifest.slices) {
-      slStmt.run(
-        s.milestone_id, s.id, s.title, s.status, s.risk,
-        JSON.stringify(s.depends), s.demo,
-        s.created_at, s.completed_at, s.full_summary_md, s.full_uat_md,
-        s.goal, s.success_criteria, s.proof_level, s.integration_closure, s.observability_impact,
-        s.sequence, s.replan_triggered_at,
-      );
-    }
-
-    // Restore tasks
-    const tkStmt = db.prepare(
-      `INSERT INTO tasks (milestone_id, slice_id, id, title, status,
-        one_liner, narrative, verification_result, duration, completed_at,
-        blocker_discovered, deviations, known_issues, key_files, key_decisions,
-        full_summary_md, description, estimate, files, verify,
-        inputs, expected_output, observability_impact, sequence)
-       VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-    );
-    for (const t of manifest.tasks) {
-      tkStmt.run(
-        t.milestone_id, t.slice_id, t.id, t.title, t.status,
-        t.one_liner, t.narrative, t.verification_result, t.duration, t.completed_at,
-        t.blocker_discovered ? 1 : 0, t.deviations, t.known_issues,
-        JSON.stringify(t.key_files), JSON.stringify(t.key_decisions),
-        t.full_summary_md, t.description, t.estimate, JSON.stringify(t.files), t.verify,
-        JSON.stringify(t.inputs), JSON.stringify(t.expected_output),
-        t.observability_impact, t.sequence,
-      );
-    }
-
-    // Restore decisions
-    const dcStmt = db.prepare(
-      `INSERT INTO decisions (seq, id, when_context, scope, decision, choice, rationale, revisable, made_by, superseded_by)
-       VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-    );
-    for (const d of manifest.decisions) {
-      dcStmt.run(d.seq, d.id, d.when_context, d.scope, d.decision, d.choice, d.rationale, d.revisable, d.made_by, d.superseded_by);
-    }
-
-    // Restore verification evidence
-    const evStmt = db.prepare(
-      `INSERT INTO verification_evidence (task_id, slice_id, milestone_id, command, exit_code, verdict, duration_ms, created_at)
-       VALUES (?, ?, ?, ?, ?, ?, ?, ?)`,
-    );
-    for (const e of manifest.verification_evidence) {
-      evStmt.run(e.task_id, e.slice_id, e.milestone_id, e.command, e.exit_code, e.verdict, e.duration_ms, e.created_at);
-    }
-  });
-}
+//
+// The actual restore() implementation lives in gsd-db.ts (single-writer
+// invariant). This module only orchestrates reading the manifest file
+// and handing it to the writer.

 // ─── writeManifest ───────────────────────────────────────────────────────
@@ -346,6 +251,6 @@ export function bootstrapFromManifest(basePath: string): boolean {
     return false;
   }

-  restore(manifest);
+  restoreManifest(manifest);
   return true;
 }
@@ -5,7 +5,7 @@

 import { existsSync, readdirSync, readFileSync } from "node:fs";
 import { join } from "node:path";
-import { _getAdapter, transaction } from "./gsd-db.js";
+import { _getAdapter, bulkInsertLegacyHierarchy } from "./gsd-db.js";
 import { parseRoadmap, parsePlan } from "./parsers-legacy.js";
 import { logWarning } from "./workflow-logger.js";
@@ -219,34 +219,26 @@ export function migrateFromMarkdown(basePath: string): void {
     return;
   }

-  const placeholders = migratedMilestoneIds.map(() => "?").join(",");
-  transaction(() => {
-    // Clear existing data to handle stale DB shape (DELETE ... IN (...))
-    db.prepare(`DELETE FROM tasks WHERE milestone_id IN (${placeholders})`).run(...migratedMilestoneIds);
-    db.prepare(`DELETE FROM slices WHERE milestone_id IN (${placeholders})`).run(...migratedMilestoneIds);
-    db.prepare(`DELETE FROM milestones WHERE id IN (${placeholders})`).run(...migratedMilestoneIds);
-
-    // Insert milestones
-    const insertMilestone = db.prepare("INSERT INTO milestones (id, title, status, created_at) VALUES (?, ?, ?, ?)");
-    for (const m of milestoneInserts) {
-      insertMilestone.run(m.id, m.title, m.status, now);
-    }
-
-    // Insert slices (using v10 column names: depends, sequence)
-    const insertSlice = db.prepare(
-      "INSERT INTO slices (id, milestone_id, title, status, risk, depends, sequence, created_at) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"
-    );
-    for (const s of sliceInserts) {
-      insertSlice.run(s.id, s.milestoneId, s.title, s.status, s.risk, "[]", s.sequence, now);
-    }
-
-    // Insert tasks (using v10 column names: sequence, blocker_discovered, full_summary_md)
-    const insertTask = db.prepare(
-      "INSERT INTO tasks (id, slice_id, milestone_id, title, description, status, estimate, files, sequence) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)"
-    );
-    for (const t of taskInserts) {
-      insertTask.run(t.id, t.sliceId, t.milestoneId, t.title, "", t.status, "", "[]", t.sequence);
-    }
+  bulkInsertLegacyHierarchy({
+    milestones: milestoneInserts,
+    slices: sliceInserts.map(s => ({
+      id: s.id,
+      milestoneId: s.milestoneId,
+      title: s.title,
+      status: s.status,
+      risk: s.risk,
+      sequence: s.sequence,
+    })),
+    tasks: taskInserts.map(t => ({
+      id: t.id,
+      sliceId: t.sliceId,
+      milestoneId: t.milestoneId,
+      title: t.title,
+      status: t.status,
+      sequence: t.sequence,
+    })),
+    clearMilestoneIds: migratedMilestoneIds,
+    createdAt: now,
   });
 }
@@ -10,12 +10,13 @@ import {
   updateMilestoneStatus,
   getSliceTasks,
   insertMilestone,
-  _getAdapter,
   getMilestoneSlices,
   insertVerificationEvidence,
   upsertDecision,
   openDatabase,
   setTaskBlockerDiscovered,
+  insertOrIgnoreSlice,
+  insertOrIgnoreTask,
 } from "./gsd-db.js";
 import { isClosedStatus } from "./status-guards.js";
 import { invalidateStateCache } from "./state.js";
@@ -164,13 +165,12 @@ function replayEvents(events: WorkflowEvent[]): void {
         const milestoneId = p["milestoneId"] as string;
         const sliceId = p["sliceId"] as string;
         if (milestoneId && sliceId) {
-          const adapter = _getAdapter();
-          if (adapter) {
-            adapter.prepare(
-              `INSERT OR IGNORE INTO slices (milestone_id, id, title, status, created_at)
-               VALUES (:mid, :sid, :title, 'pending', :ts)`,
-            ).run({ ":mid": milestoneId, ":sid": sliceId, ":title": (p["title"] as string) ?? sliceId, ":ts": event.ts });
-          }
+          insertOrIgnoreSlice({
+            milestoneId,
+            sliceId,
+            title: (p["title"] as string) ?? sliceId,
+            createdAt: event.ts,
+          });
         }
         break;
       }
@@ -182,13 +182,13 @@ function replayEvents(events: WorkflowEvent[]): void {
         const sliceId = p["sliceId"] as string;
         const taskId = p["taskId"] as string;
         if (milestoneId && sliceId && taskId) {
-          const adapter = _getAdapter();
-          if (adapter) {
-            adapter.prepare(
-              `INSERT OR IGNORE INTO tasks (milestone_id, slice_id, id, title, status, created_at)
-               VALUES (:mid, :sid, :tid, :title, 'pending', :ts)`,
-            ).run({ ":mid": milestoneId, ":sid": sliceId, ":tid": taskId, ":title": (p["title"] as string) ?? taskId, ":ts": event.ts });
-          }
+          insertOrIgnoreTask({
+            milestoneId,
+            sliceId,
+            taskId,
+            title: (p["title"] as string) ?? taskId,
+            createdAt: event.ts,
+          });
         }
         break;
       }