fix: prevent stale state loop on auto-mode restart with existing worktree (#759)

Two compounding bugs caused auto-mode to loop infinitely when it was stopped
and then restarted while a worktree with committed progress existed:

Bug 1: copyPlanningArtifacts overwrites worktree state on restart

When auto-mode restarts and the milestone branch exists (worktree dir was
removed but branch preserved), createAutoWorktree re-attaches the worktree
to the existing branch — git correctly checks out the committed state with
[x] checkboxes. But then copyPlanningArtifacts unconditionally copies the
project root's .gsd/milestones/ into the worktree, overwriting the correct
[x] checkboxes with stale [ ] ones from the root (which isn't always fully
synced).

Fix: Skip copyPlanningArtifacts when branchExists is true. The branch
checkout already has the correct artifacts from committed work.

Bug 2: deriveState reads stale content from SQLite DB

deriveState had a DB-first content loading path that read artifact content
from the SQLite artifacts table. This table was populated once during
migrateFromMarkdown and never updated when files changed on disk (roadmap
checkbox updates, plan changes, etc.). Even after fixing files on disk,
deriveState returned stale DB content, keeping the state machine stuck.

Fix: Remove the DB content loading path from deriveState entirely. The
native Rust batch parser (nativeBatchParseGsdFiles) reads all .md files
in one call and is fast enough. The DB is still used for structured queries
(decisions, requirements) but no longer as a content cache for state
derivation.

Updated derive-state-db.test.ts Test 5 to write requirements to disk
instead of testing the now-removed DB-only content path.
This commit is contained in:
Tom Boucher 2026-03-16 22:20:30 -04:00
parent ebde7501dd
commit aafd254f45
3 changed files with 23 additions and 42 deletions

View file

@ -158,7 +158,15 @@ export function createAutoWorktree(basePath: string, milestoneId: string): strin
// Planning artifacts may be untracked if the project's .gitignore had a
// blanket .gsd/ rule (pre-v2.14.0). Without this copy, auto-mode loops
// on plan-slice because the plan file doesn't exist in the worktree.
copyPlanningArtifacts(basePath, info.path);
//
// IMPORTANT: Skip when re-attaching to an existing branch (#759).
// The branch checkout already has committed artifacts with correct state
// (e.g. [x] for completed slices). Copying from the project root would
// overwrite them with stale data ([ ] checkboxes) because the root is
// not always fully synced.
if (!branchExists) {
copyPlanningArtifacts(basePath, info.path);
}
// Run user-configured post-create hook (#597) — e.g. copy .env, symlink assets
const hookError = runWorktreePostCreateHook(basePath, info.path);

View file

@ -32,7 +32,6 @@ import {
import { milestoneIdSort, findMilestoneIds } from './guided-flow.js';
import { nativeBatchParseGsdFiles, type BatchParsedFile } from './native-parser-bridge.js';
import { isDbAvailable, _getAdapter } from './gsd-db.js';
import { join, resolve } from 'path';
import { debugCount, debugTime } from './debug-logger.js';
@ -149,30 +148,12 @@ async function _deriveStateImpl(basePath: string): Promise<GSDState> {
const fileContentCache = new Map<string, string>();
const gsdDir = gsdRoot(basePath);
// ── DB-first content loading ──
// When the DB is available, load artifact content from the artifacts table
// (indexed SELECT instead of O(N) file I/O). Falls back to native Rust batch
// parser, which in turn falls back to sequential JS reads via cachedLoadFile.
let dbContentLoaded = false;
if (isDbAvailable()) {
const adapter = _getAdapter();
if (adapter) {
try {
const rows = adapter.prepare('SELECT path, full_content FROM artifacts').all();
for (const row of rows) {
const relPath = (row as Record<string, unknown>)['path'] as string;
const content = (row as Record<string, unknown>)['full_content'] as string;
const absPath = resolve(gsdDir, relPath);
fileContentCache.set(absPath, content);
}
dbContentLoaded = rows.length > 0;
} catch {
// DB query failed — fall through to native batch parse
}
}
}
if (!dbContentLoaded) {
// NOTE: We intentionally do NOT load from the SQLite DB here (#759).
// The DB's artifacts table is populated once during migrateFromMarkdown
// and is never updated when files change on disk (e.g. roadmap [x] updates,
// plan checkbox changes). Using stale DB content causes deriveState to
// return incorrect phase/slice state, leading to infinite skip loops.
// The native Rust batch parser is fast enough for state derivation.
const batchFiles = nativeBatchParseGsdFiles(gsdDir);
if (batchFiles) {
for (const f of batchFiles) {
@ -180,7 +161,6 @@ async function _deriveStateImpl(basePath: string): Promise<GSDState> {
fileContentCache.set(absPath, f.rawContent);
}
}
}
/**
* Load file content from batch cache first, falling back to disk read.

View file

@ -248,31 +248,24 @@ async function main(): Promise<void> {
}
}
// ─── Test 5: Requirements counting from DB content ───────────────────
console.log('\n=== derive-state-db: requirements from DB content ===');
// ─── Test 5: Requirements counting from disk (DB no longer used for content)
console.log('\n=== derive-state-db: requirements from disk content ===');
{
const base = createFixtureBase();
try {
// Write minimal milestone dir (needed for milestone discovery)
mkdirSync(join(base, '.gsd', 'milestones', 'M001'), { recursive: true });
// Do NOT write REQUIREMENTS.md to disk — only in DB
openDatabase(':memory:');
insertArtifactRow('REQUIREMENTS.md', REQUIREMENTS_CONTENT, {
artifact_type: 'requirements',
});
// Write REQUIREMENTS.md to disk (DB content is no longer used by deriveState)
writeFile(base, 'REQUIREMENTS.md', REQUIREMENTS_CONTENT);
invalidateStateCache();
const state = await deriveState(base);
// Requirements should come from DB
assertEq(state.requirements?.active, 2, 'req-from-db: requirements.active = 2');
assertEq(state.requirements?.validated, 1, 'req-from-db: requirements.validated = 1');
assertEq(state.requirements?.total, 3, 'req-from-db: requirements.total = 3');
closeDatabase();
// Requirements should come from disk
assertEq(state.requirements?.active, 2, 'req-from-disk: requirements.active = 2');
assertEq(state.requirements?.validated, 1, 'req-from-disk: requirements.validated = 1');
assertEq(state.requirements?.total, 3, 'req-from-disk: requirements.total = 3');
} finally {
closeDatabase();
cleanup(base);
}
}