fix(gsd): remove stale observability validator + fix greenfield worktree check

The observability validator checked for markdown headings (## Observability / Diagnostics,
## Observability Impact) that the DB-backed renderer never produces, causing false-positive
warnings on every dispatch. Removed entirely — the DB schema enforces structure at write time.

The worktree health check blocked execution in directories without recognized project files
(package.json, Cargo.toml, etc.), preventing greenfield projects from scaffolding. Downgraded
to a warning — .git check remains as the hard gate.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Lex Christopherson 2026-03-24 07:27:48 -06:00
parent d3173d6512
commit 7ca3ce04a4
15 changed files with 47 additions and 1238 deletions

View file

@ -482,7 +482,6 @@
| gsd/auto-loop.ts | Auto Engine, State Machine | Execution loop state and cycle management |
| gsd/auto-supervisor.ts | Auto Engine | Supervision and oversight of autonomous runs |
| gsd/auto-budget.ts | Auto Engine | Token/cost budgeting and tracking |
| gsd/auto-observability.ts | Auto Engine | Observability hooks and telemetry |
| gsd/auto-tool-tracking.ts | Auto Engine | Tool usage instrumentation |
| gsd/doctor.ts | Doctor/Diagnostics | Health check and system diagnostics |
| gsd/doctor-checks.ts | Doctor/Diagnostics | Individual diagnostic checks |
@ -978,7 +977,7 @@ Quick lookup: which files are part of each system?
| **Config** | src/app-paths.ts, src/models-resolver.ts, src/remote-questions-config.ts, src/wizard.ts, core/defaults.ts, core/constants.ts, config.ts |
| **Context7** | src/resources/extensions/context7/index.ts |
| **Doctor / Diagnostics** | gsd/doctor*.ts, gsd/collision-diagnostics.ts, core/diagnostics.ts, web/lib/diagnostics-types.ts, web/app/api/doctor/*, forensics/* |
| **Event System** | pi-coding-agent/src/core/event-bus.ts, gsd/auto-observability.ts |
| **Event System** | pi-coding-agent/src/core/event-bus.ts |
| **Extension Registry** | src/extension-discovery.ts, src/extension-registry.ts, src/bundled-extension-paths.ts |
| **Extensions** | pi-coding-agent/src/core/extensions/*, src/resource-loader.ts |
| **File Search** | native/crates/engine/src/grep.rs, glob.rs, fd.rs, fs_cache.rs, packages/native/src/grep/*, fd/*, core/tools/grep.ts, find.ts |

View file

@ -1,74 +0,0 @@
/**
* Pre-dispatch observability checks for auto-mode units.
* Validates plan/summary file quality and builds repair instructions
* for the agent to fix gaps before proceeding with the unit.
*/
import type { ExtensionContext } from "@gsd/pi-coding-agent";
import {
validatePlanBoundary,
validateExecuteBoundary,
validateCompleteBoundary,
formatValidationIssues,
} from "./observability-validator.js";
import type { ValidationIssue } from "./observability-validator.js";
/**
 * Run pre-dispatch observability validation for a unit and surface findings.
 *
 * Hook units (`hook/...`) are skipped entirely, as are unit ids that do not
 * carry at least a milestone and slice segment. Which boundary validator
 * runs depends on the unit type:
 *   - "plan-slice"     -> validatePlanBoundary(milestone, slice)
 *   - "execute-task"   -> validateExecuteBoundary(milestone, slice, task),
 *                         only when a task segment is present
 *   - "complete-slice" -> validateCompleteBoundary(milestone, slice)
 * Any other unit type produces no issues.
 *
 * When issues are found, a "warning" notification summarizing them is shown
 * via ctx.ui.notify. Returns the (possibly empty) issue list.
 *
 * @param ctx      Extension context used only for UI notification.
 * @param basePath Project root passed through to the boundary validators.
 * @param unitType Unit kind, e.g. "plan-slice", "execute-task".
 * @param unitId   Slash-separated id; used here as "<mid>/<sid>[/<tid>]".
 */
export async function collectObservabilityWarnings(
  ctx: ExtensionContext,
  basePath: string,
  unitType: string,
  unitId: string,
): Promise<ValidationIssue[]> {
  // Hook units have custom artifacts — skip standard observability checks
  if (unitType.startsWith("hook/")) return [];

  const [mid, sid, tid] = unitId.split("/");
  if (!mid || !sid) return [];

  // Typed declaration instead of the previous `[] as Awaited<ReturnType<...>>`
  // assertion — same runtime value, but the compiler now checks assignments.
  let issues: ValidationIssue[] = [];
  if (unitType === "plan-slice") {
    issues = await validatePlanBoundary(basePath, mid, sid);
  } else if (unitType === "execute-task" && tid) {
    issues = await validateExecuteBoundary(basePath, mid, sid, tid);
  } else if (unitType === "complete-slice") {
    issues = await validateCompleteBoundary(basePath, mid, sid);
  }

  if (issues.length > 0) {
    ctx.ui.notify(
      `Observability check (${unitType}) found ${issues.length} warning${issues.length === 1 ? "" : "s"}:\n${formatValidationIssues(issues)}`,
      "warning",
    );
  }
  return issues;
}
/**
 * Build a markdown block instructing the agent to repair observability gaps
 * before starting the unit.
 *
 * @param issues Findings from collectObservabilityWarnings.
 * @returns "" when there are no issues; otherwise a "---"-fenced markdown
 *          section with one bullet per issue (file basename, message, and
 *          suggestion when present), suitable for appending to a prompt.
 */
export function buildObservabilityRepairBlock(issues: ValidationIssue[]): string {
  if (issues.length === 0) return "";
  const items = issues.map(issue => {
    // Show just the basename; fall back to the raw path if split yields "".
    const fileName = issue.file.split("/").pop() || issue.file;
    let line = `- **${fileName}**: ${issue.message}`;
    // Fix: the suggestion was previously appended with no separator, fusing
    // it directly onto the end of the message sentence.
    if (issue.suggestion) line += ` ${issue.suggestion}`;
    return line;
  });
  return [
    "",
    "---",
    "",
    "## Pre-flight: Observability gaps to fix FIRST",
    "",
    "The following issues were detected in plan/summary files for this unit.",
    "**Read each flagged file, apply the fix described, then proceed with the unit.**",
    "",
    ...items,
    "",
    "---",
    "",
  ].join("\n");
}

View file

@ -79,10 +79,6 @@ import {
getOldestInFlightToolStart,
clearInFlightTools,
} from "./auto-tool-tracking.js";
import {
collectObservabilityWarnings as _collectObservabilityWarnings,
buildObservabilityRepairBlock,
} from "./auto-observability.js";
import { closeoutUnit } from "./auto-unit-closeout.js";
import { recoverTimedOutUnit } from "./auto-timeout-recovery.js";
import { selfHealRuntimeRecords } from "./auto-recovery.js";
@ -961,9 +957,6 @@ function buildLoopDeps(): LoopDeps {
runPreDispatchHooks,
getPriorSliceCompletionBlocker,
getMainBranch,
collectObservabilityWarnings: _collectObservabilityWarnings,
buildObservabilityRepairBlock,
// Unit closeout + runtime records
closeoutUnit,
verifyExpectedArtifact,

View file

@ -171,14 +171,6 @@ export interface LoopDeps {
unitId: string,
) => string | null;
getMainBranch: (basePath: string) => string;
collectObservabilityWarnings: (
ctx: ExtensionContext,
basePath: string,
unitType: string,
unitId: string,
) => Promise<unknown[]>;
buildObservabilityRepairBlock: (issues: unknown[]) => string | null;
// Unit closeout + runtime records
closeoutUnit: (
ctx: ExtensionContext,

View file

@ -161,7 +161,6 @@ export async function autoLoop(
prompt: step.prompt,
finalPrompt: step.prompt,
pauseAfterUatDispatch: false,
observabilityIssues: [],
state: gsdState,
mid: s.currentMilestoneId ?? "workflow",
midTitle: "Workflow",
@ -234,7 +233,6 @@ export async function autoLoop(
prompt: sidecarItem.prompt,
finalPrompt: sidecarItem.prompt,
pauseAfterUatDispatch: false,
observabilityIssues: [],
state: sidecarState,
mid: sidecarState.activeMilestone?.id,
midTitle: sidecarState.activeMilestone?.title,

View file

@ -637,18 +637,11 @@ export async function runDispatch(
return { action: "break", reason: "prior-slice-blocker" };
}
const observabilityIssues = await deps.collectObservabilityWarnings(
ctx,
s.basePath,
unitType,
unitId,
);
return {
action: "next",
data: {
unitType, unitId, prompt, finalPrompt: prompt,
pauseAfterUatDispatch, observabilityIssues,
pauseAfterUatDispatch,
state, mid, midTitle,
isRetry: false, previousTier: undefined,
hookModelOverride: preDispatchResult.model,
@ -809,7 +802,7 @@ export async function runUnitPhase(
sidecarItem?: SidecarItem,
): Promise<PhaseResult<{ unitStartedAt: number }>> {
const { ctx, pi, s, deps, prefs } = ic;
const { unitType, unitId, prompt, observabilityIssues, state, mid } = iterData;
const { unitType, unitId, prompt, state, mid } = iterData;
debugLog("autoLoop", {
phase: "unit-execution",
@ -837,11 +830,11 @@ export async function runUnitPhase(
const hasProjectFile = PROJECT_FILES.some((f) => deps.existsSync(join(s.basePath, f)));
const hasSrcDir = deps.existsSync(join(s.basePath, "src"));
if (!hasProjectFile && !hasSrcDir) {
const msg = `Worktree health check failed: ${s.basePath} has no recognized project files — refusing to dispatch ${unitType} ${unitId}`;
debugLog("runUnitPhase", { phase: "worktree-health-fail", basePath: s.basePath, hasProjectFile, hasSrcDir });
ctx.ui.notify(msg, "error");
await deps.stopAuto(ctx, pi, msg);
return { action: "break", reason: "worktree-invalid" };
// Greenfield projects won't have project files yet — the first task creates them.
// Log a warning but allow execution to proceed. The .git check above is sufficient
// to ensure we're in a valid working directory.
debugLog("runUnitPhase", { phase: "worktree-health-warn-greenfield", basePath: s.basePath, hasProjectFile, hasSrcDir });
ctx.ui.notify(`Warning: ${s.basePath} has no recognized project files — proceeding as greenfield project`, "warn");
}
}
@ -914,12 +907,6 @@ export async function runUnitPhase(
}
}
const repairBlock =
deps.buildObservabilityRepairBlock(observabilityIssues);
if (repairBlock) {
finalPrompt = `${finalPrompt}${repairBlock}`;
}
// Prompt char measurement
s.lastPromptCharCount = finalPrompt.length;
s.lastBaselineCharCount = undefined;

View file

@ -92,7 +92,6 @@ export interface IterationData {
prompt: string;
finalPrompt: string;
pauseAfterUatDispatch: boolean;
observabilityIssues: unknown[];
state: GSDState;
mid: string | undefined;
midTitle: string | undefined;

View file

@ -1,456 +0,0 @@
import { loadFile } from "./files.js";
import { resolveSliceFile, resolveTaskFile, resolveTasksDir, resolveTaskFiles } from "./paths.js";
/**
 * A single finding produced by the plan/summary content validators below.
 */
export interface ValidationIssue {
  // Severity level; the rules in this file emit "warning" and "info".
  severity: "info" | "warning" | "error";
  // Which document kind the rule ran against.
  scope: "slice-plan" | "task-plan" | "task-summary" | "slice-summary";
  // Path of the file the issue was found in.
  file: string;
  // Stable machine-readable identifier for the rule that fired.
  ruleId: string;
  // Human-readable description of the problem.
  message: string;
  // Optional concrete remediation hint.
  suggestion?: string;
}
/**
 * Extract the body of a markdown section introduced by `heading` at the given
 * heading level (default `##`). The body runs from the line after the heading
 * to the next heading of the same or higher level (or end of text), trimmed.
 * Returns null when the heading is not present.
 */
function getSection(content: string, heading: string, level: number = 2): string | null {
  const hashes = "#".repeat(level);
  const safeHeading = heading.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
  const headingRe = new RegExp(`^${hashes} ${safeHeading}\\s*$`, "m");
  const hit = headingRe.exec(content);
  if (hit === null) return null;
  const body = content.slice(hit.index + hit[0].length);
  const boundary = new RegExp(`^#{1,${level}} `, "m").exec(body);
  const sectionText = boundary ? body.slice(0, boundary.index) : body;
  return sectionText.trim();
}
/**
 * Extract the YAML frontmatter body from a markdown document: the text
 * between an opening "---" line (after leading whitespace) and the next
 * line starting with "---". Returns null when either delimiter is absent.
 */
function getFrontmatter(content: string): string | null {
  const text = content.trimStart();
  if (!text.startsWith("---")) return null;
  const firstNewline = text.indexOf("\n");
  if (firstNewline === -1) return null;
  const afterOpener = text.slice(firstNewline + 1);
  const closerAt = afterOpener.indexOf("\n---");
  if (closerAt === -1) return null;
  return afterOpener.slice(0, closerAt);
}
function hasFrontmatterKey(content: string, key: string): boolean {
const fm = getFrontmatter(content);
if (!fm) return false;
return new RegExp(`^${key}:`, "m").test(fm);
}
/**
 * Split text into trimmed lines, dropping blanks, HTML-comment delimiter
 * lines, and template-placeholder lines like `{{slot}}` or `- {{slot}}`.
 */
function normalizeMeaningfulLines(text: string): string[] {
  const kept: string[] = [];
  for (const raw of text.split("\n")) {
    const line = raw.trim();
    if (line.length === 0) continue;
    if (line.startsWith("<!--") || line.endsWith("-->")) continue;
    if (/^[-*]\s*\{\{.+\}\}$/.test(line) || /^\{\{.+\}\}$/.test(line)) continue;
    kept.push(line);
  }
  return kept;
}
/**
 * True when a section is absent, empty, or contains only placeholder content:
 * "None" entries, `{{…}}` template slots, or leftover template guidance text.
 */
function sectionLooksPlaceholderOnly(text: string | null): boolean {
  if (!text) return true;
  const stripped = normalizeMeaningfulLines(text)
    .map(line => line.replace(/^[-*]\s+/, "").trim())
    .filter(line => line.length > 0);
  if (stripped.length === 0) return true;
  const isPlaceholder = (line: string): boolean => {
    const lower = line.toLowerCase();
    return (
      lower === "none" ||
      lower.endsWith(": none") ||
      lower.includes("{{") ||
      lower.includes("}}") ||
      lower.startsWith("required for non-trivial") ||
      lower.startsWith("describe how a future agent") ||
      lower.startsWith("prefer:") ||
      lower.startsWith("keep this section concise")
    );
  };
  return stripped.every(isPlaceholder);
}
/**
 * Heuristic: does this plan text mention runtime concerns (APIs, jobs,
 * databases, UI flows, …) that make observability guidance relevant?
 * Note the leading space in " api" — it avoids matching words that merely
 * end in "api".
 */
function textSuggestsObservabilityRelevant(content: string): boolean {
  const haystack = content.toLowerCase();
  const keywords = [
    " api", "route", "server", "worker", "queue", "job", "sync", "import",
    "webhook", "auth", "db", "database", "migration", "cache", "background",
    "polling", "realtime", "socket", "stateful", "integration", "ui", "form",
    "submit", "status", "service", "pipeline", "health endpoint", "error path"
  ];
  for (const keyword of keywords) {
    if (haystack.includes(keyword)) return true;
  }
  return false;
}
/**
 * True when a Verification section references at least one diagnostic or
 * failure-path concern (errors, status, logs, retries, …).
 */
function verificationMentionsDiagnostics(section: string | null): boolean {
  if (!section) return false;
  const haystack = section.toLowerCase();
  const diagnosticTerms = [
    "error", "failure", "diagnostic", "status", "health", "inspect", "log",
    "network", "console", "retry", "last error", "correlation", "readiness"
  ];
  return diagnosticTerms.some(term => haystack.includes(term));
}
/**
 * Validate a slice PLAN document.
 *
 * Always-on quality rule: every inline task checkbox in `## Tasks` must have
 * at least one non-blank line of body beneath it (`empty_task_entry`).
 * Observability rules run only when the text looks runtime-relevant:
 * missing/placeholder `## Observability / Diagnostics` and a `## Verification`
 * section with no diagnostic-flavored check.
 *
 * @param file    Path used in issue records (not read from disk here).
 * @param content Full markdown text of the slice plan.
 * @returns Issues in rule order; empty when the plan passes all checks.
 */
export function validateSlicePlanContent(file: string, content: string): ValidationIssue[] {
  const issues: ValidationIssue[] = [];
  // ── Plan quality rules (always run, not gated by runtime relevance) ──
  const tasksSection = getSection(content, "Tasks", 2);
  if (tasksSection) {
    const lines = tasksSection.split("\n");
    // Matches checkbox task headers like "- [ ] **T01: Title**" / "- [x] **T02: …".
    const taskLinePattern = /^- \[[ x]\] \*\*T\d+:/;
    const taskLineIndices: number[] = [];
    for (let i = 0; i < lines.length; i++) {
      if (taskLinePattern.test(lines[i])) taskLineIndices.push(i);
    }
    for (let t = 0; t < taskLineIndices.length; t++) {
      const start = taskLineIndices[t];
      const end = t + 1 < taskLineIndices.length ? taskLineIndices[t + 1] : lines.length;
      // Check lines between this task header and the next (or section end)
      const bodyLines = lines.slice(start + 1, end);
      const meaningful = bodyLines.filter(l => l.trim().length > 0);
      if (meaningful.length === 0) {
        issues.push({
          severity: "warning",
          scope: "slice-plan",
          file,
          ruleId: "empty_task_entry",
          message: "Inline task entry has no description content beneath the checkbox line.",
          suggestion: "Add at least a Why/Files/Do/Verify summary so the task is self-describing.",
        });
      }
    }
  }
  // ── Observability rules (gated by runtime relevance) ──
  const relevant = textSuggestsObservabilityRelevant(content);
  if (!relevant) return issues;
  const obs = getSection(content, "Observability / Diagnostics", 2);
  const verification = getSection(content, "Verification", 2);
  if (!obs) {
    issues.push({
      severity: "warning",
      scope: "slice-plan",
      file,
      ruleId: "missing_observability_section",
      message: "Slice plan appears non-trivial but is missing `## Observability / Diagnostics`.",
      suggestion: "Add runtime signals, inspection surfaces, failure visibility, and redaction constraints.",
    });
  } else if (sectionLooksPlaceholderOnly(obs)) {
    issues.push({
      severity: "warning",
      scope: "slice-plan",
      file,
      ruleId: "observability_section_placeholder_only",
      message: "Slice plan has `## Observability / Diagnostics` but it still looks like placeholder text.",
      suggestion: "Replace placeholders with concrete signals and inspection surfaces a future agent should trust.",
    });
  }
  if (!verificationMentionsDiagnostics(verification)) {
    issues.push({
      severity: "warning",
      scope: "slice-plan",
      file,
      ruleId: "verification_missing_diagnostic_check",
      message: "Slice verification does not appear to include any diagnostic or failure-path check.",
      suggestion: "Add at least one verification step for inspectable failure state, structured error output, status surface, or equivalent.",
    });
  }
  return issues;
}
/**
 * Validate a task PLAN document.
 *
 * Always-on quality rules: non-empty `## Steps`, non-placeholder
 * `## Verification`, frontmatter scope estimates under thresholds
 * (estimated_steps < 10, estimated_files < 12), and backtick-wrapped file
 * paths in `## Expected Output` (warning) and `## Inputs` (info) — those
 * paths are machine-parsed downstream to derive task dependencies.
 * Observability rules (`## Observability Impact` present and non-placeholder)
 * run only when the text looks runtime-relevant.
 *
 * @param file    Path used in issue records (not read from disk here).
 * @param content Full markdown text of the task plan.
 * @returns Issues in rule order; empty when the plan passes all checks.
 */
export function validateTaskPlanContent(file: string, content: string): ValidationIssue[] {
  const issues: ValidationIssue[] = [];
  // ── Plan quality rules (always run, not gated by runtime relevance) ──
  // Rule: empty or missing Steps section
  const stepsSection = getSection(content, "Steps", 2);
  if (stepsSection === null || sectionLooksPlaceholderOnly(stepsSection)) {
    issues.push({
      severity: "warning",
      scope: "task-plan",
      file,
      ruleId: "empty_steps_section",
      message: "Task plan has an empty or missing `## Steps` section.",
      suggestion: "Add concrete numbered implementation steps so execution has a clear sequence.",
    });
  }
  // Rule: placeholder-only Verification section
  const verificationSection = getSection(content, "Verification", 2);
  if (verificationSection !== null && sectionLooksPlaceholderOnly(verificationSection)) {
    issues.push({
      severity: "warning",
      scope: "task-plan",
      file,
      ruleId: "placeholder_verification",
      message: "Task plan has `## Verification` but it still looks like placeholder text.",
      suggestion: "Replace placeholders with concrete verification commands, test runs, or observable checks.",
    });
  }
  // Rule: scope estimate thresholds
  const fm = getFrontmatter(content);
  if (fm) {
    const stepsMatch = fm.match(/^estimated_steps:\s*(\d+)/m);
    const filesMatch = fm.match(/^estimated_files:\s*(\d+)/m);
    if (stepsMatch) {
      const estimatedSteps = parseInt(stepsMatch[1], 10);
      if (estimatedSteps >= 10) {
        issues.push({
          severity: "warning",
          scope: "task-plan",
          file,
          ruleId: "scope_estimate_steps_high",
          message: `Task plan estimates ${estimatedSteps} steps (threshold: 10). Consider splitting into smaller tasks.`,
          suggestion: "Break the task into sub-tasks or reduce scope so each task stays focused and completable in one pass.",
        });
      }
    }
    if (filesMatch) {
      const estimatedFiles = parseInt(filesMatch[1], 10);
      if (estimatedFiles >= 12) {
        issues.push({
          severity: "warning",
          scope: "task-plan",
          file,
          ruleId: "scope_estimate_files_high",
          message: `Task plan estimates ${estimatedFiles} files (threshold: 12). Consider splitting into smaller tasks.`,
          suggestion: "Break the task into sub-tasks or reduce scope to keep the change footprint manageable.",
        });
      }
    }
  }
  // Rule: Inputs and Expected Output should contain backtick-wrapped file paths
  const inputsSection = getSection(content, "Inputs", 2);
  const outputSection = getSection(content, "Expected Output", 2);
  // A backticked span containing a "." or "/" — the loose signature of a file path.
  const backtickPathPattern = /`[^`]*[./][^`]*`/;
  if (outputSection === null || !backtickPathPattern.test(outputSection)) {
    issues.push({
      severity: "warning",
      scope: "task-plan",
      file,
      ruleId: "missing_output_file_paths",
      message: "Task plan `## Expected Output` is missing or has no backtick-wrapped file paths.",
      suggestion: "List concrete output file paths in backticks (e.g. `src/types.ts`). These are machine-parsed to derive task dependencies.",
    });
  }
  if (inputsSection !== null && inputsSection.trim().length > 0 && !backtickPathPattern.test(inputsSection)) {
    issues.push({
      severity: "info",
      scope: "task-plan",
      file,
      ruleId: "missing_input_file_paths",
      message: "Task plan `## Inputs` has content but no backtick-wrapped file paths.",
      suggestion: "List input file paths in backticks (e.g. `src/config.json`). These are machine-parsed to derive task dependencies.",
    });
  }
  // ── Observability rules (gated by runtime relevance) ──
  const relevant = textSuggestsObservabilityRelevant(content);
  if (!relevant) return issues;
  const obs = getSection(content, "Observability Impact", 2);
  if (!obs) {
    issues.push({
      severity: "warning",
      scope: "task-plan",
      file,
      ruleId: "missing_observability_impact",
      message: "Task plan appears runtime-relevant but is missing `## Observability Impact`.",
      suggestion: "Explain what signals change, how a future agent inspects this task, and what failure state becomes visible.",
    });
  } else if (sectionLooksPlaceholderOnly(obs)) {
    issues.push({
      severity: "warning",
      scope: "task-plan",
      file,
      ruleId: "observability_impact_placeholder_only",
      message: "Task plan has `## Observability Impact` but it still looks empty or placeholder-only.",
      suggestion: "Fill in concrete inspection surfaces or explicitly justify why observability is not applicable.",
    });
  }
  return issues;
}
/**
 * Validate a task SUMMARY document: requires `observability_surfaces` in
 * frontmatter plus non-placeholder `## Diagnostics` and
 * `## Verification Evidence` sections. Unlike the plan validators, these
 * rules are not gated on runtime relevance — every summary is checked.
 *
 * @param file    Path used in issue records (not read from disk here).
 * @param content Full markdown text of the task summary.
 */
export function validateTaskSummaryContent(file: string, content: string): ValidationIssue[] {
  const issues: ValidationIssue[] = [];
  if (!hasFrontmatterKey(content, "observability_surfaces")) {
    issues.push({
      severity: "warning",
      scope: "task-summary",
      file,
      ruleId: "missing_observability_frontmatter",
      message: "Task summary is missing `observability_surfaces` in frontmatter.",
      suggestion: "List the durable status/log/error surfaces a future agent should use.",
    });
  }
  const diagnostics = getSection(content, "Diagnostics", 2);
  if (!diagnostics) {
    issues.push({
      severity: "warning",
      scope: "task-summary",
      file,
      ruleId: "missing_diagnostics_section",
      message: "Task summary is missing `## Diagnostics`.",
      suggestion: "Document how to inspect what this task built later.",
    });
  } else if (sectionLooksPlaceholderOnly(diagnostics)) {
    issues.push({
      severity: "warning",
      scope: "task-summary",
      file,
      ruleId: "diagnostics_placeholder_only",
      message: "Task summary diagnostics section still looks like placeholder text.",
      suggestion: "Replace placeholders with concrete commands, endpoints, logs, error shapes, or failure artifacts.",
    });
  }
  const evidence = getSection(content, "Verification Evidence", 2);
  if (!evidence) {
    issues.push({
      severity: "warning",
      scope: "task-summary",
      file,
      ruleId: "evidence_block_missing",
      message: "Task summary is missing `## Verification Evidence`.",
      suggestion: "Add a verification evidence table showing gate check results (command, exit code, verdict, duration).",
    });
  } else if (sectionLooksPlaceholderOnly(evidence)) {
    issues.push({
      severity: "warning",
      scope: "task-summary",
      file,
      ruleId: "evidence_block_placeholder",
      message: "Task summary verification evidence section still looks like placeholder text.",
      suggestion: "Replace placeholders with actual gate results or note that no verification commands were discovered.",
    });
  }
  return issues;
}
/**
 * Validate a slice SUMMARY document: requires `observability_surfaces` in
 * frontmatter and a non-placeholder `### Authoritative diagnostics`
 * subsection (note: level-3 heading, unlike the level-2 sections elsewhere).
 *
 * @param file    Path used in issue records (not read from disk here).
 * @param content Full markdown text of the slice summary.
 */
export function validateSliceSummaryContent(file: string, content: string): ValidationIssue[] {
  const issues: ValidationIssue[] = [];
  if (!hasFrontmatterKey(content, "observability_surfaces")) {
    issues.push({
      severity: "warning",
      scope: "slice-summary",
      file,
      ruleId: "missing_observability_frontmatter",
      message: "Slice summary is missing `observability_surfaces` in frontmatter.",
      suggestion: "List the authoritative diagnostics and durable inspection surfaces for this slice.",
    });
  }
  // Level-3 lookup: this subsection lives under the Forward Intelligence section.
  const diagnostics = getSection(content, "Authoritative diagnostics", 3);
  if (!diagnostics) {
    issues.push({
      severity: "warning",
      scope: "slice-summary",
      file,
      ruleId: "missing_authoritative_diagnostics",
      message: "Slice summary is missing `### Authoritative diagnostics` in Forward Intelligence.",
      suggestion: "Tell future agents where to look first and why that signal is trustworthy.",
    });
  } else if (sectionLooksPlaceholderOnly(diagnostics)) {
    issues.push({
      severity: "warning",
      scope: "slice-summary",
      file,
      ruleId: "authoritative_diagnostics_placeholder_only",
      message: "Slice summary includes authoritative diagnostics but it still looks like placeholder text.",
      suggestion: "Replace placeholders with the real first-stop diagnostic surface for this slice.",
    });
  }
  return issues;
}
/**
 * Validate content at the "plan" boundary of a slice: the slice PLAN file
 * plus every task PLAN file found in the slice's tasks directory. Files that
 * cannot be resolved or loaded are silently skipped.
 *
 * @returns Concatenated issues from all checked files (slice plan first).
 */
export async function validatePlanBoundary(basePath: string, milestoneId: string, sliceId: string): Promise<ValidationIssue[]> {
  const issues: ValidationIssue[] = [];
  const slicePlan = resolveSliceFile(basePath, milestoneId, sliceId, "PLAN");
  if (slicePlan) {
    const content = await loadFile(slicePlan);
    if (content) issues.push(...validateSlicePlanContent(slicePlan, content));
  }
  const tasksDir = resolveTasksDir(basePath, milestoneId, sliceId);
  const taskPlans = tasksDir ? resolveTaskFiles(tasksDir, "PLAN") : [];
  for (const file of taskPlans) {
    // Task id is the prefix before the first hyphen — assumes names like
    // "T01-PLAN.md"; NOTE(review): confirm against resolveTaskFiles output.
    const taskId = file.split("-")[0];
    const taskPlan = resolveTaskFile(basePath, milestoneId, sliceId, taskId, "PLAN");
    if (!taskPlan) continue;
    const content = await loadFile(taskPlan);
    if (content) issues.push(...validateTaskPlanContent(taskPlan, content));
  }
  return issues;
}
/**
 * Validate content at the "execute" boundary of a single task: the slice
 * PLAN plus that task's PLAN. Files that cannot be resolved or loaded are
 * silently skipped.
 *
 * @returns Concatenated issues (slice plan issues first, then task plan).
 */
export async function validateExecuteBoundary(basePath: string, milestoneId: string, sliceId: string, taskId: string): Promise<ValidationIssue[]> {
  const issues: ValidationIssue[] = [];
  const slicePlan = resolveSliceFile(basePath, milestoneId, sliceId, "PLAN");
  if (slicePlan) {
    const content = await loadFile(slicePlan);
    if (content) issues.push(...validateSlicePlanContent(slicePlan, content));
  }
  const taskPlan = resolveTaskFile(basePath, milestoneId, sliceId, taskId, "PLAN");
  if (taskPlan) {
    const content = await loadFile(taskPlan);
    if (content) issues.push(...validateTaskPlanContent(taskPlan, content));
  }
  return issues;
}
/**
 * Validate content at the "complete" boundary of a slice: every task SUMMARY
 * in the slice's tasks directory plus the slice SUMMARY itself. Files that
 * cannot be resolved or loaded are silently skipped.
 *
 * @returns Concatenated issues (task summaries first, slice summary last).
 */
export async function validateCompleteBoundary(basePath: string, milestoneId: string, sliceId: string): Promise<ValidationIssue[]> {
  const issues: ValidationIssue[] = [];
  const tasksDir = resolveTasksDir(basePath, milestoneId, sliceId);
  const taskSummaries = tasksDir ? resolveTaskFiles(tasksDir, "SUMMARY") : [];
  for (const file of taskSummaries) {
    // Task id is the prefix before the first hyphen — assumes names like
    // "T01-SUMMARY.md"; NOTE(review): confirm against resolveTaskFiles output.
    const taskId = file.split("-")[0];
    const taskSummary = resolveTaskFile(basePath, milestoneId, sliceId, taskId, "SUMMARY");
    if (!taskSummary) continue;
    const content = await loadFile(taskSummary);
    if (content) issues.push(...validateTaskSummaryContent(taskSummary, content));
  }
  const sliceSummary = resolveSliceFile(basePath, milestoneId, sliceId, "SUMMARY");
  if (sliceSummary) {
    const content = await loadFile(sliceSummary);
    if (content) issues.push(...validateSliceSummaryContent(sliceSummary, content));
  }
  return issues;
}
/**
 * Render up to `limit` issues as a short markdown bullet list ("- file: msg"),
 * using each file's basename, with a trailing "...and N more" line when
 * issues were truncated. Returns "" for an empty list.
 */
export function formatValidationIssues(issues: ValidationIssue[], limit: number = 4): string {
  if (issues.length === 0) return "";
  const bullets: string[] = [];
  for (const issue of issues.slice(0, limit)) {
    const baseName = issue.file.split("/").pop() || issue.file;
    bullets.push(`- ${baseName}: ${issue.message}`);
  }
  const hidden = issues.length - limit;
  if (hidden > 0) bullets.push(`- ...and ${hidden} more`);
  return bullets.join("\n");
}

View file

@ -366,8 +366,6 @@ function makeMockDeps(
runPreDispatchHooks: () => ({ firedHooks: [], action: "proceed" }),
getPriorSliceCompletionBlocker: () => null,
getMainBranch: () => "main",
collectObservabilityWarnings: async () => [],
buildObservabilityRepairBlock: () => null,
closeoutUnit: async () => {},
verifyExpectedArtifact: () => true,
clearUnitRuntimeRecord: () => {},
@ -2069,7 +2067,7 @@ test("autoLoop stops when worktree has no .git for execute-task (#1833)", async
);
});
test("autoLoop stops when worktree has no project files for execute-task (#1833)", async () => {
test("autoLoop warns but proceeds for greenfield project (no project files) (#1833)", async () => {
_resetPendingResolve();
const ctx = makeMockCtx();
@ -2078,10 +2076,17 @@ test("autoLoop stops when worktree has no project files for execute-task (#1833)
const pi = makeMockPi();
const notifications: string[] = [];
ctx.ui.notify = (msg: string) => { notifications.push(msg); };
const s = makeLoopSession({ basePath: "/tmp/empty-worktree" });
ctx.ui.notify = (msg: string) => {
notifications.push(msg);
// Terminate the loop after the greenfield warning fires,
// so we don't hang waiting for dispatch resolution.
if (msg.includes("greenfield")) {
s.active = false;
}
};
const deps = makeMockDeps({
deriveState: async () => {
deps.callLog.push("deriveState");
@ -2100,15 +2105,19 @@ test("autoLoop stops when worktree has no project files for execute-task (#1833)
await autoLoop(ctx, pi, s, deps);
assert.ok(
deps.callLog.includes("stopAuto"),
"should stop auto-mode when worktree has no project files",
);
const healthNotification = notifications.find(
(n) => n.includes("Worktree health check failed") && n.includes("no recognized project files"),
// Should NOT have stopped auto-mode due to health check — greenfield is allowed
const stoppedForHealth = notifications.find(
(n) => n.includes("Worktree health check failed"),
);
assert.ok(
healthNotification,
"should notify about missing project files in worktree",
!stoppedForHealth,
"should not stop with health check failure for greenfield project",
);
const greenfieldWarning = notifications.find(
(n) => n.includes("no recognized project files") && n.includes("greenfield"),
);
assert.ok(
greenfieldWarning,
"should warn about greenfield project (no project files)",
);
});

View file

@ -194,8 +194,6 @@ function makeMockDeps(overrides?: Partial<LoopDeps>): LoopDeps & { callLog: stri
runPreDispatchHooks: () => ({ firedHooks: [], action: "proceed" }),
getPriorSliceCompletionBlocker: () => null,
getMainBranch: () => "main",
collectObservabilityWarnings: async () => [],
buildObservabilityRepairBlock: () => null,
closeoutUnit: async () => {},
verifyExpectedArtifact: () => true,
clearUnitRuntimeRecord: () => {},

View file

@ -91,8 +91,6 @@ function makeMockDeps(
runPreDispatchHooks: () => ({ firedHooks: [], action: "proceed" }),
getPriorSliceCompletionBlocker: () => null,
getMainBranch: () => "main",
collectObservabilityWarnings: async () => [],
buildObservabilityRepairBlock: () => null,
closeoutUnit: async () => {},
verifyExpectedArtifact: () => true,
clearUnitRuntimeRecord: () => {},

View file

@ -1,474 +0,0 @@
import { validateTaskPlanContent, validateSlicePlanContent } from '../observability-validator.ts';
import { createTestContext } from './test-helpers.ts';
const { assertEq, assertTrue, report } = createTestContext();
// ═══════════════════════════════════════════════════════════════════════════
// validateTaskPlanContent — empty/missing Steps section
// ═══════════════════════════════════════════════════════════════════════════
console.log('\n=== validateTaskPlanContent: empty Steps section ===');
{
const content = `# T01: Some Task
## Description
Do something useful.
## Steps
## Verification
- Run the tests and confirm output.
`;
const issues = validateTaskPlanContent('T01-PLAN.md', content);
const stepsIssues = issues.filter(i => i.ruleId === 'empty_steps_section');
assertTrue(stepsIssues.length >= 1, 'empty Steps section produces empty_steps_section issue');
if (stepsIssues.length > 0) {
assertEq(stepsIssues[0].severity, 'warning', 'empty_steps_section severity is warning');
assertEq(stepsIssues[0].scope, 'task-plan', 'empty_steps_section scope is task-plan');
}
}
console.log('\n=== validateTaskPlanContent: missing Steps section entirely ===');
{
const content = `# T01: Some Task
## Description
Do something useful.
## Verification
- Run the tests.
`;
const issues = validateTaskPlanContent('T01-PLAN.md', content);
const stepsIssues = issues.filter(i => i.ruleId === 'empty_steps_section');
assertTrue(stepsIssues.length >= 1, 'missing Steps section produces empty_steps_section issue');
}
// ═══════════════════════════════════════════════════════════════════════════
// validateTaskPlanContent — placeholder-only Verification
// ═══════════════════════════════════════════════════════════════════════════
console.log('\n=== validateTaskPlanContent: placeholder-only Verification ===');
{
const content = `# T01: Some Task
## Steps
1. Do the thing.
2. Do the other thing.
## Verification
- {{placeholder verification step}}
- {{another placeholder}}
`;
const issues = validateTaskPlanContent('T01-PLAN.md', content);
const verifyIssues = issues.filter(i => i.ruleId === 'placeholder_verification');
assertTrue(verifyIssues.length >= 1, 'placeholder-only Verification produces placeholder_verification issue');
if (verifyIssues.length > 0) {
assertEq(verifyIssues[0].severity, 'warning', 'placeholder_verification severity is warning');
assertEq(verifyIssues[0].scope, 'task-plan', 'placeholder_verification scope is task-plan');
}
}
console.log('\n=== validateTaskPlanContent: Verification with only template text ===');
{
const content = `# T01: Some Task
## Steps
1. Do the thing.
## Verification
{{whatWasVerifiedAndHow commands run, tests passed, behavior confirmed}}
`;
const issues = validateTaskPlanContent('T01-PLAN.md', content);
const verifyIssues = issues.filter(i => i.ruleId === 'placeholder_verification');
assertTrue(verifyIssues.length >= 1, 'template-text-only Verification produces placeholder_verification issue');
}
// ═══════════════════════════════════════════════════════════════════════════
// validateSlicePlanContent — empty inline task entries
// ═══════════════════════════════════════════════════════════════════════════
console.log('\n=== validateSlicePlanContent: empty inline task entries ===');
{
const content = `# S01: Some Slice
**Goal:** Build the thing.
**Demo:** It works.
## Tasks
- [ ] **T01: First Task** \`est:20m\`
- [ ] **T02: Second Task** \`est:15m\`
## Verification
- Run the tests.
`;
const issues = validateSlicePlanContent('S01-PLAN.md', content);
const emptyTaskIssues = issues.filter(i => i.ruleId === 'empty_task_entry');
assertTrue(emptyTaskIssues.length >= 1, 'task entries with no description produce empty_task_entry issue');
if (emptyTaskIssues.length > 0) {
assertEq(emptyTaskIssues[0].severity, 'warning', 'empty_task_entry severity is warning');
assertEq(emptyTaskIssues[0].scope, 'slice-plan', 'empty_task_entry scope is slice-plan');
}
}
console.log('\n=== validateSlicePlanContent: task entries with content are fine ===');
{
  // Both tasks carry description bullets, so no empty_task_entry should fire.
  const slicePlan = `# S01: Some Slice
**Goal:** Build the thing.
**Demo:** It works.
## Tasks
- [ ] **T01: First Task** \`est:20m\`
- Why: Because it matters.
- Files: \`src/index.ts\`
- Do: Implement the feature.
- [ ] **T02: Second Task** \`est:15m\`
- Why: Also important.
- Do: Add tests.
## Verification
- Run the tests.
`;
  const emptyTaskIssues = validateSlicePlanContent('S01-PLAN.md', slicePlan)
    .filter((issue) => issue.ruleId === 'empty_task_entry');
  assertEq(emptyTaskIssues.length, 0, 'task entries with description content produce no empty_task_entry issues');
}
// ═══════════════════════════════════════════════════════════════════════════
// validateTaskPlanContent — scope_estimate over threshold
// ═══════════════════════════════════════════════════════════════════════════
console.log('\n=== validateTaskPlanContent: scope_estimate over threshold ===');
{
  // Frontmatter estimates deliberately above both thresholds (steps >= 10, files >= 12).
  const content = `---
estimated_steps: 12
estimated_files: 15
---
# T01: Big Task
## Steps
1. Step one.
2. Step two.
3. Step three.
## Verification
- Check it works.
`;
  const issues = validateTaskPlanContent('T01-PLAN.md', content);
  const stepsOverIssues = issues.filter(i => i.ruleId === 'scope_estimate_steps_high');
  const filesOverIssues = issues.filter(i => i.ruleId === 'scope_estimate_files_high');
  assertTrue(stepsOverIssues.length >= 1, 'estimated_steps=12 (>=10) produces scope_estimate_steps_high issue');
  assertTrue(filesOverIssues.length >= 1, 'estimated_files=15 (>=12) produces scope_estimate_files_high issue');
  if (stepsOverIssues.length > 0) {
    assertEq(stepsOverIssues[0].severity, 'warning', 'scope_estimate_steps_high severity is warning');
    assertEq(stepsOverIssues[0].scope, 'task-plan', 'scope_estimate_steps_high scope is task-plan');
  }
  if (filesOverIssues.length > 0) {
    assertEq(filesOverIssues[0].severity, 'warning', 'scope_estimate_files_high severity is warning');
    // Mirror the steps-high assertions above: pin the scope as well, so both
    // scope_estimate rules are held to the same contract.
    assertEq(filesOverIssues[0].scope, 'task-plan', 'scope_estimate_files_high scope is task-plan');
  }
}
// ═══════════════════════════════════════════════════════════════════════════
// validateTaskPlanContent — scope_estimate within limits
// ═══════════════════════════════════════════════════════════════════════════
console.log('\n=== validateTaskPlanContent: scope_estimate within limits ===');
{
  // Estimates safely under both thresholds — neither scope rule should fire.
  const smallPlan = `---
estimated_steps: 4
estimated_files: 6
---
# T01: Small Task
## Steps
1. Do the thing.
## Verification
- Verify it works.
`;
  const scopeRuleIds = ['scope_estimate_steps_high', 'scope_estimate_files_high'];
  const scopeIssues = validateTaskPlanContent('T01-PLAN.md', smallPlan)
    .filter((issue) => scopeRuleIds.includes(issue.ruleId));
  assertEq(scopeIssues.length, 0, 'scope_estimate within limits produces no scope issues');
}
// ═══════════════════════════════════════════════════════════════════════════
// validateTaskPlanContent — missing scope_estimate (no warning)
// ═══════════════════════════════════════════════════════════════════════════
console.log('\n=== validateTaskPlanContent: missing scope_estimate ===');
{
  // No frontmatter at all — absence of estimates must not be treated as excess.
  const noFrontmatterPlan = `# T01: No Frontmatter Task
## Steps
1. Do the thing.
## Verification
- Verify it works.
`;
  const scopeRuleIds = ['scope_estimate_steps_high', 'scope_estimate_files_high'];
  const scopeIssues = validateTaskPlanContent('T01-PLAN.md', noFrontmatterPlan)
    .filter((issue) => scopeRuleIds.includes(issue.ruleId));
  assertEq(scopeIssues.length, 0, 'missing scope_estimate produces no scope issues');
}
console.log('\n=== validateTaskPlanContent: frontmatter without scope keys ===');
{
  // Frontmatter exists but carries unrelated keys — still no scope warnings.
  const otherFrontmatterPlan = `---
id: T01
parent: S01
---
# T01: Task With Other Frontmatter
## Steps
1. Do the thing.
## Verification
- Verify it works.
`;
  const scopeRuleIds = ['scope_estimate_steps_high', 'scope_estimate_files_high'];
  const scopeIssues = validateTaskPlanContent('T01-PLAN.md', otherFrontmatterPlan)
    .filter((issue) => scopeRuleIds.includes(issue.ruleId));
  assertEq(scopeIssues.length, 0, 'frontmatter without scope keys produces no scope issues');
}
// ═══════════════════════════════════════════════════════════════════════════
// Clean plans — no false positives
// ═══════════════════════════════════════════════════════════════════════════
console.log('\n=== Clean task plan: no plan-quality issues ===');
{
  // Fully populated, well-formed task plan — the validator must stay silent.
  const cleanTaskPlan = `---
estimated_steps: 5
estimated_files: 3
---
# T01: Well-Formed Task
## Description
A real task with real content.
## Steps
1. Read the input files.
2. Parse the configuration.
3. Transform the data.
4. Write the output.
5. Verify the results.
## Must-Haves
- [ ] Output file is valid JSON
- [ ] All input records are processed
## Verification
- Run \`node --test tests/transform.test.ts\` — all assertions pass
- Manually inspect output.json for correct structure
## Observability Impact
- Signals added/changed: structured error log on parse failure
- How a future agent inspects this: check stderr for JSON parse errors
- Failure state exposed: exit code 1 + error message on invalid input
`;
  const planQualityRules = new Set([
    'empty_steps_section',
    'placeholder_verification',
    'scope_estimate_steps_high',
    'scope_estimate_files_high',
  ]);
  const planQualityIssues = validateTaskPlanContent('T01-PLAN.md', cleanTaskPlan)
    .filter((issue) => planQualityRules.has(issue.ruleId));
  assertEq(planQualityIssues.length, 0, 'clean task plan produces no plan-quality issues');
}
console.log('\n=== Clean slice plan: no plan-quality issues ===');
{
  // Every task entry carries full description bullets — no empty_task_entry expected.
  const cleanSlicePlan = `# S01: Well-Formed Slice
**Goal:** Build a complete feature.
**Demo:** Run the test suite and see all green.
## Tasks
- [ ] **T01: Create tests** \`est:20m\`
- Why: Tests define the contract before implementation.
- Files: \`tests/feature.test.ts\`
- Do: Write comprehensive test assertions.
- Verify: Test file runs without syntax errors.
- [ ] **T02: Implement feature** \`est:30m\`
- Why: Core implementation.
- Files: \`src/feature.ts\`
- Do: Build the feature to make tests pass.
- Verify: All tests pass.
## Verification
- \`node --test tests/feature.test.ts\` — all assertions pass
- Check error output for diagnostic messages
## Observability / Diagnostics
- Runtime signals: structured error objects with error codes
- Inspection surfaces: test output shows pass/fail counts
- Failure visibility: exit code 1 on failure with descriptive message
- Redaction constraints: none
`;
  const planQualityIssues = validateSlicePlanContent('S01-PLAN.md', cleanSlicePlan)
    .filter((issue) => issue.ruleId === 'empty_task_entry');
  assertEq(planQualityIssues.length, 0, 'clean slice plan produces no empty_task_entry issues');
}
// ═══════════════════════════════════════════════════════════════════════════
// validateTaskPlanContent — missing output file paths
// ═══════════════════════════════════════════════════════════════════════════
console.log('\n=== validateTaskPlanContent: missing output file paths ===');
{
  // Expected Output section is prose-only — no backticked file path present.
  const pathlessOutputPlan = `# T01: Some Task
## Description
Do something.
## Steps
1. Do the thing
## Verification
- Check it works
## Expected Output
This task produces the main output.
`;
  const outputIssues = validateTaskPlanContent('T01-PLAN.md', pathlessOutputPlan)
    .filter((issue) => issue.ruleId === 'missing_output_file_paths');
  assertTrue(outputIssues.length >= 1, 'Expected Output without file paths triggers missing_output_file_paths');
}
console.log('\n=== validateTaskPlanContent: valid output file paths ===');
{
  // Expected Output names a concrete file — the warning must not fire.
  const pathfulOutputPlan = `# T01: Some Task
## Description
Do something.
## Steps
1. Do the thing
## Verification
- Check it works
## Expected Output
- \`src/types.ts\` — New type definitions
`;
  const outputIssues = validateTaskPlanContent('T01-PLAN.md', pathfulOutputPlan)
    .filter((issue) => issue.ruleId === 'missing_output_file_paths');
  assertEq(outputIssues.length, 0, 'Expected Output with file paths does not trigger warning');
}
console.log('\n=== validateTaskPlanContent: missing input file paths (info severity) ===');
{
  // Inputs section is prose-only; this is only informational, not a warning.
  const pathlessInputsPlan = `# T01: Some Task
## Description
Do something.
## Steps
1. Do the thing
## Verification
- Check it works
## Inputs
Prior task summary insights about the architecture.
## Expected Output
- \`src/output.ts\` — Output file
`;
  const inputIssues = validateTaskPlanContent('T01-PLAN.md', pathlessInputsPlan)
    .filter((issue) => issue.ruleId === 'missing_input_file_paths');
  assertTrue(inputIssues.length >= 1, 'Inputs without file paths triggers missing_input_file_paths');
  const [firstInputIssue] = inputIssues;
  if (firstInputIssue) {
    assertEq(firstInputIssue.severity, 'info', 'missing_input_file_paths is info severity (not warning)');
  }
}
console.log('\n=== validateTaskPlanContent: no Expected Output section at all ===');
{
  // The section is absent entirely — treated the same as present-but-pathless.
  const noOutputSectionPlan = `# T01: Some Task
## Description
Do something.
## Steps
1. Do the thing
## Verification
- Check it works
`;
  const outputIssues = validateTaskPlanContent('T01-PLAN.md', noOutputSectionPlan)
    .filter((issue) => issue.ruleId === 'missing_output_file_paths');
  assertTrue(outputIssues.length >= 1, 'Missing Expected Output section triggers missing_output_file_paths');
}
report();

View file

@ -240,148 +240,6 @@ test("verification-evidence: formatEvidenceTable uses ✅/❌ emoji for pass/fai
assert.ok(table.includes("❌ fail"), "failing check should have ❌ fail");
});
// ─── Validator Rule Tests (T03) ──────────────────────────────────────────────
import { validateTaskSummaryContent } from "../observability-validator.ts";
const MINIMAL_SUMMARY_WITH_EVIDENCE = `---
observability_surfaces:
- gate-output
---
# T03 Summary
## Diagnostics
Run \`npm test\` to verify.
## Verification Evidence
| # | Command | Exit Code | Verdict | Duration |
|---|---------|-----------|---------|----------|
| 1 | npm run typecheck | 0 | pass | 2.3s |
`;
const MINIMAL_SUMMARY_NO_EVIDENCE = `---
observability_surfaces:
- gate-output
---
# T03 Summary
## Diagnostics
Run \`npm test\` to verify.
`;
const MINIMAL_SUMMARY_PLACEHOLDER_EVIDENCE = `---
observability_surfaces:
- gate-output
---
# T03 Summary
## Diagnostics
Run \`npm test\` to verify.
## Verification Evidence
{{evidence_table}}
`;
const MINIMAL_SUMMARY_NO_CHECKS_EVIDENCE = `---
observability_surfaces:
- gate-output
---
# T03 Summary
## Diagnostics
Run \`npm test\` to verify.
## Verification Evidence
_No verification checks discovered._
`;
test("verification-evidence: validator accepts summary with real evidence table", () => {
const issues = validateTaskSummaryContent("T03-SUMMARY.md", MINIMAL_SUMMARY_WITH_EVIDENCE);
const evidenceIssues = issues.filter(
(i) => i.ruleId === "evidence_block_missing" || i.ruleId === "evidence_block_placeholder",
);
assert.equal(evidenceIssues.length, 0, "no evidence warnings for real table");
});
test("verification-evidence: validator warns when evidence section is missing", () => {
const issues = validateTaskSummaryContent("T03-SUMMARY.md", MINIMAL_SUMMARY_NO_EVIDENCE);
const match = issues.find((i) => i.ruleId === "evidence_block_missing");
assert.ok(match, "should produce evidence_block_missing warning");
assert.equal(match!.severity, "warning");
assert.equal(match!.scope, "task-summary");
});
test("verification-evidence: validator warns when evidence section has only placeholder text", () => {
const issues = validateTaskSummaryContent("T03-SUMMARY.md", MINIMAL_SUMMARY_PLACEHOLDER_EVIDENCE);
const match = issues.find((i) => i.ruleId === "evidence_block_placeholder");
assert.ok(match, "should produce evidence_block_placeholder warning");
assert.equal(match!.severity, "warning");
});
test("verification-evidence: validator accepts 'no checks discovered' as valid content", () => {
const issues = validateTaskSummaryContent("T03-SUMMARY.md", MINIMAL_SUMMARY_NO_CHECKS_EVIDENCE);
const evidenceIssues = issues.filter(
(i) => i.ruleId === "evidence_block_missing" || i.ruleId === "evidence_block_placeholder",
);
assert.equal(evidenceIssues.length, 0, "no evidence warnings for 'no checks discovered'");
});
// ─── Integration Test: Full Chain (T03) ──────────────────────────────────────
test("verification-evidence: integration — VerificationResult → JSON → table → validator accepts", () => {
const tmp = makeTempDir("ve-integration");
try {
// 1. Create a VerificationResult with 2 checks (1 pass, 1 fail)
const result = makeResult({
passed: false,
checks: [
{ command: "npm run typecheck", exitCode: 0, stdout: "ok", stderr: "", durationMs: 1500 },
{ command: "npm run test:unit", exitCode: 1, stdout: "", stderr: "1 failed", durationMs: 3200 },
],
discoverySource: "package-json",
});
// 2. Write JSON to temp dir and read it back
writeVerificationJSON(result, tmp, "T03");
const jsonPath = join(tmp, "T03-VERIFY.json");
assert.ok(existsSync(jsonPath), "JSON file should exist");
const json = JSON.parse(readFileSync(jsonPath, "utf-8"));
assert.equal(json.schemaVersion, 1, "schemaVersion should be 1");
assert.equal(json.passed, false, "passed should be false");
assert.equal(json.checks.length, 2, "should have 2 checks");
assert.equal(json.checks[0].verdict, "pass", "first check should pass");
assert.equal(json.checks[1].verdict, "fail", "second check should fail");
// 3. Generate evidence table and embed in a mock summary
const table = formatEvidenceTable(result);
assert.ok(table.includes("npm run typecheck"), "table should contain first command");
assert.ok(table.includes("npm run test:unit"), "table should contain second command");
const fullSummary = `---
observability_surfaces:
- gate-output
---
# T03 Summary
## Diagnostics
Run \`npm test\` to verify.
## Verification Evidence
${table}
`;
// 4. Validate — no evidence warnings
const issues = validateTaskSummaryContent("T03-SUMMARY.md", fullSummary);
const evidenceIssues = issues.filter(
(i) => i.ruleId === "evidence_block_missing" || i.ruleId === "evidence_block_placeholder",
);
assert.equal(evidenceIssues.length, 0, "validator should accept real evidence from formatEvidenceTable");
} finally {
rmSync(tmp, { recursive: true, force: true });
}
});
// ─── Retry Evidence Field Tests (S03/T01) ─────────────────────────────────────
test("verification-evidence: writeVerificationJSON with retryAttempt and maxRetries includes them in output", () => {

View file

@ -36,18 +36,24 @@ function createGitRepo(): string {
* Returns true when the directory would PASS the health check (dispatch
* proceeds), false when it would FAIL (dispatch blocked).
*
* This mirrors the fixed logic: .git must exist, AND at least one
* PROJECT_FILES entry or a src/ directory must exist.
* The only hard gate is .git — project files are advisory (greenfield
* projects won't have them yet). Returns a plain boolean; use
* hasRecognizedProjectFiles() to tell "has project files" from "greenfield".
*/
function wouldPassHealthCheck(basePath: string, existsSyncFn: (p: string) => boolean): boolean {
  // A .git directory is the single hard gate — greenfield projects
  // (no recognized project files yet) still proceed, with a warning.
  return existsSyncFn(join(basePath, ".git"));
}
/** Whether the directory has recognized project files (used for greenfield detection). */
function hasRecognizedProjectFiles(basePath: string, existsSyncFn: (p: string) => boolean): boolean {
  // Probe every known project marker, then fall back to a src/ directory.
  const markers = [...PROJECT_FILES, "src"];
  return markers.some((marker) => existsSyncFn(join(basePath, marker)));
}
@ -168,10 +174,11 @@ test("health check fails for directory with no .git", () => {
}
});
test("health check fails for empty git repo with no project files", () => {
test("health check passes for empty git repo (greenfield project)", () => {
const dir = createGitRepo();
try {
assert.ok(!wouldPassHealthCheck(dir, existsSync), "empty git repo should fail health check");
assert.ok(wouldPassHealthCheck(dir, existsSync), "empty git repo should pass health check (greenfield)");
assert.ok(!hasRecognizedProjectFiles(dir, existsSync), "empty git repo has no recognized project files");
} finally {
rmSync(dir, { recursive: true, force: true });
}

View file

@ -12,7 +12,6 @@ import {
import { deriveState } from "./state.js";
import { milestoneIdSort, findMilestoneIds } from "./guided-flow.js";
import type { RiskLevel } from "./types.js";
import { type ValidationIssue, validateCompleteBoundary, validatePlanBoundary } from "./observability-validator.js";
import { getSliceBranchName, detectWorktreeName } from "./worktree.js";
export interface WorkspaceTaskTarget {
@ -60,7 +59,7 @@ export interface GSDWorkspaceIndex {
phase: string;
};
scopes: WorkspaceScopeTarget[];
validationIssues: ValidationIssue[];
validationIssues: Array<Record<string, unknown>>;
}
// Extract milestone title from roadmap header without using parsers.
@ -113,20 +112,12 @@ async function indexSlice(basePath: string, milestoneId: string, sliceId: string
}
export interface IndexWorkspaceOptions {
/**
* When true, run validatePlanBoundary and validateCompleteBoundary for each slice.
* Skipped by default validation is expensive (content analysis) and only needed
* for explicit doctor/audit flows. The /gsd status dashboard and scope pickers
* don't need the full issue list.
*/
validate?: boolean;
}
export async function indexWorkspace(basePath: string, opts: IndexWorkspaceOptions = {}): Promise<GSDWorkspaceIndex> {
const milestoneIds = findMilestoneIds(basePath);
const milestones: WorkspaceMilestoneTarget[] = [];
const validationIssues: ValidationIssue[] = [];
const runValidation = opts.validate === true;
for (const milestoneId of milestoneIds) {
const roadmapPath = resolveMilestoneFile(basePath, milestoneId, "ROADMAP") ?? undefined;
@ -149,27 +140,13 @@ export async function indexWorkspace(basePath: string, opts: IndexWorkspaceOptio
}
if (normSlices!.length > 0) {
// Parallelise all per-slice I/O: indexSlice + (optional) validation calls run concurrently.
// Order is preserved via Promise.all on an array built from normalized slices.
const sliceResults = await Promise.all(
normSlices!.map(async (slice) => {
if (runValidation) {
const [indexedSlice, planIssues, completeIssues] = await Promise.all([
indexSlice(basePath, milestoneId, slice.id, slice.title, slice.done, { risk: slice.risk as RiskLevel, depends: slice.depends, demo: slice.demo }),
validatePlanBoundary(basePath, milestoneId, slice.id),
validateCompleteBoundary(basePath, milestoneId, slice.id),
]);
return { indexedSlice, issues: [...planIssues, ...completeIssues] };
}
const indexedSlice = await indexSlice(basePath, milestoneId, slice.id, slice.title, slice.done, { risk: slice.risk as RiskLevel, depends: slice.depends, demo: slice.demo });
return { indexedSlice, issues: [] as ValidationIssue[] };
return indexSlice(basePath, milestoneId, slice.id, slice.title, slice.done, { risk: slice.risk as RiskLevel, depends: slice.depends, demo: slice.demo });
}),
);
for (const { indexedSlice, issues } of sliceResults) {
slices.push(indexedSlice);
validationIssues.push(...issues);
}
slices.push(...sliceResults);
}
}
@ -199,7 +176,7 @@ export async function indexWorkspace(basePath: string, opts: IndexWorkspaceOptio
}
}
return { milestones, active, scopes, validationIssues };
return { milestones, active, scopes, validationIssues: [] };
}
export async function listDoctorScopeSuggestions(basePath: string): Promise<Array<{ value: string; label: string }>> {
@ -219,8 +196,7 @@ export async function listDoctorScopeSuggestions(basePath: string): Promise<Arra
}
export async function getSuggestedNextCommands(basePath: string): Promise<string[]> {
// Run validation here since we surface a /gsd doctor audit hint when issues exist.
const index = await indexWorkspace(basePath, { validate: true });
const index = await indexWorkspace(basePath);
const scope = index.active.milestoneId && index.active.sliceId
? `${index.active.milestoneId}/${index.active.sliceId}`
: index.active.milestoneId;
@ -230,7 +206,6 @@ export async function getSuggestedNextCommands(basePath: string): Promise<string
if (index.active.phase === "executing" || index.active.phase === "summarizing") commands.add("/gsd auto");
if (scope) commands.add(`/gsd doctor ${scope}`);
if (scope) commands.add(`/gsd doctor fix ${scope}`);
if (index.validationIssues.length > 0 && scope) commands.add(`/gsd doctor audit ${scope}`);
commands.add("/gsd status");
return [...commands];
}