From ad380d56026cc8637f5b551eee342369708adbfc Mon Sep 17 00:00:00 2001
From: Mikael Hugo
Date: Sun, 10 May 2026 20:22:55 +0200
Subject: [PATCH] fix(db-first): remove .sf/*.md direct-write instructions
 from prompts; requirement-promoter uses DB only
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Prompts: replace 'append to .sf/DECISIONS.md' → 'call save_decision' in plan-slice, refine-slice, queue, guided-execute-task; in heal-skill, replace 'append a note to .sf/KNOWLEDGE.md' → 'call save_knowledge'
- Prompts: replace 'Read .sf/DECISIONS.md if it exists' / 'Read .sf/REQUIREMENTS.md if it exists' with 'injected from DB into system context' in guided-plan-slice; replace the REQUIREMENTS.md read the same way in guided-research-slice
- requirement-promoter: remove dead appendRequirementRow() and readHighestRNumber(file) that read/wrote REQUIREMENTS.md; replace with DB-only readHighestRNumber() using getActiveRequirements(); remove sfRoot import, mkdirSync, writeFileSync
- requirement-promoter: pre-compute highestNum once per sweep, before the cluster loop, instead of re-reading it for each cluster (fixes ID collision when promoting multiple at once)

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
---
 .../sf/prompts/guided-execute-task.md         |  2 +-
 .../sf/prompts/guided-plan-slice.md           |  2 +-
 .../sf/prompts/guided-research-slice.md       |  2 +-
 .../extensions/sf/prompts/heal-skill.md       |  2 +-
 .../extensions/sf/prompts/plan-slice.md       |  2 +-
 src/resources/extensions/sf/prompts/queue.md  |  2 +-
 .../extensions/sf/prompts/refine-slice.md     |  2 +-
 .../extensions/sf/requirement-promoter.js     | 79 +++++----------
 8 files changed, 25 insertions(+), 68 deletions(-)

diff --git a/src/resources/extensions/sf/prompts/guided-execute-task.md b/src/resources/extensions/sf/prompts/guided-execute-task.md
index 648cca11a..b4d8e5e17 100644
--- a/src/resources/extensions/sf/prompts/guided-execute-task.md
+++ b/src/resources/extensions/sf/prompts/guided-execute-task.md
@@ -1,4 +1,4 @@
-Execute the next task: {{taskId}} ("{{taskTitle}}") in slice {{sliceId}} of milestone {{milestoneId}}. Read the task plan (`{{taskId}}-PLAN.md`), load relevant summaries from prior tasks, and execute each step. Before implementation, run the swarm opportunity check: use a 2-3 worker same-model `subagent({ tasks: [...] })` swarm only when the task splits into independent shards with explicit disjoint file/directory ownership, no shared-interface or lockfile edits, shard-local verification, and clear wall-clock savings; otherwise execute single-agent. If you swarm, give each worker its write scope and expected output files, then inspect `git status --short`, synthesize, resolve conflicts, and run final verification yourself. Verify must-haves when done. If the task touches UI, browser flows, DOM behavior, or user-visible web state, exercise the real flow in the browser, prefer `browser_batch` for obvious sequences, prefer `browser_assert` for explicit pass/fail verification, use `browser_diff` when an action's effect is ambiguous, and use browser diagnostics when validating async or failure-prone UI. If you made an architectural, pattern, or library decision, append it to `.sf/DECISIONS.md`. Use the **Task Summary** output template below. Call `complete_task` to record completion (it writes the summary, toggles the checkbox, and persists to DB atomically). {{skillActivation}} If running long and not all steps are finished, stop implementing and prioritize writing a clean partial summary over attempting one more step — a recoverable handoff is more valuable than a half-finished step with no documentation. 
If verification fails, debug methodically: form a hypothesis and test that specific theory before changing anything, change one variable at a time, read entire functions not just the suspect line, distinguish observable facts from assumptions, and if 3+ fixes fail without progress stop and reassess your mental model — list what you know for certain, what you've ruled out, and form fresh hypotheses. Don't fix symptoms — understand why something fails before changing code. If the task plan includes Failure Modes, Load Profile, or Negative Tests sections, implement and verify them: handle each dependency's error/timeout/malformed paths (Q5), protect against identified 10x breakpoints (Q6), and write specified negative test cases (Q7). +Execute the next task: {{taskId}} ("{{taskTitle}}") in slice {{sliceId}} of milestone {{milestoneId}}. Read the task plan (`{{taskId}}-PLAN.md`), load relevant summaries from prior tasks, and execute each step. Before implementation, run the swarm opportunity check: use a 2-3 worker same-model `subagent({ tasks: [...] })` swarm only when the task splits into independent shards with explicit disjoint file/directory ownership, no shared-interface or lockfile edits, shard-local verification, and clear wall-clock savings; otherwise execute single-agent. If you swarm, give each worker its write scope and expected output files, then inspect `git status --short`, synthesize, resolve conflicts, and run final verification yourself. Verify must-haves when done. If the task touches UI, browser flows, DOM behavior, or user-visible web state, exercise the real flow in the browser, prefer `browser_batch` for obvious sequences, prefer `browser_assert` for explicit pass/fail verification, use `browser_diff` when an action's effect is ambiguous, and use browser diagnostics when validating async or failure-prone UI. If you made an architectural, pattern, or library decision, call `save_decision` — the tool auto-assigns IDs and regenerates `.sf/DECISIONS.md` automatically. Use the **Task Summary** output template below. Call `complete_task` to record completion (it writes the summary, toggles the checkbox, and persists to DB atomically). {{skillActivation}} If running long and not all steps are finished, stop implementing and prioritize writing a clean partial summary over attempting one more step — a recoverable handoff is more valuable than a half-finished step with no documentation. If verification fails, debug methodically: form a hypothesis and test that specific theory before changing anything, change one variable at a time, read entire functions not just the suspect line, distinguish observable facts from assumptions, and if 3+ fixes fail without progress stop and reassess your mental model — list what you know for certain, what you've ruled out, and form fresh hypotheses. Don't fix symptoms — understand why something fails before changing code. If the task plan includes Failure Modes, Load Profile, or Negative Tests sections, implement and verify them: handle each dependency's error/timeout/malformed paths (Q5), protect against identified 10x breakpoints (Q6), and write specified negative test cases (Q7). 
### Report sf-internal observations diff --git a/src/resources/extensions/sf/prompts/guided-plan-slice.md b/src/resources/extensions/sf/prompts/guided-plan-slice.md index 8603cbe07..5f4d3e791 100644 --- a/src/resources/extensions/sf/prompts/guided-plan-slice.md +++ b/src/resources/extensions/sf/prompts/guided-plan-slice.md @@ -1,4 +1,4 @@ -Plan slice {{sliceId}} ("{{sliceTitle}}") of milestone {{milestoneId}}. Read `.sf/DECISIONS.md` if it exists — respect existing decisions. Read `.sf/REQUIREMENTS.md` if it exists — identify which Active requirements the roadmap says this slice owns or supports, and ensure the plan delivers them. Read the roadmap boundary map, any existing context/research files, and dependency summaries. Use the **Slice Plan** and **Task Plan** output templates below. Decompose into tasks with must-haves. Fill the `Proof Level` and `Integration Closure` sections truthfully so the plan says what class of proof this slice really delivers and what end-to-end wiring still remains. If the slice changes how SF is driven, observed, integrated, or automated, fill `Interface Axes` and keep surface (TUI/CLI/web/editor/machine), protocol (ACP/RPC/stdio/HTTP/wire), output format (text/json/stream-json), run control (manual/assisted/autonomous), and permission profile (restricted/normal/trusted/unrestricted) separate. For each task, decide whether execution can safely swarm: mark it swarmable only if it can split into 2-3 independent shards with disjoint file/directory ownership, shard-local verification, and no shared-interface, lockfile, migration, generated-artifact, or sequencing conflict; otherwise make the task explicitly single-agent. Call `plan_slice` to persist the slice plan — the tool writes `{{sliceId}}-PLAN.md` and individual `T##-PLAN.md` files to disk and persists to DB. The `plan_slice` payload MUST include `planningMeeting` as a populated object; empty, null, or missing planningMeeting is not acceptable. Use the canonical M004 meeting roles: Trigger, Product Manager, User Advocate, Customer Panel, Business, Researcher, Delivery Lead, Partner, Combatant, Architect, Moderator, Recommended Route, and Confidence. The tool's Product Manager field is named `pm`, and the Confidence field is named `confidenceSummary`; keep existing tool field names while covering the canonical roles. If you are tempted to skip the meeting because the slice is simple, write a brief one-line per role explaining why it is simple. Do **not** write plan files manually — use the DB-backed tool so state stays consistent. If planning produces structural decisions, call `save_decision` for each — the tool auto-assigns IDs and regenerates `.sf/DECISIONS.md` automatically. 
{{skillActivation}} Before finishing, self-audit the plan: every must-have maps to at least one task, every task has complete sections (steps, must-haves, verification, observability impact, inputs, and expected output), task ordering is consistent with no circular references, every pair of artifacts that must connect has an explicit wiring step, task scope targets 2–5 steps and 3–8 files (6–8 steps or 8–10 files — consider splitting; 10+ steps or 12+ files — must split), any swarmable task has disjoint Expected Output paths/directories and explains shard ownership, the plan honors locked decisions from context/research/decisions artifacts, the proof-level wording does not overclaim live integration if only fixture/contract proof is planned, every Active requirement this slice owns has at least one task with verification that proves it is met, and every task produces real user-facing progress — if the slice has a UI surface at least one task builds the real UI, if it has an API at least one task connects it to a real data source, and showing the completed result to a non-technical stakeholder would demonstrate real product progress rather than developer artifacts, and quality gate coverage — for non-trivial slices, Threat Surface (Q3: abuse, data exposure, input trust) and Requirement Impact (Q4: requirements touched, re-verify, decisions revisited) sections are present. For non-trivial tasks, Failure Modes (Q5), Load Profile (Q6), Negative Tests (Q7), and Interface Impact when relevant are filled in task plans. +Plan slice {{sliceId}} ("{{sliceTitle}}") of milestone {{milestoneId}}. Decisions are injected from DB into system context — respect existing decisions. Requirements are injected from DB into system context — identify which Active requirements the roadmap says this slice owns or supports, and ensure the plan delivers them. Read the roadmap boundary map, any existing context/research files, and dependency summaries. Use the **Slice Plan** and **Task Plan** output templates below. Decompose into tasks with must-haves. Fill the `Proof Level` and `Integration Closure` sections truthfully so the plan says what class of proof this slice really delivers and what end-to-end wiring still remains. If the slice changes how SF is driven, observed, integrated, or automated, fill `Interface Axes` and keep surface (TUI/CLI/web/editor/machine), protocol (ACP/RPC/stdio/HTTP/wire), output format (text/json/stream-json), run control (manual/assisted/autonomous), and permission profile (restricted/normal/trusted/unrestricted) separate. For each task, decide whether execution can safely swarm: mark it swarmable only if it can split into 2-3 independent shards with disjoint file/directory ownership, shard-local verification, and no shared-interface, lockfile, migration, generated-artifact, or sequencing conflict; otherwise make the task explicitly single-agent. Call `plan_slice` to persist the slice plan — the tool writes `{{sliceId}}-PLAN.md` and individual `T##-PLAN.md` files to disk and persists to DB. The `plan_slice` payload MUST include `planningMeeting` as a populated object; empty, null, or missing planningMeeting is not acceptable. Use the canonical M004 meeting roles: Trigger, Product Manager, User Advocate, Customer Panel, Business, Researcher, Delivery Lead, Partner, Combatant, Architect, Moderator, Recommended Route, and Confidence. 
The tool's Product Manager field is named `pm`, and the Confidence field is named `confidenceSummary`; keep existing tool field names while covering the canonical roles. If you are tempted to skip the meeting because the slice is simple, write a brief one-line per role explaining why it is simple. Do **not** write plan files manually — use the DB-backed tool so state stays consistent. If planning produces structural decisions, call `save_decision` for each — the tool auto-assigns IDs and regenerates `.sf/DECISIONS.md` automatically. {{skillActivation}} Before finishing, self-audit the plan: every must-have maps to at least one task, every task has complete sections (steps, must-haves, verification, observability impact, inputs, and expected output), task ordering is consistent with no circular references, every pair of artifacts that must connect has an explicit wiring step, task scope targets 2–5 steps and 3–8 files (6–8 steps or 8–10 files — consider splitting; 10+ steps or 12+ files — must split), any swarmable task has disjoint Expected Output paths/directories and explains shard ownership, the plan honors locked decisions from context/research/decisions artifacts, the proof-level wording does not overclaim live integration if only fixture/contract proof is planned, every Active requirement this slice owns has at least one task with verification that proves it is met, and every task produces real user-facing progress — if the slice has a UI surface at least one task builds the real UI, if it has an API at least one task connects it to a real data source, and showing the completed result to a non-technical stakeholder would demonstrate real product progress rather than developer artifacts, and quality gate coverage — for non-trivial slices, Threat Surface (Q3: abuse, data exposure, input trust) and Requirement Impact (Q4: requirements touched, re-verify, decisions revisited) sections are present. For non-trivial tasks, Failure Modes (Q5), Load Profile (Q6), Negative Tests (Q7), and Interface Impact when relevant are filled in task plans. ### Report sf-internal observations diff --git a/src/resources/extensions/sf/prompts/guided-research-slice.md b/src/resources/extensions/sf/prompts/guided-research-slice.md index 83029f3e7..99659d847 100644 --- a/src/resources/extensions/sf/prompts/guided-research-slice.md +++ b/src/resources/extensions/sf/prompts/guided-research-slice.md @@ -1,4 +1,4 @@ -Research slice {{sliceId}} ("{{sliceTitle}}") of milestone {{milestoneId}}. Read `.sf/DECISIONS.md` if it exists — respect existing decisions, don't contradict them. Read `.sf/REQUIREMENTS.md` if it exists — identify which Active requirements this slice owns or supports and target research toward risks, unknowns, and constraints that could affect delivery of those requirements. {{skillActivation}} Use native `lsp` first for symbol lookup, references, and cross-file navigation. For direct text inspection use `rg`/`find` for targeted reads, or `scout` if the area is broad or unfamiliar. If the repository is checked out locally, GitHub code search is a scarce remote-only fallback: do not use GitHub `/search/code` for that local repo; use `git grep` for tracked-file global search, `rg` for broader worktree text search, plus `lsp`, `sift_search`, or `codebase_search` instead. 
GitHub's `code_search` bucket is small and separate from normal REST/GraphQL quotas, so use it only for repositories that are not on disk, dedupe repeated queries, and treat `403` rate-limit responses as a signal to wait for reset or continue with local evidence. If there are 2-3 independent unknowns, use a research swarm with parallel `scout`/`researcher` subagents and synthesize their findings here; do not swarm narrow sequence-dependent research. Check libraries DeepWiki-first: `ask_question` / `read_wiki_structure` / `read_wiki_contents` for any GitHub-hosted library; fall back to `resolve_library` / `get_library_docs` (Context7, capped at 1000 req/month free) for npm/pypi/crates packages DeepWiki doesn't have. Skip both for libraries already used in this codebase. Use the **Research** output template below. Call `save_summary` with `milestone_id: {{milestoneId}}`, `slice_id: {{sliceId}}`, `artifact_type: "RESEARCH"`, and the research content — the tool writes the file to disk and persists to DB. After `save_summary` succeeds, stop immediately; do **not** call `new_milestone_id`, `plan_milestone`, `plan_slice`, `plan_task`, or any planning/creation tool. +Research slice {{sliceId}} ("{{sliceTitle}}") of milestone {{milestoneId}}. Read `.sf/DECISIONS.md` if it exists — respect existing decisions, don't contradict them. Requirements are injected from DB into system context — identify which Active requirements this slice owns or supports and target research toward risks, unknowns, and constraints that could affect delivery of those requirements. {{skillActivation}} Use native `lsp` first for symbol lookup, references, and cross-file navigation. For direct text inspection use `rg`/`find` for targeted reads, or `scout` if the area is broad or unfamiliar. If the repository is checked out locally, GitHub code search is a scarce remote-only fallback: do not use GitHub `/search/code` for that local repo; use `git grep` for tracked-file global search, `rg` for broader worktree text search, plus `lsp`, `sift_search`, or `codebase_search` instead. GitHub's `code_search` bucket is small and separate from normal REST/GraphQL quotas, so use it only for repositories that are not on disk, dedupe repeated queries, and treat `403` rate-limit responses as a signal to wait for reset or continue with local evidence. If there are 2-3 independent unknowns, use a research swarm with parallel `scout`/`researcher` subagents and synthesize their findings here; do not swarm narrow sequence-dependent research. Check libraries DeepWiki-first: `ask_question` / `read_wiki_structure` / `read_wiki_contents` for any GitHub-hosted library; fall back to `resolve_library` / `get_library_docs` (Context7, capped at 1000 req/month free) for npm/pypi/crates packages DeepWiki doesn't have. Skip both for libraries already used in this codebase. Use the **Research** output template below. Call `save_summary` with `milestone_id: {{milestoneId}}`, `slice_id: {{sliceId}}`, `artifact_type: "RESEARCH"`, and the research content — the tool writes the file to disk and persists to DB. After `save_summary` succeeds, stop immediately; do **not** call `new_milestone_id`, `plan_milestone`, `plan_slice`, `plan_task`, or any planning/creation tool. **You are the scout.** A planner agent reads your output in a fresh context to decompose this slice into tasks. Write for the planner — surface key files, where the work divides naturally, what to build first, and how to verify. 
If the research doc is vague, the planner re-explores code you already read. If it's precise, the planner decomposes immediately. diff --git a/src/resources/extensions/sf/prompts/heal-skill.md b/src/resources/extensions/sf/prompts/heal-skill.md index 0dbb503c6..ad8eca6ca 100644 --- a/src/resources/extensions/sf/prompts/heal-skill.md +++ b/src/resources/extensions/sf/prompts/heal-skill.md @@ -17,7 +17,7 @@ Analyze the just-completed unit ({{unitId}}) for skill drift. 4. **Assess drift severity**: - **None**: Agent followed skill correctly → write "No drift detected" to {{healArtifact}} and stop - - **Minor**: Agent found a better approach but skill isn't wrong → append a note to `.sf/KNOWLEDGE.md` and stop + - **Minor**: Agent found a better approach but skill isn't wrong → call `save_knowledge` with the finding and stop - **Significant**: Skill has outdated or incorrect guidance → continue to step 5 5. **If significant drift found**, append a heal suggestion to `.sf/skill-review-queue.md`: diff --git a/src/resources/extensions/sf/prompts/plan-slice.md b/src/resources/extensions/sf/prompts/plan-slice.md index e63f23386..f904b3f57 100644 --- a/src/resources/extensions/sf/prompts/plan-slice.md +++ b/src/resources/extensions/sf/prompts/plan-slice.md @@ -122,7 +122,7 @@ Then: - **Quality gate coverage:** For non-trivial slices, Threat Surface and Requirement Impact sections are present and specific (not placeholder text). For non-trivial tasks, Failure Modes, Load Profile, and Negative Tests are addressed in the task plan. - **Adversarial completeness:** The persisted plan includes non-placeholder Partner, Combatant, and Architect review sections. If combatant only agrees, you did not push hard enough. - **Meeting honesty:** If a planningMeeting exists and its route is `discussing` or `researching`, the moderator decision and confidence must explain why. Do not write a “planning” route just to get unstuck. -11. If planning produced structural decisions, append them to `.sf/DECISIONS.md` +11. If planning produced structural decisions, call `save_decision` for each — the tool auto-assigns IDs and regenerates `.sf/DECISIONS.md` automatically. 12. {{commitInstruction}} The slice directory and tasks/ subdirectory already exist. Do NOT mkdir. All work stays in your working directory: `{{workingDirectory}}`. diff --git a/src/resources/extensions/sf/prompts/queue.md b/src/resources/extensions/sf/prompts/queue.md index bd973ab18..b837e4990 100644 --- a/src/resources/extensions/sf/prompts/queue.md +++ b/src/resources/extensions/sf/prompts/queue.md @@ -123,7 +123,7 @@ Then, after all milestone directories and context files are written: 3. Update `.sf/PROJECT.md` — add the new milestones to the Milestone Sequence. Keep existing entries exactly as they are. Only add new lines. 4. If `.sf/REQUIREMENTS.md` exists and the queued work introduces new in-scope capabilities or promotes Deferred items, update it. -5. If discussion produced decisions relevant to existing work, append to `.sf/DECISIONS.md`. +5. If discussion produced decisions relevant to existing work, call `save_decision` for each — the tool auto-assigns IDs and regenerates `.sf/DECISIONS.md` automatically. 6. Append to `.sf/QUEUE.md`. 7. 
{{commitInstruction}} diff --git a/src/resources/extensions/sf/prompts/refine-slice.md b/src/resources/extensions/sf/prompts/refine-slice.md index 4ce25b432..881ec8a4a 100644 --- a/src/resources/extensions/sf/prompts/refine-slice.md +++ b/src/resources/extensions/sf/prompts/refine-slice.md @@ -67,7 +67,7 @@ Then: 5. Decompose the slice into tasks that fit one context window each. Every task must have Why / Files / Do / Verify / Done-when, plus a task plan with description, steps, must-haves, verification, inputs (backtick-wrapped paths), and expected output (backtick-wrapped paths). 6. **Persist planning state through `plan_slice`.** Call it with the full payload. The tool writes to the DB and renders `{{outputPath}}` and `{{slicePath}}/tasks/T##-PLAN.md` automatically. Do NOT rely on direct `PLAN.md` writes. 7. **Self-audit the plan.** If every task were completed exactly as written, the slice goal/demo should actually be true. Every must-have maps to at least one task. Inputs and Expected Output are backtick-wrapped file paths. -8. If refinement produced structural decisions that diverge from the sketch, append them to `.sf/DECISIONS.md`. +8. If refinement produced structural decisions that diverge from the sketch, call `save_decision` for each — the tool auto-assigns IDs and regenerates `.sf/DECISIONS.md` automatically. 9. {{commitInstruction}} The slice directory and tasks/ subdirectory already exist. Do NOT mkdir. diff --git a/src/resources/extensions/sf/requirement-promoter.js b/src/resources/extensions/sf/requirement-promoter.js index 9ec260b6c..73d5280ed 100644 --- a/src/resources/extensions/sf/requirement-promoter.js +++ b/src/resources/extensions/sf/requirement-promoter.js @@ -3,28 +3,23 @@ * * When feedback entries cluster (e.g., 5 instances of `git-empty-pathspec`, * or `runaway-guard-hard-pause` recurring across 3 milestones), this module - * auto-promotes to a row in `.sf/REQUIREMENTS.md`. + * auto-promotes to a row in the requirements DB table. * * Requirements flow into prompt context via the existing planning pipeline, - * so promotion turns "noise that piles up in SELF-FEEDBACK.md" into "something - * the next planning round naturally addresses." + * so promotion turns recurring noise into structured requirements the next + * planning round naturally addresses. * * Consumer: session_start drain hook in register-hooks.ts (wired separately). 
*/ -import { existsSync, mkdirSync, readFileSync, writeFileSync } from "node:fs"; +import { existsSync, readFileSync } from "node:fs"; import { join } from "node:path"; -import { sfRoot } from "./paths.js"; import { markResolved, readAllSelfFeedback } from "./self-feedback.js"; -import { isDbAvailable, upsertRequirement } from "./sf-db.js"; +import { getActiveRequirements, isDbAvailable, upsertRequirement } from "./sf-db.js"; // ─── Constants ─────────────────────────────────────────────────────────────── const COUNT_THRESHOLD = 5; const MILESTONE_THRESHOLD = 3; const LOOKBACK_DAYS = 90; -const REQUIREMENTS_HEADER = - "# Requirements\n\n" + - "This file is the explicit capability and coverage contract for the project.\n\n" + - "## Active\n\n"; // ─── Forge detection (local — isForgeRepo is not exported) ─────────────────── function isForgeRepo(basePath) { try { @@ -36,65 +31,27 @@ function isForgeRepo(basePath) { return false; } } -// ─── REQUIREMENTS.md helpers ───────────────────────────────────────────────── -function requirementsPath(basePath) { - return join(sfRoot(basePath), "REQUIREMENTS.md"); -} /** - * Read the highest R-number present in REQUIREMENTS.md. - * Returns 0 if the file is absent or contains no R-IDs. + * Return the highest R-number present in the requirements DB table. + * Returns 0 when the DB is unavailable or contains no R-IDs. */ -function readHighestRNumber(filePath) { +function readHighestRNumber() { try { - if (!existsSync(filePath)) return 0; - const content = readFileSync(filePath, "utf-8"); - const matches = content.matchAll(/\bR(\d+)\b/g); + if (!isDbAvailable()) return 0; + const rows = getActiveRequirements(); let max = 0; - for (const m of matches) { - const n = parseInt(m[1], 10); - if (n > max) max = n; + for (const row of rows) { + const m = /\bR(\d+)\b/.exec(row.id ?? ""); + if (m) { + const n = parseInt(m[1], 10); + if (n > max) max = n; + } } return max; } catch { return 0; } } -/** - * Append one requirement block to REQUIREMENTS.md. - * Creates the file with header if it does not exist. - * Appends into the ## Active section (or at end if already structured). - */ -function appendRequirementRow(filePath, id, title, notes) { - const dir = join(filePath, ".."); - if (!existsSync(dir)) mkdirSync(dir, { recursive: true }); - const block = - `### ${id} — ${title}\n` + - `- Class: operational\n` + - `- Status: active\n` + - `- Description: ${title}\n` + - `- Source: sf-promoter\n` + - `- Notes: ${notes}\n\n`; - if (!existsSync(filePath)) { - writeFileSync(filePath, REQUIREMENTS_HEADER + block, "utf-8"); - } else { - // Append before any ## Traceability or ## Coverage Summary section if - // present; otherwise just append at the end. - const content = readFileSync(filePath, "utf-8"); - const insertionMarker = content.match( - /\n## (?:Traceability|Coverage Summary)/, - ); - if (insertionMarker && insertionMarker.index !== undefined) { - const before = content.slice(0, insertionMarker.index); - const after = content.slice(insertionMarker.index); - writeFileSync(filePath, before + "\n" + block + after, "utf-8"); - } else { - const appended = content.endsWith("\n") - ? content + block - : content + "\n" + block; - writeFileSync(filePath, appended, "utf-8"); - } - } -} /** * Promote feedback entries to REQUIREMENTS.md when they cross threshold. 
* @@ -137,10 +94,10 @@ export function promoteFeedbackToRequirements(basePath = process.cwd()) { c.distinctMilestones.size >= MILESTONE_THRESHOLD, ); if (promotable.length === 0) return empty; - const reqPath = requirementsPath(basePath); + let highestNum = readHighestRNumber(); const promotedIds = []; for (const cluster of promotable) { - const nextNum = readHighestRNumber(reqPath) + 1; + const nextNum = ++highestNum; const reqId = `R${String(nextNum).padStart(3, "0")}`; const count = cluster.entries.length; const milestoneCount = cluster.distinctMilestones.size;
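Illustrative note (not part of the patch): a minimal sketch of the ID-collision fix described in the last commit bullet. The R-ID format and the highestNum counter are taken from the diff above; the rows array is a toy stand-in for the sf-db.js requirements table, and the helper here takes rows as a parameter instead of calling getActiveRequirements(), so names and shapes beyond those shown in the diff are assumptions.

// Toy stand-in for the requirements table; the real module reads rows via
// getActiveRequirements() from sf-db.js.
const rows = [{ id: "R001" }, { id: "R002" }];

// Mirrors the scan in the patched readHighestRNumber(): highest numeric
// suffix across existing R-IDs, 0 when nothing matches.
function highestRNumber(rows) {
  let max = 0;
  for (const row of rows) {
    const m = /\bR(\d+)\b/.exec(row.id ?? "");
    if (m) max = Math.max(max, parseInt(m[1], 10));
  }
  return max;
}

const clusters = ["git-empty-pathspec", "runaway-guard-hard-pause"];

// Old loop shape: the highest number was re-read for each cluster, but
// nothing updated the source between iterations, so every cluster promoted
// in one sweep received the same ID.
const colliding = clusters.map(
  () => `R${String(highestRNumber(rows) + 1).padStart(3, "0")}`,
);
console.log(colliding); // [ 'R003', 'R003' ]

// New loop shape: read once per sweep, then increment the counter in memory.
let highestNum = highestRNumber(rows);
const promoted = clusters.map(
  () => `R${String(++highestNum).padStart(3, "0")}`,
);
console.log(promoted); // [ 'R003', 'R004' ]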