feat: wire requirement promoter + triage-backlog prompt
- register-hooks.ts: wires promoteFeedbackToRequirements into session_start drain - prompts/triage-backlog.md: new prompt for backlog triage agent - tests/requirement-promoter.test.ts: 7 tests covering forge-gate, count threshold, milestone threshold, idempotency, R-ID increment, 90d filtering, and resolved-skip
This commit is contained in:
parent
f9116f5514
commit
07d7e99e1e
3 changed files with 352 additions and 0 deletions
|
|
@ -261,6 +261,23 @@ export function registerHooks(
|
|||
} catch {
|
||||
/* non-fatal — upstream bridge must never block session start */
|
||||
}
|
||||
// Promote recurring feedback clusters to REQUIREMENTS.md
|
||||
try {
|
||||
const { promoteFeedbackToRequirements } = await import(
|
||||
"../requirement-promoter.js"
|
||||
);
|
||||
const { promoted, requirementIds } = promoteFeedbackToRequirements(
|
||||
process.cwd(),
|
||||
);
|
||||
if (promoted > 0) {
|
||||
ctx.ui?.notify?.(
|
||||
`Promoted ${promoted} cluster${promoted === 1 ? "" : "s"} to requirements: ${requirementIds.join(", ")}`,
|
||||
"info",
|
||||
);
|
||||
}
|
||||
} catch {
|
||||
/* non-fatal — requirement promoter must never block session start */
|
||||
}
|
||||
});
|
||||
|
||||
pi.on("session_switch", async (_event, ctx) => {
|
||||
|
|
|
|||
47
src/resources/extensions/sf/prompts/triage-backlog.md
Normal file
47
src/resources/extensions/sf/prompts/triage-backlog.md
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
# Triage Backlog
|
||||
|
||||
You are a backlog triage agent. Your job is to read `.sf/BACKLOG.md`, group related entries, and decide what to do with each cluster.
|
||||
|
||||
## Input
|
||||
|
||||
- `.sf/BACKLOG.md` — the self-feedback backlog
|
||||
- `.sf/REQUIREMENTS.md` — existing requirements (don't duplicate)
|
||||
|
||||
## Process
|
||||
|
||||
1. **Read** BACKLOG.md and identify open (unresolved) entries
|
||||
2. **Group** by kind + pattern:
|
||||
- Same `kind` → same cluster
|
||||
- Similar summary text (sharing the same 6-word prefix) → same cluster
|
||||
3. **For each cluster**, decide:
|
||||
- **Fix now** if: severity ≥ high, or ≥3 occurrences, or affects current milestone
|
||||
- **Promote to requirement** if: cross-cutting, needs design, or spans >1 milestone
|
||||
- **Defer** if: low severity, isolated, or needs more data
|
||||
- **Duplicate** if: same root cause as another entry
|
||||
4. **Output** a triage report:
|
||||
- Cluster ID
|
||||
- Kind + count
|
||||
- Decision (fix / promote / defer / duplicate)
|
||||
- Rationale (1-2 sentences)
|
||||
- Action item (what happens next)
|
||||
|
||||
## Rules
|
||||
|
||||
- Do NOT fix code yourself — this is triage, not execution
|
||||
- Do NOT mark entries resolved — that's for the fixing agent
|
||||
- Do create a new requirement row if promoting
|
||||
- Do reference existing requirements if duplicate
|
||||
- Cap output at 50 lines
|
||||
|
||||
## Output Format
|
||||
|
||||
```markdown
|
||||
## Backlog Triage Report
|
||||
|
||||
### Cluster 1: {kind} ({count} entries)
|
||||
- **Decision:** fix / promote / defer / duplicate
|
||||
- **Rationale:** {why}
|
||||
- **Action:** {what happens next}
|
||||
|
||||
### Cluster 2: ...
|
||||
```
|
||||
288
src/resources/extensions/sf/tests/requirement-promoter.test.ts
Normal file
288
src/resources/extensions/sf/tests/requirement-promoter.test.ts
Normal file
|
|
@ -0,0 +1,288 @@
|
|||
/**
|
||||
* Tests for requirement-promoter.ts
|
||||
*
|
||||
* Uses a tmpdir with package.json `name: "singularity-forge"` but WITHOUT
|
||||
* `src/resources/extensions/sf/loader.ts`, so:
|
||||
* - isForgeRepo(basePath) → true (package.json name match)
|
||||
* - isRunningOnSelf(basePath) → false (no loader.ts)
|
||||
* => sfRuntimeRoot(basePath) = sfRoot(basePath) = basePath/.sf
|
||||
*
|
||||
* This means self-feedback.jsonl and REQUIREMENTS.md both land under
|
||||
* basePath/.sf/, making the test fully self-contained.
|
||||
*/
|
||||
|
||||
import assert from "node:assert/strict";
import {
  appendFileSync,
  existsSync,
  mkdirSync,
  mkdtempSync,
  readFileSync,
  rmSync,
  writeFileSync,
} from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, beforeEach, describe, test } from "vitest";

import { promoteFeedbackToRequirements } from "../requirement-promoter.ts";
import {
  type PersistedSelfFeedbackEntry,
  readAllSelfFeedback,
} from "../self-feedback.ts";
import { _clearSfRootCache, _resetSelfDetectionCache } from "../paths.ts";
|
||||
|
||||
// ─── Helpers ─────────────────────────────────────────────────────────────────
|
||||
|
||||
function makeTmpDir(): string {
|
||||
return mkdtempSync(join(tmpdir(), "sf-req-promoter-"));
|
||||
}
|
||||
|
||||
function makeForgeProject(base: string): void {
|
||||
mkdirSync(join(base, ".sf"), { recursive: true });
|
||||
writeFileSync(
|
||||
join(base, "package.json"),
|
||||
JSON.stringify({ name: "singularity-forge", version: "0.0.0" }),
|
||||
"utf-8",
|
||||
);
|
||||
}
|
||||
|
||||
function makeNonForgeProject(base: string): void {
|
||||
mkdirSync(join(base, ".sf"), { recursive: true });
|
||||
writeFileSync(
|
||||
join(base, "package.json"),
|
||||
JSON.stringify({ name: "some-other-project", version: "1.0.0" }),
|
||||
"utf-8",
|
||||
);
|
||||
}
|
||||
|
||||
function seedEntry(
|
||||
base: string,
|
||||
overrides: Partial<PersistedSelfFeedbackEntry> = {},
|
||||
): void {
|
||||
const entry: PersistedSelfFeedbackEntry = {
|
||||
id: `sf-test-${Math.random().toString(36).slice(2, 10)}`,
|
||||
ts: new Date().toISOString(),
|
||||
kind: "test-kind",
|
||||
severity: "medium",
|
||||
summary: "test summary",
|
||||
source: "agent",
|
||||
basePath: base,
|
||||
repoIdentity: "forge",
|
||||
sfVersion: "0.0.0",
|
||||
blocking: false,
|
||||
...overrides,
|
||||
};
|
||||
const jsonlPath = join(base, ".sf", "self-feedback.jsonl");
|
||||
const line = JSON.stringify(entry) + "\n";
|
||||
if (!existsSync(join(base, ".sf"))) mkdirSync(join(base, ".sf"), { recursive: true });
|
||||
try {
|
||||
const existing = existsSync(jsonlPath) ? readFileSync(jsonlPath, "utf-8") : "";
|
||||
writeFileSync(jsonlPath, existing + line, "utf-8");
|
||||
} catch {
|
||||
writeFileSync(jsonlPath, line, "utf-8");
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Cache reset between tests ───────────────────────────────────────────────
|
||||
|
||||
beforeEach(() => {
|
||||
_clearSfRootCache();
|
||||
_resetSelfDetectionCache();
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
_clearSfRootCache();
|
||||
_resetSelfDetectionCache();
|
||||
});
|
||||
|
||||
// ─── Tests ────────────────────────────────────────────────────────────────────
|
||||
|
||||
describe("promoteFeedbackToRequirements", () => {
|
||||
test("1. bails silently when basePath is not a forge repo", () => {
|
||||
const dir = makeTmpDir();
|
||||
try {
|
||||
makeNonForgeProject(dir);
|
||||
// Seed entries that would otherwise trigger promotion
|
||||
for (let i = 0; i < 6; i++) {
|
||||
seedEntry(dir, { kind: "git-empty-pathspec" });
|
||||
}
|
||||
const result = promoteFeedbackToRequirements(dir);
|
||||
assert.deepEqual(result, { promoted: 0, requirementIds: [] });
|
||||
// No REQUIREMENTS.md should have been created
|
||||
assert.equal(existsSync(join(dir, ".sf", "REQUIREMENTS.md")), false);
|
||||
} finally {
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("2. promotes when ≥5 same-kind open forge entries exist", () => {
|
||||
const dir = makeTmpDir();
|
||||
try {
|
||||
makeForgeProject(dir);
|
||||
for (let i = 0; i < 5; i++) {
|
||||
seedEntry(dir, {
|
||||
kind: "git-empty-pathspec",
|
||||
repoIdentity: "forge",
|
||||
});
|
||||
}
|
||||
|
||||
const result = promoteFeedbackToRequirements(dir);
|
||||
assert.equal(result.promoted, 1);
|
||||
assert.equal(result.requirementIds.length, 1);
|
||||
const reqId = result.requirementIds[0];
|
||||
assert.match(reqId, /^R\d+$/);
|
||||
|
||||
// REQUIREMENTS.md row should be present
|
||||
const reqPath = join(dir, ".sf", "REQUIREMENTS.md");
|
||||
assert.ok(existsSync(reqPath));
|
||||
const content = readFileSync(reqPath, "utf-8");
|
||||
assert.ok(content.includes(reqId));
|
||||
assert.ok(content.includes("git-empty-pathspec"));
|
||||
|
||||
// All contributing entries should be resolved with promoted-to-requirement
|
||||
const allEntries = readAllSelfFeedback(dir);
|
||||
const resolved = allEntries.filter(
|
||||
(e) =>
|
||||
e.resolvedEvidence?.kind === "promoted-to-requirement" &&
|
||||
(e.resolvedEvidence as { kind: string; requirementId: string })
|
||||
.requirementId === reqId,
|
||||
);
|
||||
assert.equal(resolved.length, 5);
|
||||
} finally {
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("3. promotes when same-kind appears across ≥3 distinct milestones (count < 5)", () => {
|
||||
const dir = makeTmpDir();
|
||||
try {
|
||||
makeForgeProject(dir);
|
||||
// 3 entries, each in a different milestone — total count < 5
|
||||
for (let i = 1; i <= 3; i++) {
|
||||
seedEntry(dir, {
|
||||
kind: "runaway-guard-hard-pause",
|
||||
repoIdentity: "forge",
|
||||
occurredIn: { milestone: `M00${i}` },
|
||||
});
|
||||
}
|
||||
|
||||
const result = promoteFeedbackToRequirements(dir);
|
||||
assert.equal(result.promoted, 1);
|
||||
assert.equal(result.requirementIds.length, 1);
|
||||
|
||||
const reqPath = join(dir, ".sf", "REQUIREMENTS.md");
|
||||
const content = readFileSync(reqPath, "utf-8");
|
||||
assert.ok(content.includes("runaway-guard-hard-pause"));
|
||||
} finally {
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("4. does NOT re-promote entries already resolved with promoted-to-requirement", () => {
|
||||
const dir = makeTmpDir();
|
||||
try {
|
||||
makeForgeProject(dir);
|
||||
// Seed 5 entries that are already resolved
|
||||
for (let i = 0; i < 5; i++) {
|
||||
seedEntry(dir, {
|
||||
kind: "already-promoted-kind",
|
||||
repoIdentity: "forge",
|
||||
resolvedAt: new Date().toISOString(),
|
||||
resolvedEvidence: {
|
||||
kind: "promoted-to-requirement",
|
||||
requirementId: "R001",
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
const result = promoteFeedbackToRequirements(dir);
|
||||
assert.deepEqual(result, { promoted: 0, requirementIds: [] });
|
||||
} finally {
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("5. R-ID generation increments past existing rows in REQUIREMENTS.md", () => {
|
||||
const dir = makeTmpDir();
|
||||
try {
|
||||
makeForgeProject(dir);
|
||||
|
||||
// Pre-write a REQUIREMENTS.md with R001 and R002 already present
|
||||
const reqPath = join(dir, ".sf", "REQUIREMENTS.md");
|
||||
writeFileSync(
|
||||
reqPath,
|
||||
"# Requirements\n\n## Active\n\n" +
|
||||
"### R001 — Existing requirement one\n- Source: design\n\n" +
|
||||
"### R002 — Existing requirement two\n- Source: design\n\n",
|
||||
"utf-8",
|
||||
);
|
||||
|
||||
// Seed enough entries to trigger promotion
|
||||
for (let i = 0; i < 5; i++) {
|
||||
seedEntry(dir, { kind: "new-cluster-kind", repoIdentity: "forge" });
|
||||
}
|
||||
|
||||
const result = promoteFeedbackToRequirements(dir);
|
||||
assert.equal(result.promoted, 1);
|
||||
// Should be R003, not R001
|
||||
assert.equal(result.requirementIds[0], "R003");
|
||||
|
||||
const content = readFileSync(reqPath, "utf-8");
|
||||
assert.ok(content.includes("R003"));
|
||||
} finally {
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("6. filters out entries older than 90 days", () => {
|
||||
const dir = makeTmpDir();
|
||||
try {
|
||||
makeForgeProject(dir);
|
||||
|
||||
// 5 entries, all older than 90 days
|
||||
const oldDate = new Date(
|
||||
Date.now() - 91 * 24 * 60 * 60 * 1000,
|
||||
).toISOString();
|
||||
for (let i = 0; i < 5; i++) {
|
||||
seedEntry(dir, {
|
||||
kind: "stale-kind",
|
||||
repoIdentity: "forge",
|
||||
ts: oldDate,
|
||||
});
|
||||
}
|
||||
|
||||
const result = promoteFeedbackToRequirements(dir);
|
||||
assert.deepEqual(result, { promoted: 0, requirementIds: [] });
|
||||
assert.equal(existsSync(join(dir, ".sf", "REQUIREMENTS.md")), false);
|
||||
} finally {
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("7. idempotent: running twice files no duplicate requirement rows", () => {
|
||||
const dir = makeTmpDir();
|
||||
try {
|
||||
makeForgeProject(dir);
|
||||
for (let i = 0; i < 5; i++) {
|
||||
seedEntry(dir, { kind: "repeat-kind", repoIdentity: "forge" });
|
||||
}
|
||||
|
||||
const first = promoteFeedbackToRequirements(dir);
|
||||
assert.equal(first.promoted, 1);
|
||||
|
||||
// Second run: all entries are now resolved — should not promote again
|
||||
const second = promoteFeedbackToRequirements(dir);
|
||||
assert.deepEqual(second, { promoted: 0, requirementIds: [] });
|
||||
|
||||
// Exactly one requirement row with the R-ID from the first run
|
||||
const reqPath = join(dir, ".sf", "REQUIREMENTS.md");
|
||||
const content = readFileSync(reqPath, "utf-8");
|
||||
const matches = [...content.matchAll(/### (R\d+)/g)];
|
||||
// Only one row for this kind
|
||||
const forKind = matches.filter(([_, id]) => content.includes(`${id}`));
|
||||
assert.equal(matches.length, 1, "should have exactly one requirement row");
|
||||
} finally {
|
||||
rmSync(dir, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
});
|
||||
Loading…
Add table
Reference in a new issue