feat(self-feedback): prioritization signal — impact_score + effort_estimate (v65)
Addresses sf-mp4rxkx0-fkt3e2 (gap:no-prioritization-signal-on-open-queue)
AND closes the consolidating reflection entry sf-mp4w89mv-3ulqp4 (all
four data-plane-isolation siblings now resolved: kind taxonomy,
causal-link relations, memory mirror, prioritization).
Schema v65 adds two columns to self_feedback:
impact_score INTEGER (0-100; default by severity)
effort_estimate INTEGER (1-5; default null → treated as 3 in selector)
Severity-derived default for impact_score, set by insertSelfFeedbackEntry
when no explicit value supplied:
critical → 95
high → 80
medium → 50
low → 20
selectInlineFixCandidates now sorts by:
1. impact desc — high-impact work first
2. effort asc — quick wins ahead of multi-day work at same impact
3. ts asc — older entries break ties (FIFO within priority)
Replaces the pure-FIFO ordering. Operators can override per-entry by
setting impact_score/effort_estimate explicitly at file time, so e.g.
a "low" severity entry with a critical real-world impact gets bumped
above routine "medium" entries.
Migration is idempotent: ensureSelfFeedbackTables (the fresh-DB CREATE
path) already includes both columns, so the v65 ALTER probes via
PRAGMA table_info before adding to avoid "duplicate column" errors on
fresh DBs. Older fixtures still get the ALTERs. Each column has its own
guard because the two columns are added independently; a single PRAGMA
probe taken before either ALTER covers both checks.
Tests:
sf-db-migration: assertion 64 → 65 + new impact_score/effort_estimate
column-exists checks
self-feedback-drain: prioritization order test (5 entries spanning
all severities + explicit-effort overrides) +
explicit-impact-overrides-default test
1616/1616 SF extension tests pass; typecheck clean.
Note: the consolidating reflection entry sf-mp4w89mv-3ulqp4 (filed by
the reflection layer's deepest-architectural-concern finding) is now
fully addressed across 4 commits today: 2f8ee5725 (memory mirror),
83c28b756 (kind taxonomy), d40a3d21d (causal links), this commit
(prioritization). Resolves both entries in one go.
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
d40a3d21dd
commit
2b64f308cf
6 changed files with 229 additions and 20 deletions
|
|
@ -176,6 +176,36 @@ function hasUncommittedChanges(basePath) {
|
|||
*
|
||||
* Consumer: dispatchSelfFeedbackInlineFixIfNeeded during session_start.
|
||||
*/
|
||||
// Severity-derived default impact_score, mirroring sf-db-self-feedback's
// defaultImpactForSeverity. Used by the selector when an entry's
// impact_score column is null (legacy rows pre-v65 + entries that didn't
// override the default at insert time which means the column is null too —
// the v65 column DEFAULT NULL is intentional so legacy rows participate
// in prioritization without a data backfill).
function effectiveImpact(entry) {
  // An explicit numeric score always wins over the severity-derived default.
  if (typeof entry.impactScore === "number") return entry.impactScore;
  // Severity → default score (critical 95, high 80, medium 50, low 20).
  const derived = { critical: 95, high: 80, medium: 50, low: 20 }[entry.severity];
  // Unknown/missing severity falls back to the "medium" midpoint (50);
  // the typeof guard also shields against inherited Object.prototype keys.
  return typeof derived === "number" ? derived : 50;
}
|
||||
|
||||
// Default effort to 3 (mid of 1-5 scale) when unset so missing-effort
// entries don't all sort identically; a 5-point scale gives operators
// room to bump truly-tiny work above truly-huge work.
function effectiveEffort(entry) {
  const explicit = entry.effortEstimate;
  // Non-numeric (null/undefined/legacy) collapses to the 3 midpoint.
  return typeof explicit === "number" ? explicit : 3;
}
|
||||
|
||||
export function selectInlineFixCandidates(basePath) {
|
||||
if (!isForgeRepo(basePath)) return [];
|
||||
return readAllSelfFeedback(basePath)
|
||||
|
|
@ -183,7 +213,17 @@ export function selectInlineFixCandidates(basePath) {
|
|||
if (isSuspectlyResolved(entry)) return true;
|
||||
return !entry.resolvedAt;
|
||||
})
|
||||
.sort((a, b) => a.ts.localeCompare(b.ts));
|
||||
.sort((a, b) => {
|
||||
// Prioritization (sf-mp4rxkx0-fkt3e2):
|
||||
// 1. impact desc — high-impact work first
|
||||
// 2. effort asc — quick wins ahead of multi-day work at same impact
|
||||
// 3. ts asc — older entries break ties (FIFO within priority)
|
||||
const impactDelta = effectiveImpact(b) - effectiveImpact(a);
|
||||
if (impactDelta !== 0) return impactDelta;
|
||||
const effortDelta = effectiveEffort(a) - effectiveEffort(b);
|
||||
if (effortDelta !== 0) return effortDelta;
|
||||
return a.ts.localeCompare(b.ts);
|
||||
});
|
||||
}
|
||||
function buildInlineFixPrompt(entries) {
|
||||
const rendered = entries
|
||||
|
|
|
|||
|
|
@ -867,6 +867,14 @@ export function rowToSelfFeedback(row) {
|
|||
resolvedCriteriaMet: row["resolved_criteria_json"]
|
||||
? JSON.parse(row["resolved_criteria_json"])
|
||||
: parsed.resolvedCriteriaMet,
|
||||
impactScore:
|
||||
typeof row["impact_score"] === "number"
|
||||
? row["impact_score"]
|
||||
: parsed.impactScore ?? null,
|
||||
effortEstimate:
|
||||
typeof row["effort_estimate"] === "number"
|
||||
? row["effort_estimate"]
|
||||
: parsed.effortEstimate ?? null,
|
||||
};
|
||||
} catch {
|
||||
return {
|
||||
|
|
@ -896,6 +904,14 @@ export function rowToSelfFeedback(row) {
|
|||
resolvedCriteriaMet: row["resolved_criteria_json"]
|
||||
? JSON.parse(row["resolved_criteria_json"])
|
||||
: undefined,
|
||||
impactScore:
|
||||
typeof row["impact_score"] === "number"
|
||||
? row["impact_score"]
|
||||
: undefined,
|
||||
effortEstimate:
|
||||
typeof row["effort_estimate"] === "number"
|
||||
? row["effort_estimate"]
|
||||
: undefined,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ function defaultQueryTimeout(operation, fallbackValue) {
|
|||
}
|
||||
}
|
||||
|
||||
const SCHEMA_VERSION = 64;
|
||||
const SCHEMA_VERSION = 65;
|
||||
function indexExists(db, name) {
|
||||
return !!db
|
||||
.prepare(
|
||||
|
|
@ -452,7 +452,9 @@ function ensureSelfFeedbackTables(db) {
|
|||
resolved_reason TEXT DEFAULT NULL,
|
||||
resolved_by_sf_version TEXT DEFAULT NULL,
|
||||
resolved_evidence_json TEXT DEFAULT NULL,
|
||||
resolved_criteria_json TEXT DEFAULT NULL
|
||||
resolved_criteria_json TEXT DEFAULT NULL,
|
||||
impact_score INTEGER DEFAULT NULL,
|
||||
effort_estimate INTEGER DEFAULT NULL
|
||||
)
|
||||
`);
|
||||
db.exec(
|
||||
|
|
@ -3352,6 +3354,50 @@ function migrateSchema(db, { currentPath, withQueryTimeout }) {
|
|||
});
|
||||
if (ok) appliedVersion = 64;
|
||||
}
|
||||
if (appliedVersion < 65) {
|
||||
const ok = runMigrationStep("v65", () => {
|
||||
// Schema v65: prioritization signals on self_feedback
|
||||
// (sf-mp4rxkx0-fkt3e2). Adds impact_score (0-100) and
|
||||
// effort_estimate (1-5) so selectInlineFixCandidates can sort
|
||||
// by impact desc, effort asc, ts asc instead of pure FIFO.
|
||||
//
|
||||
// Defaults are derived from severity at insert time
|
||||
// (low=20 / medium=50 / high=80 / critical=95) but operators
|
||||
// can override per-entry. NULL is allowed for backfill of
|
||||
// existing rows; the selection sort treats NULL as "use
|
||||
// severity-derived default" so legacy rows participate in
|
||||
// prioritization without a data migration.
|
||||
//
|
||||
// Idempotent ALTER: ensureSelfFeedbackTables (the fresh-DB CREATE
|
||||
// path) already includes both columns, so for fresh DBs the
|
||||
// migration step needs to run for schema_version-tracking but
|
||||
// skip the ALTER. Older fixtures that pre-date the fresh-DB
|
||||
// CREATE update do need the ALTER. Probe via PRAGMA table_info
|
||||
// before each ALTER.
|
||||
const cols = new Set(
|
||||
db.prepare("PRAGMA table_info(self_feedback)").all().map(
|
||||
(r) => r.name,
|
||||
),
|
||||
);
|
||||
if (!cols.has("impact_score")) {
|
||||
db.exec(
|
||||
"ALTER TABLE self_feedback ADD COLUMN impact_score INTEGER",
|
||||
);
|
||||
}
|
||||
if (!cols.has("effort_estimate")) {
|
||||
db.exec(
|
||||
"ALTER TABLE self_feedback ADD COLUMN effort_estimate INTEGER",
|
||||
);
|
||||
}
|
||||
db.prepare(
|
||||
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
|
||||
).run({
|
||||
":version": 65,
|
||||
":applied_at": new Date().toISOString(),
|
||||
});
|
||||
});
|
||||
if (ok) appliedVersion = 65;
|
||||
}
|
||||
|
||||
// Post-migration assertion: ensure critical tables created by historical
|
||||
// migrations are actually present. If a prior migration claimed success but
|
||||
|
|
|
|||
|
|
@ -2,19 +2,50 @@ import { _getAdapter, rowToSelfFeedback } from './sf-db-core.js';
|
|||
import { SF_STALE_STATE, SFError } from '../errors.js';
|
||||
import { logWarning } from '../workflow-logger.js';
|
||||
|
||||
/**
 * Severity-derived default impact_score (sf-mp4rxkx0-fkt3e2). Operators can
 * override per-entry via the explicit impact_score field; this default is
 * only consulted when no explicit score is supplied. Tuned so high/critical
 * entries naturally outrank medium ones, but the gap leaves room for an
 * operator to bump a "low" entry above a routine "medium" by setting score
 * 60 explicitly.
 */
function defaultImpactForSeverity(severity) {
  // Severity → score mapping; unknown/missing severity defaults to the
  // "medium" midpoint (50). The typeof check guards against inherited
  // Object.prototype keys being treated as valid severities.
  const score = { critical: 95, high: 80, medium: 50, low: 20 }[severity];
  return typeof score === "number" ? score : 50;
}
|
||||
|
||||
export function insertSelfFeedbackEntry(entry) {
|
||||
const currentDb = _getAdapter();
|
||||
if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
|
||||
const occurred = entry.occurredIn ?? {};
|
||||
const impactScore =
|
||||
typeof entry.impactScore === "number"
|
||||
? entry.impactScore
|
||||
: defaultImpactForSeverity(entry.severity);
|
||||
const effortEstimate =
|
||||
typeof entry.effortEstimate === "number" ? entry.effortEstimate : null;
|
||||
currentDb
|
||||
.prepare(`INSERT INTO self_feedback (
|
||||
id, ts, kind, severity, blocking, repo_identity, sf_version, base_path,
|
||||
unit_type, milestone_id, slice_id, task_id, summary, evidence, suggested_fix, full_json,
|
||||
resolved_at, resolved_reason, resolved_by_sf_version, resolved_evidence_json, resolved_criteria_json
|
||||
resolved_at, resolved_reason, resolved_by_sf_version, resolved_evidence_json, resolved_criteria_json,
|
||||
impact_score, effort_estimate
|
||||
) VALUES (
|
||||
:id, :ts, :kind, :severity, :blocking, :repo_identity, :sf_version, :base_path,
|
||||
:unit_type, :milestone_id, :slice_id, :task_id, :summary, :evidence, :suggested_fix, :full_json,
|
||||
:resolved_at, :resolved_reason, :resolved_by_sf_version, :resolved_evidence_json, :resolved_criteria_json
|
||||
:resolved_at, :resolved_reason, :resolved_by_sf_version, :resolved_evidence_json, :resolved_criteria_json,
|
||||
:impact_score, :effort_estimate
|
||||
)
|
||||
ON CONFLICT(id) DO NOTHING`)
|
||||
.run({
|
||||
|
|
@ -43,6 +74,8 @@ export function insertSelfFeedbackEntry(entry) {
|
|||
":resolved_criteria_json": entry.resolvedCriteriaMet
|
||||
? JSON.stringify(entry.resolvedCriteriaMet)
|
||||
: null,
|
||||
":impact_score": impactScore,
|
||||
":effort_estimate": effortEstimate,
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ describe("selectInlineFixCandidates", () => {
|
|||
expect(selectInlineFixCandidates(dir)).toEqual([]);
|
||||
});
|
||||
|
||||
test("selects every open forge-local entry regardless of severity, blocking, or kind", () => {
|
||||
test("selects every open forge-local entry regardless of severity, blocking, or kind (sorted by prioritization)", () => {
|
||||
const dir = makeForgeProject();
|
||||
writeEntries(dir, [
|
||||
entry({ id: "hi-block", severity: "high", blocking: true, ts: "2026-05-01T00:00:00Z" }),
|
||||
|
|
@ -72,13 +72,15 @@ describe("selectInlineFixCandidates", () => {
|
|||
entry({ id: "arch", severity: "medium", blocking: false, kind: "architecture-defect:foo", ts: "2026-05-06T00:00:00Z" }),
|
||||
]);
|
||||
const ids = selectInlineFixCandidates(dir).map((e) => e.id);
|
||||
// Order: critical (95) > high (80) > medium (50) > low (20).
|
||||
// Within same impact, ts asc breaks the tie.
|
||||
expect(ids).toEqual([
|
||||
"hi-block",
|
||||
"crit-block",
|
||||
"med-block",
|
||||
"med-nonblock",
|
||||
"low-nonblock",
|
||||
"arch",
|
||||
"crit-block", // critical / 95
|
||||
"hi-block", // high / 80
|
||||
"med-block", // medium / 50, oldest
|
||||
"med-nonblock", // medium / 50
|
||||
"arch", // medium / 50, newest
|
||||
"low-nonblock", // low / 20
|
||||
]);
|
||||
});
|
||||
|
||||
|
|
@ -170,14 +172,74 @@ describe("selectInlineFixCandidates", () => {
|
|||
expect(ids).toEqual(["empty-resolution"]);
|
||||
});
|
||||
|
||||
test("orders results by timestamp ascending", () => {
|
||||
test("orders by impact desc, then effort asc, then ts asc (prioritization)", () => {
|
||||
const dir = makeForgeProject();
|
||||
// Mixed entries: high-severity-newest should outrank low-severity-oldest;
|
||||
// at equal impact, low effort should outrank high effort; at equal both,
|
||||
// older ts breaks the tie.
|
||||
writeEntries(dir, [
|
||||
entry({ id: "newer", kind: "gap:a", ts: "2026-05-03T00:00:00Z" }),
|
||||
entry({ id: "oldest", kind: "gap:b", ts: "2026-05-01T00:00:00Z" }),
|
||||
entry({ id: "middle", kind: "gap:c", ts: "2026-05-02T00:00:00Z" }),
|
||||
entry({
|
||||
id: "low-severity-oldest",
|
||||
severity: "low",
|
||||
kind: "gap:a",
|
||||
ts: "2026-05-01T00:00:00Z",
|
||||
}),
|
||||
entry({
|
||||
id: "high-severity-newer",
|
||||
severity: "high",
|
||||
kind: "gap:b",
|
||||
ts: "2026-05-05T00:00:00Z",
|
||||
}),
|
||||
entry({
|
||||
id: "high-severity-quick",
|
||||
severity: "high",
|
||||
kind: "gap:c",
|
||||
ts: "2026-05-04T00:00:00Z",
|
||||
effortEstimate: 1,
|
||||
}),
|
||||
entry({
|
||||
id: "high-severity-slow",
|
||||
severity: "high",
|
||||
kind: "gap:d",
|
||||
ts: "2026-05-03T00:00:00Z",
|
||||
effortEstimate: 5,
|
||||
}),
|
||||
entry({
|
||||
id: "medium-newest",
|
||||
severity: "medium",
|
||||
kind: "gap:e",
|
||||
ts: "2026-05-06T00:00:00Z",
|
||||
}),
|
||||
]);
|
||||
const ids = selectInlineFixCandidates(dir).map((e) => e.id);
|
||||
expect(ids).toEqual(["oldest", "middle", "newer"]);
|
||||
expect(ids).toEqual([
|
||||
"high-severity-quick", // high impact + effort=1 wins
|
||||
"high-severity-newer", // high impact + effort=undefined→3
|
||||
"high-severity-slow", // high impact + effort=5
|
||||
"medium-newest", // medium impact
|
||||
"low-severity-oldest", // low impact loses despite older ts
|
||||
]);
|
||||
});
|
||||
|
||||
test("explicit impactScore overrides severity-derived default", () => {
|
||||
const dir = makeForgeProject();
|
||||
writeEntries(dir, [
|
||||
entry({
|
||||
id: "low-but-bumped",
|
||||
severity: "low",
|
||||
kind: "gap:bumped",
|
||||
ts: "2026-05-02T00:00:00Z",
|
||||
impactScore: 90,
|
||||
}),
|
||||
entry({
|
||||
id: "high-default",
|
||||
severity: "high",
|
||||
kind: "gap:default",
|
||||
ts: "2026-05-01T00:00:00Z",
|
||||
}),
|
||||
]);
|
||||
const ids = selectInlineFixCandidates(dir).map((e) => e.id);
|
||||
// 90 explicit beats 80 (severity=high default)
|
||||
expect(ids).toEqual(["low-but-bumped", "high-default"]);
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -273,7 +273,7 @@ test("openDatabase_migrates_v27_tasks_without_created_at_through_spec_backfill",
|
|||
const version = db
|
||||
.prepare("SELECT MAX(version) AS version FROM schema_version")
|
||||
.get();
|
||||
assert.equal(version.version, 64);
|
||||
assert.equal(version.version, 65);
|
||||
// v61: intent_chapters table exists
|
||||
const chaptersTable = db
|
||||
.prepare(
|
||||
|
|
@ -314,6 +314,18 @@ test("openDatabase_migrates_v27_tasks_without_created_at_through_spec_backfill",
|
|||
relationsTable,
|
||||
"self_feedback_relations table should exist after v64 migration",
|
||||
);
|
||||
// v65: self_feedback gained impact_score + effort_estimate columns
|
||||
// (prioritization signal — sf-mp4rxkx0-fkt3e2)
|
||||
const sfColumns = db.prepare("PRAGMA table_info(self_feedback)").all();
|
||||
const colNames = sfColumns.map((c) => c.name);
|
||||
assert.ok(
|
||||
colNames.includes("impact_score"),
|
||||
"impact_score column should exist after v65 migration",
|
||||
);
|
||||
assert.ok(
|
||||
colNames.includes("effort_estimate"),
|
||||
"effort_estimate column should exist after v65 migration",
|
||||
);
|
||||
const taskSpec = db
|
||||
.prepare(
|
||||
"SELECT milestone_id, slice_id, task_id, verify FROM task_specs WHERE task_id = 'T01'",
|
||||
|
|
@ -355,11 +367,11 @@ test("openDatabase_v52_db_heals_routing_history_and_auto_start_path_works", () =
|
|||
initRoutingHistory(dbPath);
|
||||
}, "initRoutingHistory should not throw on a v52 DB");
|
||||
|
||||
// Schema should have migrated to v64 (current head)
|
||||
// Schema should have migrated to v65 (current head)
|
||||
const version = db
|
||||
.prepare("SELECT MAX(version) AS version FROM schema_version")
|
||||
.get();
|
||||
assert.equal(version.version, 64);
|
||||
assert.equal(version.version, 65);
|
||||
});
|
||||
|
||||
test("openDatabase_when_fresh_db_supports_schedule_entries", () => {
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue