feat(memory): add extraction diagnostics

This commit is contained in:
Mikael Hugo 2026-05-15 16:53:01 +02:00
parent fdc4650016
commit 6214f7c86d
9 changed files with 717 additions and 96 deletions

View file

@ -15,6 +15,7 @@ import {
resolveExecutorContextWindow, resolveExecutorContextWindow,
truncateAtSectionBoundary, truncateAtSectionBoundary,
} from "./context-budget.js"; } from "./context-budget.js";
import { getErrorMessage } from "./error-utils.js";
import { import {
formatOverridesSection, formatOverridesSection,
loadActiveOverrides, loadActiveOverrides,
@ -66,6 +67,7 @@ import {
isDbAvailable, isDbAvailable,
} from "./sf-db.js"; } from "./sf-db.js";
import { warnIfManifestHasMissingSkills } from "./skill-manifest.js"; import { warnIfManifestHasMissingSkills } from "./skill-manifest.js";
import { loadSkills } from "./skills/index.js";
import { import {
formatDecisionsCompact, formatDecisionsCompact,
formatRequirementsCompact, formatRequirementsCompact,
@ -76,15 +78,16 @@ import {
getDependencyTaskSummaryPaths, getDependencyTaskSummaryPaths,
getPriorTaskSummaryPaths, getPriorTaskSummaryPaths,
} from "./summary-helpers.js"; } from "./summary-helpers.js";
import { composeInlinedContext, composeUnitContext } from "./unit-context-composer.js"; import {
composeInlinedContext,
composeUnitContext,
} from "./unit-context-composer.js";
import { getUatType } from "./verdict-parser.js"; import { getUatType } from "./verdict-parser.js";
import { import {
buildCarryForwardSection, buildCarryForwardSection,
buildResumeSection, buildResumeSection,
} from "./workflow-helpers.js"; } from "./workflow-helpers.js";
import { logWarning } from "./workflow-logger.js"; import { logWarning } from "./workflow-logger.js";
import { getErrorMessage } from "./error-utils.js";
import { loadSkills } from "./skills/index.js";
// ─── Preamble Cap ───────────────────────────────────────────────────────────── // ─── Preamble Cap ─────────────────────────────────────────────────────────────
/** /**
@ -502,10 +505,7 @@ export async function inlineProjectFromDb(base) {
} }
} }
} catch (err) { } catch (err) {
logWarning( logWarning("prompt", `inlineProjectFromDb failed: ${getErrorMessage(err)}`);
"prompt",
`inlineProjectFromDb failed: ${getErrorMessage(err)}`,
);
} }
return inlineSfRootFile(base, "project.md", "Project"); return inlineSfRootFile(base, "project.md", "Project");
} }
@ -851,7 +851,9 @@ function buildWorkflowConstraintsBlock(params) {
); );
workflowSkills = allPatternSkills.filter( workflowSkills = allPatternSkills.filter(
(skill) => (skill) =>
!ALWAYS_ON_WORKFLOW_SKILL_NAMES.has(normalizeSkillReference(skill.name)), !ALWAYS_ON_WORKFLOW_SKILL_NAMES.has(
normalizeSkillReference(skill.name),
),
); );
} catch { } catch {
return ""; return "";
@ -1026,10 +1028,7 @@ export function buildSkillActivationBlock(params) {
matched.add(normalizeSkillReference(skillName)); matched.add(normalizeSkillReference(skillName));
} }
} catch (err) { } catch (err) {
logWarning( logWarning("prompt", `parseTaskPlanFile failed: ${getErrorMessage(err)}`);
"prompt",
`parseTaskPlanFile failed: ${getErrorMessage(err)}`,
);
} }
} }
const ordered = [...matched] const ordered = [...matched]
@ -1048,16 +1047,14 @@ export function buildSkillActivationBlock(params) {
} catch { } catch {
// getAutoSession may be unavailable in test contexts — use defaults // getAutoSession may be unavailable in test contexts — use defaults
} }
const workflowBlock = buildWorkflowConstraintsBlock( const workflowBlock = buildWorkflowConstraintsBlock({
{ base: params.base,
base: params.base, contextTokens,
contextTokens, explicitSkillNames: matched,
explicitSkillNames: matched, avoidedSkillNames: avoided,
avoidedSkillNames: avoided, workMode,
workMode, permissionProfile,
permissionProfile, });
},
);
return userSkillBlock + workflowBlock; return userSkillBlock + workflowBlock;
} }
@ -1441,10 +1438,16 @@ export async function buildResearchSlicePrompt(
case "roadmap": { case "roadmap": {
// Excerpt with full-roadmap fallback for context reduction. // Excerpt with full-roadmap fallback for context reduction.
const excerpt = await inlineRoadmapExcerpt(base, mid, sid); const excerpt = await inlineRoadmapExcerpt(base, mid, sid);
return excerpt ?? inlineFile(roadmapPath, roadmapRel, "Milestone Roadmap"); return (
excerpt ?? inlineFile(roadmapPath, roadmapRel, "Milestone Roadmap")
);
} }
case "milestone-context": case "milestone-context":
return inlineFileOptional(contextPath, contextRel, "Milestone Context"); return inlineFileOptional(
contextPath,
contextRel,
"Milestone Context",
);
case "slice-context": case "slice-context":
return inlineFileOptional( return inlineFileOptional(
sliceContextPath, sliceContextPath,
@ -1485,7 +1488,8 @@ export async function buildResearchSlicePrompt(
}, },
}, },
}); });
const parts = [prepend, inline].filter(Boolean); const memorySection = await buildProjectMemoriesSection(`${sid} ${sTitle}`);
const parts = [prepend, memorySection, inline].filter(Boolean);
const inlinedContext = capPreamble( const inlinedContext = capPreamble(
`## Inlined Context (preloaded — do not re-read these files)\n\n${parts.join("\n\n---\n\n")}`, `## Inlined Context (preloaded — do not re-read these files)\n\n${parts.join("\n\n---\n\n")}`,
); );
@ -1513,6 +1517,28 @@ export async function buildResearchSlicePrompt(
...buildSkillDiscoveryVars(), ...buildSkillDiscoveryVars(),
}); });
} }
/**
 * Render the shared "## Project Memories" prompt section.
 *
 * Uses the query-aware ranker when a non-empty query string is supplied and
 * falls back to static ranking otherwise. Degrades to a placeholder body
 * ("(none yet)" / "(unavailable)") so prompt assembly never fails on memory
 * store errors.
 *
 * Consumer: autonomous prompt builders (research-slice, plan-slice,
 * execute-task).
 */
async function buildProjectMemoriesSection(query, limit = 10) {
  const heading = "## Project Memories";
  const trimmedQuery = String(query ?? "").trim();
  try {
    const queryAware = trimmedQuery.length > 0;
    let ranked;
    if (queryAware) {
      ranked = await getRelevantMemoriesRanked(trimmedQuery, limit);
    } else {
      // Static ranking path is synchronous — no await needed.
      ranked = getActiveMemoriesRanked(limit);
    }
    if (ranked.length === 0) return `${heading}\n(none yet)`;
    const body = formatMemoriesForPrompt(ranked, 2000, queryAware);
    return `${heading}\n${body}`;
  } catch {
    // Memory store failures must never break prompt building.
    return `${heading}\n(unavailable)`;
  }
}
/** /**
* Shared assembly for plan-slice and refine-slice prompts. Both builders need * Shared assembly for plan-slice and refine-slice prompts. Both builders need
* the same inlined context (roadmap excerpt, slice context, research, decisions, * the same inlined context (roadmap excerpt, slice context, research, decisions,
@ -1547,7 +1573,10 @@ async function renderSlicePrompt(options) {
// the overrides prepend and the inline artifacts. // the overrides prepend and the inline artifacts.
const researchSliceAnchor = readPhaseAnchor(base, mid, "research-slice"); const researchSliceAnchor = readPhaseAnchor(base, mid, "research-slice");
const prefixBlocks = [...prependBlocks]; const prefixBlocks = [...prependBlocks];
if (researchSliceAnchor) prefixBlocks.push(formatAnchorForPrompt(researchSliceAnchor)); const memorySection = await buildProjectMemoriesSection(`${sid} ${sTitle}`);
prefixBlocks.push(memorySection);
if (researchSliceAnchor)
prefixBlocks.push(formatAnchorForPrompt(researchSliceAnchor));
const prefixContent = const prefixContent =
prefixBlocks.length > 0 ? prefixBlocks.join("\n\n---\n\n") : null; prefixBlocks.length > 0 ? prefixBlocks.join("\n\n---\n\n") : null;
const depContent = await inlineDependencySummaries( const depContent = await inlineDependencySummaries(
@ -1563,7 +1592,9 @@ async function renderSlicePrompt(options) {
case "roadmap": { case "roadmap": {
// Excerpt with full-roadmap fallback for context reduction. // Excerpt with full-roadmap fallback for context reduction.
const excerpt = await inlineRoadmapExcerpt(base, mid, sid); const excerpt = await inlineRoadmapExcerpt(base, mid, sid);
return excerpt ?? inlineFile(roadmapPath, roadmapRel, "Milestone Roadmap"); return (
excerpt ?? inlineFile(roadmapPath, roadmapRel, "Milestone Roadmap")
);
} }
case "slice-context": case "slice-context":
return inlineFileOptional( return inlineFileOptional(
@ -1572,7 +1603,11 @@ async function renderSlicePrompt(options) {
"Slice Context (from discussion)", "Slice Context (from discussion)",
); );
case "slice-research": case "slice-research":
return inlineFileOptional(researchPath, researchRel, "Slice Research"); return inlineFileOptional(
researchPath,
researchRel,
"Slice Research",
);
case "decisions": case "decisions":
if (level === "minimal") return null; if (level === "minimal") return null;
return inlineDecisionsFromDb( return inlineDecisionsFromDb(
@ -1586,7 +1621,8 @@ async function renderSlicePrompt(options) {
return inlineRequirementsFromDb(base, mid, sid, level); return inlineRequirementsFromDb(base, mid, sid, level);
case "templates": { case "templates": {
const tplParts = [inlineTemplate("plan", "Slice Plan")]; const tplParts = [inlineTemplate("plan", "Slice Plan")];
if (level === "full") tplParts.push(inlineTemplate("task-plan", "Task Plan")); if (level === "full")
tplParts.push(inlineTemplate("task-plan", "Task Plan"));
return tplParts.join("\n\n---\n\n"); return tplParts.join("\n\n---\n\n");
} }
default: default:
@ -1600,8 +1636,7 @@ async function renderSlicePrompt(options) {
inputs: {}, inputs: {},
}, },
knowledge: { knowledge: {
build: async ({ keywords }, b) => build: async ({ keywords }, b) => inlineKnowledgeScoped(b, keywords),
inlineKnowledgeScoped(b, keywords),
inputs: { keywords: extractKeywords(sTitle) }, inputs: { keywords: extractKeywords(sTitle) },
}, },
graph: { graph: {
@ -1938,23 +1973,9 @@ export async function buildExecuteTaskPrompt(
// the cold static-rank top. Falls back to pure static ranking when no // the cold static-rank top. Falls back to pure static ranking when no
// gateway is configured or no embeddings exist yet — see // gateway is configured or no embeddings exist yet — see
// getRelevantMemoriesRanked for the fallback chain. // getRelevantMemoriesRanked for the fallback chain.
const memoryQuery = `${sTitle} ${tTitle}`.trim(); const memoriesSection = await buildProjectMemoriesSection(
const memoriesSection = await (async () => { `${sTitle} ${tTitle}`,
try { );
const usingRanker = !!memoryQuery;
const memories = usingRanker
? await getRelevantMemoriesRanked(memoryQuery, 10)
: getActiveMemoriesRanked(10);
if (memories.length === 0) return "## Project Memories\n(none yet)";
// preserveRankOrder=true when the input came from the query-aware
// ranker so semantic relevance dominates over CATEGORY_PRIORITY in
// the rendered list. Static-ranked input keeps the historical
// category-grouped layout.
return `## Project Memories\n${formatMemoriesForPrompt(memories, 2000, usingRanker)}`;
} catch {
return "## Project Memories\n(unavailable)";
}
})();
// SF ADR-011 P2: when the feature is enabled, teach the executor that it can // SF ADR-011 P2: when the feature is enabled, teach the executor that it can
// surface non-obvious choices via the `escalation` field on complete_task // surface non-obvious choices via the `escalation` field on complete_task
// rather than silently picking. Autonomous mode auto-accepts the recommendation // rather than silently picking. Autonomous mode auto-accepts the recommendation
@ -2306,7 +2327,11 @@ export async function buildCompleteMilestonePrompt(mid, midTitle, base, level) {
if (inlineLevel === "minimal") return null; if (inlineLevel === "minimal") return null;
return inlineProjectFromDb(base); return inlineProjectFromDb(base);
case "milestone-context": case "milestone-context":
return inlineFileOptional(contextPath, contextRel, "Milestone Context"); return inlineFileOptional(
contextPath,
contextRel,
"Milestone Context",
);
case "templates": case "templates":
return inlineTemplate("milestone-summary", "Milestone Summary"); return inlineTemplate("milestone-summary", "Milestone Summary");
default: default:
@ -2315,8 +2340,7 @@ export async function buildCompleteMilestonePrompt(mid, midTitle, base, level) {
}, },
computed: { computed: {
knowledge: { knowledge: {
build: async ({ keywords: kw }, b) => build: async ({ keywords: kw }, b) => inlineKnowledgeBudgeted(b, kw),
inlineKnowledgeBudgeted(b, kw),
inputs: { keywords }, inputs: { keywords },
}, },
graph: { graph: {
@ -2456,7 +2480,11 @@ export async function buildValidateMilestonePrompt(mid, midTitle, base, level) {
// Assemble slice-summaries block (summaries + assessments interleaved). // Assemble slice-summaries block (summaries + assessments interleaved).
const sliceSummariesParts = []; const sliceSummariesParts = [];
const outstandingItems = []; const outstandingItems = [];
for (const { summaryInline, assessmentInline, outstandingLines } of valSliceResults) { for (const {
summaryInline,
assessmentInline,
outstandingLines,
} of valSliceResults) {
sliceSummariesParts.push(summaryInline); sliceSummariesParts.push(summaryInline);
if (assessmentInline) sliceSummariesParts.push(assessmentInline); if (assessmentInline) sliceSummariesParts.push(assessmentInline);
outstandingItems.push(...outstandingLines); outstandingItems.push(...outstandingLines);
@ -2472,7 +2500,9 @@ export async function buildValidateMilestonePrompt(mid, midTitle, base, level) {
// Pre-compute previous validation for re-validation rounds. // Pre-compute previous validation for re-validation rounds.
const validationPath = resolveMilestoneFile(base, mid, "VALIDATION"); const validationPath = resolveMilestoneFile(base, mid, "VALIDATION");
const validationRel = relMilestoneFile(base, mid, "VALIDATION"); const validationRel = relMilestoneFile(base, mid, "VALIDATION");
const validationContent = validationPath ? await loadFile(validationPath) : null; const validationContent = validationPath
? await loadFile(validationPath)
: null;
let remediationRound = 0; let remediationRound = 0;
let previousValidationBlock = null; let previousValidationBlock = null;
if (validationContent) { if (validationContent) {
@ -2507,15 +2537,18 @@ export async function buildValidateMilestonePrompt(mid, midTitle, base, level) {
if (inlineLevel === "minimal") return null; if (inlineLevel === "minimal") return null;
return inlineProjectFromDb(base); return inlineProjectFromDb(base);
case "milestone-context": case "milestone-context":
return inlineFileOptional(contextPath, contextRel, "Milestone Context"); return inlineFileOptional(
contextPath,
contextRel,
"Milestone Context",
);
default: default:
return null; return null;
} }
}, },
computed: { computed: {
knowledge: { knowledge: {
build: async ({ keywords: kw }, b) => build: async ({ keywords: kw }, b) => inlineKnowledgeBudgeted(b, kw),
inlineKnowledgeBudgeted(b, kw),
inputs: { keywords }, inputs: { keywords },
}, },
graph: { graph: {
@ -2641,10 +2674,7 @@ export async function buildReplanSlicePrompt(mid, midTitle, sid, sTitle, base) {
.join("\n"); .join("\n");
} }
} catch (err) { } catch (err) {
logWarning( logWarning("prompt", `loadReplanCaptures failed: ${getErrorMessage(err)}`);
"prompt",
`loadReplanCaptures failed: ${getErrorMessage(err)}`,
);
} }
return loadPrompt("replan-slice", { return loadPrompt("replan-slice", {
workingDirectory: base, workingDirectory: base,

View file

@ -14,6 +14,7 @@
import { readFileSync, writeFileSync } from "node:fs"; import { readFileSync, writeFileSync } from "node:fs";
import { resolve as resolvePath } from "node:path"; import { resolve as resolvePath } from "node:path";
import { projectRoot } from "./commands/context.js"; import { projectRoot } from "./commands/context.js";
import { getErrorMessage } from "./error-utils.js";
import { import {
ingestFile, ingestFile,
ingestNote, ingestNote,
@ -32,7 +33,6 @@ import {
supersedeMemory, supersedeMemory,
} from "./memory-store.js"; } from "./memory-store.js";
import { _getAdapter, isDbAvailable } from "./sf-db.js"; import { _getAdapter, isDbAvailable } from "./sf-db.js";
import { getErrorMessage } from "./error-utils.js";
function parseArgs(raw) { function parseArgs(raw) {
const tokens = splitArgs(raw); const tokens = splitArgs(raw);
@ -401,6 +401,12 @@ async function handleStatus(ctx) {
` queued for backfill: ${dbStatus.unembeddedActive}`, ` queued for backfill: ${dbStatus.unembeddedActive}`,
` stored embeddings: ${dbStatus.embeddingsTotal}`, ` stored embeddings: ${dbStatus.embeddingsTotal}`,
"", "",
"Extraction:",
` processed units: ${dbStatus.processedUnits}`,
` attempts: ${dbStatus.extractionAttempts}`,
` last processed: ${formatMemoryStatusRow(dbStatus.lastProcessedUnit, "unit_key", "processed_at")}`,
` last attempt: ${formatMemoryAttempt(dbStatus.lastExtractionAttempt)}`,
"",
"Backfill:", "Backfill:",
" trigger: agent_end", " trigger: agent_end",
" max per turn: 50", " max per turn: 50",
@ -460,14 +466,50 @@ function readMemoryDbStatus(adapter) {
activeCount > 0 activeCount > 0
? `${Math.round((embeddedActive / activeCount) * 100)}%` ? `${Math.round((embeddedActive / activeCount) * 100)}%`
: "n/a"; : "n/a";
const processedUnits =
adapter
.prepare("SELECT count(*) as cnt FROM memory_processed_units")
.get()?.["cnt"] ?? 0;
const extractionAttempts =
adapter
.prepare("SELECT count(*) as cnt FROM memory_extraction_attempts")
.get()?.["cnt"] ?? 0;
const lastProcessedUnit =
adapter
.prepare(`SELECT unit_key, activity_file, processed_at
FROM memory_processed_units
ORDER BY processed_at DESC
LIMIT 1`)
.get() ?? null;
const lastExtractionAttempt =
adapter
.prepare(`SELECT unit_key, status, reason, error, created_at
FROM memory_extraction_attempts
ORDER BY created_at DESC, id DESC
LIMIT 1`)
.get() ?? null;
return { return {
activeCount, activeCount,
embeddedActive, embeddedActive,
embeddingsTotal, embeddingsTotal,
unembeddedActive, unembeddedActive,
coverage, coverage,
processedUnits,
extractionAttempts,
lastProcessedUnit,
lastExtractionAttempt,
}; };
} }
/**
 * Format one DB status row as "<key> at <time>" for /memory status output.
 *
 * @param {object|null} row - row object, or null when no row exists.
 * @param {string} keyField - property holding the identifying key.
 * @param {string} timeField - property holding the timestamp.
 * @returns {string} human-readable summary, or "none" for a missing row.
 */
function formatMemoryStatusRow(row, keyField, timeField) {
  if (!row) {
    return "none";
  }
  const key = row[keyField];
  const time = row[timeField];
  return `${key} at ${time}`;
}
/**
 * Format an extraction-attempt row for /memory status output.
 *
 * Shape: "<unit_key>: <status> (<reason>) at <created_at> error=<error>",
 * omitting the reason/error segments when those fields are falsy.
 *
 * @param {object|null} row - attempt row, or null when none recorded.
 * @returns {string} one-line summary, or "none" for a missing row.
 */
function formatMemoryAttempt(row) {
  if (!row) {
    return "none";
  }
  const reasonSuffix = row["reason"] ? ` (${row["reason"]})` : "";
  const errorSuffix = row["error"] ? ` error=${row["error"]}` : "";
  const head = `${row["unit_key"]}: ${row["status"]}`;
  return `${head}${reasonSuffix} at ${row["created_at"]}${errorSuffix}`;
}
async function probeEmbedding(gatewayConfig, createGatewayEmbedFn) { async function probeEmbedding(gatewayConfig, createGatewayEmbedFn) {
const startedAt = Date.now(); const startedAt = Date.now();
try { try {

View file

@ -13,6 +13,7 @@ import {
getActiveMemories, getActiveMemories,
isUnitProcessed, isUnitProcessed,
markUnitProcessed, markUnitProcessed,
recordMemoryExtractionAttempt,
} from "./memory-store.js"; } from "./memory-store.js";
// ─── Concurrency Guard ────────────────────────────────────────────────────── // ─── Concurrency Guard ──────────────────────────────────────────────────────
@ -259,41 +260,86 @@ export async function extractMemoriesFromUnit(
unitId, unitId,
llmCallFn, llmCallFn,
) { ) {
const unitKey = `${unitType}/${unitId}`;
const recordAttempt = (status, reason, error) =>
recordMemoryExtractionAttempt({
unitKey,
unitType,
unitId,
activityFile,
status,
reason,
error: error?.message ?? error,
});
// Mutex guard // Mutex guard
if (_extracting) { if (_extracting) {
debugLog("memory-extract", { phase: "skip", reason: "mutex-busy", unitKey: `${unitType}/${unitId}` }); recordAttempt("skipped", "mutex-busy");
debugLog("memory-extract", {
phase: "skip",
reason: "mutex-busy",
unitKey,
});
return; return;
} }
// Rate limit // Rate limit
const now = Date.now(); const now = Date.now();
if (now - _lastExtractionTime < MIN_EXTRACTION_INTERVAL_MS) { if (now - _lastExtractionTime < MIN_EXTRACTION_INTERVAL_MS) {
debugLog("memory-extract", { phase: "skip", reason: "rate-limited", unitKey: `${unitType}/${unitId}`, lastExtraction: _lastExtractionTime }); recordAttempt("skipped", "rate-limited");
debugLog("memory-extract", {
phase: "skip",
reason: "rate-limited",
unitKey: `${unitType}/${unitId}`,
lastExtraction: _lastExtractionTime,
});
return; return;
} }
// Skip certain unit types // Skip certain unit types
if (SKIP_TYPES.has(unitType)) { if (SKIP_TYPES.has(unitType)) {
debugLog("memory-extract", { phase: "skip", reason: "skip-type", unitType }); recordAttempt("skipped", "skip-type");
debugLog("memory-extract", {
phase: "skip",
reason: "skip-type",
unitType,
});
return; return;
} }
const unitKey = `${unitType}/${unitId}`;
// Already processed // Already processed
if (isUnitProcessed(unitKey)) { if (isUnitProcessed(unitKey)) {
debugLog("memory-extract", { phase: "skip", reason: "already-processed", unitKey }); recordAttempt("skipped", "already-processed");
debugLog("memory-extract", {
phase: "skip",
reason: "already-processed",
unitKey,
});
return; return;
} }
// Check file size // Check file size
try { try {
const stat = statSync(activityFile); const stat = statSync(activityFile);
if (stat.size < MIN_ACTIVITY_SIZE) { if (stat.size < MIN_ACTIVITY_SIZE) {
debugLog("memory-extract", { phase: "skip", reason: "file-too-small", unitKey, size: stat.size, min: MIN_ACTIVITY_SIZE }); recordAttempt("skipped", "file-too-small");
debugLog("memory-extract", {
phase: "skip",
reason: "file-too-small",
unitKey,
size: stat.size,
min: MIN_ACTIVITY_SIZE,
});
return; return;
} }
} catch { } catch (error) {
debugLog("memory-extract", { phase: "skip", reason: "stat-failed", unitKey, file: activityFile }); recordAttempt("skipped", "stat-failed", error);
debugLog("memory-extract", {
phase: "skip",
reason: "stat-failed",
unitKey,
file: activityFile,
});
return; return;
} }
_extracting = true; _extracting = true;
_lastExtractionTime = now; _lastExtractionTime = now;
recordAttempt("started");
debugLog("memory-extract", { phase: "start", unitKey, file: activityFile }); debugLog("memory-extract", { phase: "start", unitKey, file: activityFile });
let userPrompt; let userPrompt;
try { try {
@ -301,7 +347,12 @@ export async function extractMemoriesFromUnit(
const raw = readFileSync(activityFile, "utf-8"); const raw = readFileSync(activityFile, "utf-8");
const transcript = extractTranscriptFromActivity(raw); const transcript = extractTranscriptFromActivity(raw);
if (!transcript.trim()) { if (!transcript.trim()) {
debugLog("memory-extract", { phase: "skip", reason: "empty-transcript", unitKey }); recordAttempt("skipped", "empty-transcript");
debugLog("memory-extract", {
phase: "skip",
reason: "empty-transcript",
unitKey,
});
return; return;
} }
// Redact secrets // Redact secrets
@ -320,34 +371,62 @@ export async function extractMemoriesFromUnit(
safeTranscript, safeTranscript,
); );
// Call LLM // Call LLM
debugLog("memory-extract", { phase: "llm-call", unitKey, transcriptChars: safeTranscript.length }); debugLog("memory-extract", {
phase: "llm-call",
unitKey,
transcriptChars: safeTranscript.length,
});
const response = await llmCallFn(EXTRACTION_SYSTEM, userPrompt); const response = await llmCallFn(EXTRACTION_SYSTEM, userPrompt);
// Parse response // Parse response
const actions = parseMemoryResponse(response); const actions = parseMemoryResponse(response);
debugLog("memory-extract", { phase: "parsed", unitKey, actions: actions.length }); debugLog("memory-extract", {
phase: "parsed",
unitKey,
actions: actions.length,
});
// Apply actions (consolidation-path only — add/prune discipline enforced) // Apply actions (consolidation-path only — add/prune discipline enforced)
if (actions.length > 0) { if (actions.length > 0) {
applyConsolidationActions(actions, unitType, unitId); applyConsolidationActions(actions, unitType, unitId);
debugLog("memory-extract", { phase: "applied", unitKey, actions: actions.length }); debugLog("memory-extract", {
phase: "applied",
unitKey,
actions: actions.length,
});
} }
// Decay stale memories periodically // Decay stale memories periodically
decayStaleMemories(20); decayStaleMemories(20);
// Mark unit as processed // Mark unit as processed
markUnitProcessed(unitKey, activityFile); markUnitProcessed(unitKey, activityFile);
recordAttempt(
"processed",
actions.length > 0 ? "actions-applied" : "no-actions",
);
debugLog("memory-extract", { phase: "done", unitKey }); debugLog("memory-extract", { phase: "done", unitKey });
} catch (err) { } catch (err) {
debugLog("memory-extract", { phase: "error", unitKey, error: err?.message || String(err) }); recordAttempt("error", "extract-failed", err);
debugLog("memory-extract", {
phase: "error",
unitKey,
error: err?.message || String(err),
});
// Retry once after a brief delay // Retry once after a brief delay
if (userPrompt) { if (userPrompt) {
try { try {
await delay(2000); await delay(2000);
const response2 = await llmCallFn(EXTRACTION_SYSTEM, userPrompt); const response2 = await llmCallFn(EXTRACTION_SYSTEM, userPrompt);
const actions2 = parseMemoryResponse(response2); const actions2 = parseMemoryResponse(response2);
if (actions2.length > 0) applyConsolidationActions(actions2, unitType, unitId); if (actions2.length > 0)
applyConsolidationActions(actions2, unitType, unitId);
markUnitProcessed(unitKey, activityFile); markUnitProcessed(unitKey, activityFile);
recordAttempt("processed", "retry-success");
debugLog("memory-extract", { phase: "retry-success", unitKey }); debugLog("memory-extract", { phase: "retry-success", unitKey });
} catch (err2) { } catch (err2) {
debugLog("memory-extract", { phase: "retry-failed", unitKey, error: err2?.message || String(err2) }); recordAttempt("error", "retry-failed", err2);
debugLog("memory-extract", {
phase: "retry-failed",
unitKey,
error: err2?.message || String(err2),
});
// Non-fatal — memory extraction failure should never affect autonomous mode // Non-fatal — memory extraction failure should never affect autonomous mode
} }
} }

View file

@ -8,8 +8,10 @@ import {
decayMemoriesBefore, decayMemoriesBefore,
deleteMemoryEmbedding, deleteMemoryEmbedding,
incrementMemoryHitCount, incrementMemoryHitCount,
insertMemoryExtractionAttempt,
insertMemoryRow, insertMemoryRow,
isDbAvailable, isDbAvailable,
listMemoryExtractionAttempts,
markMemoryUnitProcessed, markMemoryUnitProcessed,
rewriteMemoryId, rewriteMemoryId,
supersedeLowestRankedMemories, supersedeLowestRankedMemories,
@ -31,6 +33,22 @@ const CATEGORY_PRIORITY = {
environment: 4, environment: 4,
preference: 5, preference: 5,
}; };
// Common English filler words excluded from lexical query tokens so ranking
// focuses on distinctive terms. All entries are lowercase and >= 3 chars,
// matching the output of tokenizeMemoryQuery.
const QUERY_TOKEN_STOPWORDS = new Set(
  "the and for with that this from into when then have should would could".split(
    " ",
  ),
);
function safeJsonArray(raw) { function safeJsonArray(raw) {
try { try {
const parsed = JSON.parse(raw); const parsed = JSON.parse(raw);
@ -41,6 +59,36 @@ function safeJsonArray(raw) {
return []; return [];
} }
} }
/**
 * Split free text into lowercase lexical tokens for memory ranking.
 *
 * Tokens are runs of [a-z0-9_.-]; anything shorter than 3 characters or in
 * QUERY_TOKEN_STOPWORDS is dropped.
 *
 * @param {string|null|undefined} text - raw query text.
 * @returns {string[]} filtered tokens in input order.
 */
function tokenizeMemoryQuery(text) {
  const normalized = String(text ?? "").toLowerCase();
  const tokens = [];
  for (const raw of normalized.split(/[^a-z0-9_.-]+/g)) {
    const token = raw.trim();
    if (token.length >= 3 && !QUERY_TOKEN_STOPWORDS.has(token)) {
      tokens.push(token);
    }
  }
  return tokens;
}
/**
 * Rank memories by lexical overlap with a query, blended with static rank.
 *
 * Score = confidence * (1 + hit_count * 0.1) + 2 * (matched tokens / total
 * tokens), where a token matches if it appears anywhere in the category,
 * content, or tags. Ties break on original input order. With no usable query
 * tokens this degrades to the first `limit` memories unchanged.
 *
 * @param {Array<object>} memories - candidate memory records.
 * @param {string} query - free-text query.
 * @param {number} limit - maximum memories to return.
 * @returns {Array<object>} top-ranked memories.
 */
function rankMemoriesByLexicalQuery(memories, query, limit) {
  const tokens = tokenizeMemoryQuery(query);
  if (tokens.length === 0) return memories.slice(0, limit);
  const scored = memories.map((memory, index) => {
    const tagText = (memory.tags ?? []).join(" ");
    const haystack =
      `${memory.category ?? ""} ${memory.content ?? ""} ${tagText}`.toLowerCase();
    let hits = 0;
    for (const token of tokens) {
      if (haystack.includes(token)) hits += 1;
    }
    const lexicalScore = hits / tokens.length;
    const staticScore = memory.confidence * (1 + memory.hit_count * 0.1);
    return { memory, index, score: staticScore + lexicalScore * 2 };
  });
  scored.sort((a, b) => b.score - a.score || a.index - b.index);
  return scored.slice(0, limit).map((entry) => entry.memory);
}
// ─── Row Mapping ──────────────────────────────────────────────────────────── // ─── Row Mapping ────────────────────────────────────────────────────────────
function rowToMemory(row) { function rowToMemory(row) {
return { return {
@ -158,7 +206,7 @@ export async function getRelevantMemoriesRanked(query, limit = 10) {
Promise.resolve(loadEmbeddingMap()), Promise.resolve(loadEmbeddingMap()),
]); ]);
if (!queryVec || embeddingMap.size === 0) { if (!queryVec || embeddingMap.size === 0) {
return mergedPool.slice(0, limit); return rankMemoriesByLexicalQuery(mergedPool, query, limit);
} }
let ranked = rankMemoriesByEmbedding( let ranked = rankMemoriesByEmbedding(
mergedPool.map((m) => ({ mergedPool.map((m) => ({
@ -233,7 +281,7 @@ export async function getRelevantMemoriesRanked(query, limit = 10) {
} }
return topK; return topK;
} catch { } catch {
return mergedPool.slice(0, limit); return rankMemoriesByLexicalQuery(mergedPool, query, limit);
} }
} }
/** /**
@ -407,6 +455,56 @@ export function markUnitProcessed(unitKey, activityFile) {
return false; return false;
} }
} }
/**
 * Record a memory extraction attempt without marking the unit processed.
 *
 * Purpose: make provider skips, rate limits, parse errors, and successful
 * closeouts queryable from the SF DB.
 *
 * Consumer: memory-extractor, auto-unit-closeout, and /memory status.
 *
 * @returns {boolean} true if the row was written, false when the DB is
 *   unavailable or the insert fails (telemetry is best-effort).
 */
export function recordMemoryExtractionAttempt({
  unitKey,
  unitType,
  unitId,
  activityFile,
  status,
  reason,
  error,
}) {
  if (!isDbAvailable()) {
    return false;
  }
  // Derive the key from type/id when the caller did not supply one.
  const resolvedKey = unitKey ?? `${unitType}/${unitId}`;
  try {
    insertMemoryExtractionAttempt({
      unitKey: resolvedKey,
      unitType,
      unitId,
      activityFile,
      status,
      reason,
      error,
      createdAt: new Date().toISOString(),
    });
  } catch {
    // Diagnostics must never propagate failures to extraction callers.
    return false;
  }
  return true;
}
/**
 * Return recent memory extraction attempts.
 *
 * Purpose: give diagnostics and tests a structured view of memory closeout
 * lifecycle without relying on sqlite3 shell availability.
 *
 * Consumer: /memory status and memory lifecycle tests.
 *
 * @param {number} [limit=10] - maximum rows to return.
 * @returns {Array<object>} attempt rows, or [] when unavailable.
 */
export function getRecentMemoryExtractionAttempts(limit = 10) {
  let attempts = [];
  if (isDbAvailable()) {
    try {
      attempts = listMemoryExtractionAttempts(limit);
    } catch {
      // DB read failures degrade to "no attempts" rather than breaking
      // diagnostics output.
    }
  }
  return attempts;
}
// ─── Maintenance ──────────────────────────────────────────────────────────── // ─── Maintenance ────────────────────────────────────────────────────────────
/** /**
* Reduce confidence for memories not updated within the last N processed units. * Reduce confidence for memories not updated within the last N processed units.
@ -647,6 +745,7 @@ export function formatMemoriesForPrompt(
const header = "## Project Memory (auto-learned)\n"; const header = "## Project Memory (auto-learned)\n";
let output = header; let output = header;
let remaining = charBudget - header.length; let remaining = charBudget - header.length;
const renderedMemories = [];
if (preserveRankOrder) { if (preserveRankOrder) {
// Render in input order — caller already ranked semantically. Each // Render in input order — caller already ranked semantically. Each
// bullet shows the category inline so the agent can still tell // bullet shows the category inline so the agent can still tell
@ -655,8 +754,10 @@ export function formatMemoriesForPrompt(
const bullet = `- [${item.category}] ${item.content}\n`; const bullet = `- [${item.category}] ${item.content}\n`;
if (remaining < bullet.length) break; if (remaining < bullet.length) break;
output += bullet; output += bullet;
renderedMemories.push(item);
remaining -= bullet.length; remaining -= bullet.length;
} }
recordMemoryPromptUsage(renderedMemories);
return output.trimEnd(); return output.trimEnd();
} }
// Group by category // Group by category
@ -680,8 +781,36 @@ export function formatMemoriesForPrompt(
const bullet = `- ${item.content}\n`; const bullet = `- ${item.content}\n`;
if (remaining < bullet.length) break; if (remaining < bullet.length) break;
output += bullet; output += bullet;
renderedMemories.push(item);
remaining -= bullet.length; remaining -= bullet.length;
} }
} }
recordMemoryPromptUsage(renderedMemories);
return output.trimEnd(); return output.trimEnd();
} }
/**
 * Increment hit counts for memories that were actually injected into a prompt.
 *
 * Purpose: make memory ranking reflect real reuse instead of only explicit
 * reinforcement events. Static ("sm-"-prefixed) and duplicate ids are
 * skipped; each distinct persisted id is counted at most once per call.
 *
 * Consumer: formatMemoriesForPrompt callers across autonomous prompts.
 *
 * @param {Array<object>} memories - memories rendered into the prompt.
 */
export function recordMemoryPromptUsage(memories) {
  if (!isDbAvailable()) return;
  const timestamp = new Date().toISOString();
  const counted = new Set();
  for (const entry of memories) {
    const id = entry?.id;
    const isPersistedId = typeof id === "string" && !id.startsWith("sm-");
    if (!isPersistedId || counted.has(id)) continue;
    counted.add(id);
    try {
      incrementMemoryHitCount(id, timestamp);
    } catch {
      // Telemetry is best-effort: a failed hit-count write must never break
      // prompt rendering.
    }
  }
}

View file

@ -1,6 +1,5 @@
import { _getAdapter, intBool, parseJsonObject } from './sf-db-core.js'; import { SF_STALE_STATE, SFError } from "../errors.js";
import { SF_STALE_STATE, SFError } from '../errors.js'; import { _getAdapter, intBool, parseJsonObject } from "./sf-db-core.js";
import { logWarning } from '../workflow-logger.js';
export function getActiveMemories({ category, limit = 200 } = {}) { export function getActiveMemories({ category, limit = 200 } = {}) {
const currentDb = _getAdapter(); const currentDb = _getAdapter();
@ -116,6 +115,51 @@ export function markMemoryUnitProcessed(unitKey, activityFile, processedAt) {
.run({ ":key": unitKey, ":file": activityFile, ":at": processedAt }); .run({ ":key": unitKey, ":file": activityFile, ":at": processedAt });
} }
/**
 * Insert a memory extraction attempt row.
 *
 * Purpose: keep skipped and failed memory closeouts visible without marking
 * the unit as fully processed.
 *
 * Consumer: memory-extractor and /memory status diagnostics.
 */
export function insertMemoryExtractionAttempt(args) {
  const db = _getAdapter();
  if (!db) throw new SFError(SF_STALE_STATE, "sf-db: No database open");
  // Optional columns normalize to NULL so callers may omit them freely.
  const row = {
    ":unit_key": args.unitKey,
    ":unit_type": args.unitType ?? null,
    ":unit_id": args.unitId ?? null,
    ":activity_file": args.activityFile ?? null,
    ":status": args.status,
    ":reason": args.reason ?? null,
    ":error": args.error ?? null,
    ":created_at": args.createdAt,
  };
  const insert = db.prepare(`INSERT INTO memory_extraction_attempts
       (unit_key, unit_type, unit_id, activity_file, status, reason, error, created_at)
       VALUES (:unit_key, :unit_type, :unit_id, :activity_file, :status, :reason, :error, :created_at)`);
  insert.run(row);
}
/**
 * Return recent memory extraction attempt rows.
 *
 * Purpose: expose memory closeout health to diagnostics without requiring
 * ad-hoc SQL access on machines that lack sqlite3.
 *
 * Consumer: /memory status and focused memory lifecycle tests.
 */
export function listMemoryExtractionAttempts(limit = 10) {
  const db = _getAdapter();
  // Diagnostics are read-only: a missing database simply means "no attempts
  // recorded yet", not an error.
  if (!db) return [];
  const query = db.prepare(`SELECT * FROM memory_extraction_attempts
       ORDER BY created_at DESC, id DESC
       LIMIT :limit`);
  return query.all({ ":limit": limit });
}
export function decayMemoriesBefore(cutoffTs, now) { export function decayMemoriesBefore(cutoffTs, now) {
const currentDb = _getAdapter(); const currentDb = _getAdapter();
if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open"); if (!currentDb) throw new SFError(SF_STALE_STATE, "sf-db: No database open");

View file

@ -840,6 +840,23 @@ export function initSchema(db, fileBacked, options = {}) {
activity_file TEXT, activity_file TEXT,
processed_at TEXT NOT NULL processed_at TEXT NOT NULL
) )
`);
db.exec(`
CREATE TABLE IF NOT EXISTS memory_extraction_attempts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
unit_key TEXT NOT NULL,
unit_type TEXT,
unit_id TEXT,
activity_file TEXT,
status TEXT NOT NULL,
reason TEXT,
error TEXT,
created_at TEXT NOT NULL
)
`);
db.exec(`
CREATE INDEX IF NOT EXISTS idx_memory_extraction_attempts_created
ON memory_extraction_attempts(created_at)
`); `);
// memory_embeddings, memory_relations, memory_sources used to be referenced // memory_embeddings, memory_relations, memory_sources used to be referenced
// by helper functions and queries (memory-embeddings.ts, memory-relations.ts, // by helper functions and queries (memory-embeddings.ts, memory-relations.ts,
@ -1623,6 +1640,23 @@ function migrateSchema(db, { currentPath, withQueryTimeout }) {
processed_at TEXT NOT NULL processed_at TEXT NOT NULL
) )
`); `);
db.exec(`
CREATE TABLE IF NOT EXISTS memory_extraction_attempts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
unit_key TEXT NOT NULL,
unit_type TEXT,
unit_id TEXT,
activity_file TEXT,
status TEXT NOT NULL,
reason TEXT,
error TEXT,
created_at TEXT NOT NULL
)
`);
db.exec(`
CREATE INDEX IF NOT EXISTS idx_memory_extraction_attempts_created
ON memory_extraction_attempts(created_at)
`);
db.exec( db.exec(
"CREATE INDEX IF NOT EXISTS idx_memories_active ON memories(superseded_by)", "CREATE INDEX IF NOT EXISTS idx_memories_active ON memories(superseded_by)",
); );
@ -3383,19 +3417,16 @@ function migrateSchema(db, { currentPath, withQueryTimeout }) {
// CREATE update do need the ALTER. Probe via PRAGMA table_info // CREATE update do need the ALTER. Probe via PRAGMA table_info
// before each ALTER. // before each ALTER.
const cols = new Set( const cols = new Set(
db.prepare("PRAGMA table_info(self_feedback)").all().map( db
(r) => r.name, .prepare("PRAGMA table_info(self_feedback)")
), .all()
.map((r) => r.name),
); );
if (!cols.has("impact_score")) { if (!cols.has("impact_score")) {
db.exec( db.exec("ALTER TABLE self_feedback ADD COLUMN impact_score INTEGER");
"ALTER TABLE self_feedback ADD COLUMN impact_score INTEGER",
);
} }
if (!cols.has("effort_estimate")) { if (!cols.has("effort_estimate")) {
db.exec( db.exec("ALTER TABLE self_feedback ADD COLUMN effort_estimate INTEGER");
"ALTER TABLE self_feedback ADD COLUMN effort_estimate INTEGER",
);
} }
db.prepare( db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)", "INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
@ -3439,9 +3470,7 @@ function migrateSchema(db, { currentPath, withQueryTimeout }) {
db.exec("ALTER TABLE quality_gates ADD COLUMN run_control TEXT"); db.exec("ALTER TABLE quality_gates ADD COLUMN run_control TEXT");
} }
if (!cols.has("permission_profile")) { if (!cols.has("permission_profile")) {
db.exec( db.exec("ALTER TABLE quality_gates ADD COLUMN permission_profile TEXT");
"ALTER TABLE quality_gates ADD COLUMN permission_profile TEXT",
);
} }
if (!cols.has("trace_id")) { if (!cols.has("trace_id")) {
db.exec("ALTER TABLE quality_gates ADD COLUMN trace_id TEXT"); db.exec("ALTER TABLE quality_gates ADD COLUMN trace_id TEXT");
@ -3459,6 +3488,41 @@ function migrateSchema(db, { currentPath, withQueryTimeout }) {
if (ok) appliedVersion = 66; if (ok) appliedVersion = 66;
} }
if (appliedVersion < 67) {
const ok = runMigrationStep("v67", () => {
// Schema v67: explicit extraction attempt ledger for memory closeout.
//
// memory_processed_units is only the completed-inspection ledger.
// Skips and errors need their own DB-backed status rows so operators can
// tell whether memory is idle, blocked by provider config, rate-limited,
// or failing at parse/apply time.
db.exec(`
CREATE TABLE IF NOT EXISTS memory_extraction_attempts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
unit_key TEXT NOT NULL,
unit_type TEXT,
unit_id TEXT,
activity_file TEXT,
status TEXT NOT NULL,
reason TEXT,
error TEXT,
created_at TEXT NOT NULL
)
`);
db.exec(`
CREATE INDEX IF NOT EXISTS idx_memory_extraction_attempts_created
ON memory_extraction_attempts(created_at)
`);
db.prepare(
"INSERT INTO schema_version (version, applied_at) VALUES (:version, :applied_at)",
).run({
":version": 67,
":applied_at": new Date().toISOString(),
});
});
if (ok) appliedVersion = 67;
}
// Post-migration assertion: ensure critical tables created by historical // Post-migration assertion: ensure critical tables created by historical
// migrations are actually present. If a prior migration claimed success but // migrations are actually present. If a prior migration claimed success but
// the table is missing (e.g., due to a rolled-back transaction that failed // the table is missing (e.g., due to a rolled-back transaction that failed

View file

@ -0,0 +1,196 @@
/**
* memory-extraction-lifecycle.test.mjs memory closeout observability.
*
* Purpose: prove autonomous memory extraction records skips, completions, and
* prompt reuse in DB-backed state instead of leaving operators to infer health
* from logs.
*/
import assert from "node:assert/strict";
import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, test } from "vitest";
import {
_resetExtractionState,
extractMemoriesFromUnit,
} from "../memory-extractor.js";
import {
createMemory,
formatMemoriesForPrompt,
getActiveMemoriesRanked,
getRecentMemoryExtractionAttempts,
getRelevantMemoriesRanked,
} from "../memory-store.js";
import { closeDatabase, getDatabase, openDatabase } from "../sf-db.js";
const tmpDirs = [];
afterEach(() => {
  // Reset module-level extractor state and close the DB before removing the
  // temp projects so no open handles keep the directories alive.
  _resetExtractionState();
  closeDatabase();
  let dir;
  while ((dir = tmpDirs.pop()) !== undefined) {
    rmSync(dir, { recursive: true, force: true });
  }
});
function makeProject() {
  // Create a throwaway project with an open .sf/sf.db, tracked for cleanup.
  const projectDir = mkdtempSync(join(tmpdir(), "sf-memory-lifecycle-"));
  tmpDirs.push(projectDir);
  const sfDir = join(projectDir, ".sf");
  mkdirSync(sfDir, { recursive: true });
  assert.equal(openDatabase(join(sfDir, "sf.db")), true);
  return projectDir;
}
function writeActivity(dir, name, text) {
  // Write a one-line JSONL activity file shaped like an sf-auto custom message.
  const activityFile = join(dir, name);
  const record = { type: "custom_message", customType: "sf-auto", text };
  writeFileSync(activityFile, `${JSON.stringify(record)}\n`, "utf8");
  return activityFile;
}
test("extractMemoriesFromUnit_when_activity_too_small_records_skip_attempt", async () => {
  const projectDir = makeProject();
  const activityFile = writeActivity(projectDir, "small.jsonl", "short");
  await extractMemoriesFromUnit(
    activityFile,
    "execute-task",
    "M001/S01/T01",
    async () => "[]",
  );
  // A skip must leave an attempt row behind without marking the unit done.
  const attempts = getRecentMemoryExtractionAttempts(5);
  assert.equal(attempts.length, 1);
  const [attempt] = attempts;
  assert.equal(attempt.status, "skipped");
  assert.equal(attempt.reason, "file-too-small");
  assert.equal(attempt.unit_key, "execute-task/M001/S01/T01");
  const processed = getDatabase()
    .prepare("SELECT count(*) AS cnt FROM memory_processed_units")
    .get();
  assert.equal(processed.cnt, 0);
});
test("extractMemoriesFromUnit_when_successful_records_processed_unit_and_attempt", async () => {
  const projectDir = makeProject();
  const activityFile = writeActivity(
    projectDir,
    "large.jsonl",
    "Autonomous memory extraction should persist useful project knowledge. ".repeat(
      40,
    ),
  );
  const llmResponse = JSON.stringify([
    {
      action: "add",
      category: "knowledge",
      content:
        "Memory extraction attempts are stored in SQLite for diagnostics.",
      confidence: 0.9,
    },
  ]);
  await extractMemoriesFromUnit(
    activityFile,
    "execute-task",
    "M001/S01/T02",
    async () => llmResponse,
  );
  // Success leaves both an attempt row and a processed-unit row behind.
  const latestAttempt = getRecentMemoryExtractionAttempts(1)[0];
  assert.equal(latestAttempt.status, "processed");
  assert.equal(latestAttempt.reason, "actions-applied");
  const processed = getDatabase()
    .prepare("SELECT count(*) AS cnt FROM memory_processed_units")
    .get();
  assert.equal(processed.cnt, 1);
  const memories = getActiveMemoriesRanked(10);
  assert.equal(memories.length, 1);
  assert.match(memories[0].content, /stored in SQLite/);
});
test("formatMemoriesForPrompt_when_injected_increments_hit_count", () => {
  makeProject();
  const id = createMemory({
    category: "knowledge",
    content: "Injected memories should count as used.",
    confidence: 0.8,
  });
  // Rendering the ranked memories into a prompt counts as one use.
  const rendered = formatMemoriesForPrompt(getActiveMemoriesRanked(10));
  assert.match(rendered, /Injected memories/);
  const row = getDatabase()
    .prepare("SELECT hit_count FROM memories WHERE id = :id")
    .get({ ":id": id });
  assert.equal(row.hit_count, 1);
});
test("formatMemoriesForPrompt_when_budget_truncates_skips_hidden_hit_count", () => {
  makeProject();
  const longContent =
    "This memory is intentionally long enough that the small prompt budget excludes it.";
  const renderedId = createMemory({
    category: "knowledge",
    content: "short",
    confidence: 0.8,
  });
  const hiddenId = createMemory({
    category: "knowledge",
    content: longContent,
    confidence: 0.8,
  });
  // A tiny budget renders only the short memory; the long one stays hidden
  // and must not gain a hit.
  formatMemoriesForPrompt(
    [
      { id: renderedId, category: "knowledge", content: "short" },
      { id: hiddenId, category: "knowledge", content: longContent },
    ],
    15,
    true,
  );
  const rows = getDatabase()
    .prepare("SELECT id, hit_count FROM memories ORDER BY id")
    .all();
  const observed = rows.map((row) => [row.id, row.hit_count]);
  assert.deepEqual(observed, [
    [renderedId, 1],
    [hiddenId, 0],
  ]);
});
test("getRelevantMemoriesRanked_when_embeddings_unavailable_uses_query_terms", async () => {
  makeProject();
  createMemory({
    category: "knowledge",
    content: "Generic unrelated implementation note.",
    confidence: 0.95,
  });
  const relevantId = createMemory({
    category: "knowledge",
    content:
      "OpenCode provider routing must fall back from paid model failures to another free model.",
    confidence: 0.6,
  });
  // Term overlap must beat raw confidence when embeddings are unavailable.
  const results = await getRelevantMemoriesRanked(
    "opencode paid model triage fallback",
    1,
  );
  assert.equal(results[0].id, relevantId);
});

View file

@ -16,6 +16,7 @@ import { tmpdir } from "node:os";
import { join } from "node:path"; import { join } from "node:path";
import { DatabaseSync } from "node:sqlite"; import { DatabaseSync } from "node:sqlite";
import { afterEach, test } from "vitest"; import { afterEach, test } from "vitest";
import { initRoutingHistory } from "../routing-history.js";
import { import {
closeDatabase, closeDatabase,
getDatabase, getDatabase,
@ -29,7 +30,6 @@ import {
openDatabase, openDatabase,
reconcileWorktreeDb, reconcileWorktreeDb,
} from "../sf-db.js"; } from "../sf-db.js";
import { initRoutingHistory } from "../routing-history.js";
const tmpDirs = []; const tmpDirs = [];
@ -273,7 +273,7 @@ test("openDatabase_migrates_v27_tasks_without_created_at_through_spec_backfill",
const version = db const version = db
.prepare("SELECT MAX(version) AS version FROM schema_version") .prepare("SELECT MAX(version) AS version FROM schema_version")
.get(); .get();
assert.equal(version.version, 66); assert.equal(version.version, 67);
// v61: intent_chapters table exists // v61: intent_chapters table exists
const chaptersTable = db const chaptersTable = db
.prepare( .prepare(
@ -387,7 +387,7 @@ test("openDatabase_v52_db_heals_routing_history_and_auto_start_path_works", () =
const version = db const version = db
.prepare("SELECT MAX(version) AS version FROM schema_version") .prepare("SELECT MAX(version) AS version FROM schema_version")
.get(); .get();
assert.equal(version.version, 66); assert.equal(version.version, 67);
}); });
test("openDatabase_when_fresh_db_supports_schedule_entries", () => { test("openDatabase_when_fresh_db_supports_schedule_entries", () => {
@ -533,6 +533,32 @@ test("openDatabase_memory_indexes_exist", () => {
); );
}); });
test("openDatabase_memory_extraction_attempts_table_exists", () => {
  assert.equal(openDatabase(":memory:"), true);
  const db = getDatabase();
  // Fresh schema must expose the attempts ledger with the exact column set.
  const columnNames = db
    .prepare("PRAGMA table_info(memory_extraction_attempts)")
    .all()
    .map((row) => row.name);
  assert.deepEqual(columnNames, [
    "id",
    "unit_key",
    "unit_type",
    "unit_id",
    "activity_file",
    "status",
    "reason",
    "error",
    "created_at",
  ]);
  const index = db
    .prepare(
      "SELECT name FROM sqlite_master WHERE type = 'index' AND name = 'idx_memory_extraction_attempts_created'",
    )
    .get();
  assert.ok(index, "memory extraction attempts should be indexed by time");
});
test("openDatabase_judgments_table_round_trip", () => { test("openDatabase_judgments_table_round_trip", () => {
assert.equal(openDatabase(":memory:"), true); assert.equal(openDatabase(":memory:"), true);
insertJudgment({ insertJudgment({

View file

@ -43,6 +43,9 @@ export async function closeoutUnit(
const { buildMemoryLLMCall, extractMemoriesFromUnit } = await import( const { buildMemoryLLMCall, extractMemoriesFromUnit } = await import(
"../memory-extractor.js" "../memory-extractor.js"
); );
const { recordMemoryExtractionAttempt } = await import(
"../memory-store.js"
);
const llmCallFn = buildMemoryLLMCall(ctx); const llmCallFn = buildMemoryLLMCall(ctx);
if (llmCallFn) { if (llmCallFn) {
extractMemoriesFromUnit( extractMemoriesFromUnit(
@ -56,6 +59,14 @@ export async function closeoutUnit(
`memory extraction failed for ${unitType}/${unitId}: ${err.message}`, `memory extraction failed for ${unitType}/${unitId}: ${err.message}`,
); );
}); });
} else {
recordMemoryExtractionAttempt({
unitType,
unitId,
activityFile,
status: "skipped",
reason: "llm-unavailable",
});
} }
} catch (err) { } catch (err) {
/* non-fatal */ /* non-fatal */