diff --git a/.sf.migrating/milestones/M001-6377a4/M001-6377a4-ROADMAP.json b/.sf.migrating/milestones/M001-6377a4/M001-6377a4-ROADMAP.json index dc0764a0c..7d4276d6e 100644 --- a/.sf.migrating/milestones/M001-6377a4/M001-6377a4-ROADMAP.json +++ b/.sf.migrating/milestones/M001-6377a4/M001-6377a4-ROADMAP.json @@ -1,102 +1,96 @@ { - "schemaVersion": 1, - "origin": "db-projection", - "generatedAt": "2026-05-06T21:03:18.498Z", - "sourceDbPath": "/home/mhugo/code/singularity-forge/.sf/sf.db", - "milestone": { - "id": "M001-6377a4", - "title": "Consolidate Memory Systems into Unified node:sqlite Store", - "status": "active", - "vision": "Replace the three fragmented memory systems (KNOWLEDGE.md, memory-store.js/sf.db, and memory extension/sql.js) with a single unified memory store backed by node:sqlite. All memory ingestion, querying, and prompt injection flows through one canonical database table with semantic search, confidence ranking, and automatic maintenance.", - "dependsOn": [], - "successCriteria": [ - "A single `memories` table in `sf.db` stores all project memory with categories, confidence, hit_count, and embeddings", - "Session transcript extraction pipeline writes directly to `sf.db` instead of separate `agent.db`", - "KNOWLEDGE.md is retired as a live system; existing entries are optionally migrated", - "Memory query returns ranked results via `getRelevantMemoriesRanked(query, limit)` using node:sqlite", - "System prompt injection uses the unified memory store, not multiple competing sources", - "All better-sqlite3 and sql.js dependencies for memory are removed" - ], - "definitionOfDone": [ - "All three legacy memory systems are removed or deprecated", - "Unified memory store passes all new tests", - "Migration script can import existing KNOWLEDGE.md and old agent.db data", - "Documentation updated to describe the single memory system", - "No references to better-sqlite3 or sql.js in memory-related code" - ], - "requirementCoverage": "Covers: unified memory storage, semantic search, session-based learning. Partially covers: cross-session context persistence. 
Leaves for later: multi-project memory sharing, cloud-synced memory.", - "boundaryMapMarkdown": "### S01 → S02\n\nProduces:\n- Unified `memories` table schema in `sf.db`\n- `MemoryStore` class with CRUD, query, and ranking API\n- `formatMemoriesForPrompt()` for system prompt injection\n\nConsumes:\n- Existing `sf-db.js` adapter and schema initialization\n\n### S02 → S03\n\nProduces:\n- Session transcript scanner and LLM extraction pipeline\n- Direct write path to `sf.db` memories table\n- `/memory` command reading from unified store\n\nConsumes:\n- `MemoryStore` API from S01\n- `completeSimple` LLM call from pi-ai\n\n### S03 → S04\n\nProduces:\n- KNOWLEDGE.md importer\n- Legacy agent.db migration script\n- Deprecation warnings for old memory paths\n\nConsumes:\n- `MemoryStore` API from S01\n- Existing KNOWLEDGE.md and agent.db files" - }, - "slices": [ - { - "id": "S01", - "title": "Unified Memory Schema and Core API", - "status": "pending", - "risk": "high", - "depends": [], - "demo": "A test creates a memory, queries it by relevance, and formats it for prompt injection — all via node:sqlite", - "goal": "Create the canonical `memories` table in `sf.db` and a `MemoryStore` class that replaces memory-store.js with a node:sqlite implementation", - "successCriteria": "- `sf.db` has a `memories` table with: id, category, content, confidence, hit_count, source, session_id, source_unit_type, source_unit_id, created_at, updated_at, superseded_by\n- `MemoryStore` class exposes: createMemory, updateMemory, reinforceMemory, supersedeMemory, getActiveMemoriesRanked, getRelevantMemoriesRanked, formatMemoriesForPrompt\n- All methods gracefully degrade when DB is unavailable\n- Unit tests cover CRUD, ranking, source filtering, and embedding fallback", - "proofLevel": "contract", - "integrationClosure": "Produces the MemoryStore API that all downstream slices consume. No external wiring yet.", - "observabilityImpact": "No runtime signals added yet; tests verify table schema and query correctness.", - "isSketch": false, - "sketchScope": "", - "sequence": 1 - }, - { - "id": "S02", - "title": "Session Transcript Extraction Pipeline", - "status": "pending", - "risk": "medium", - "depends": [ - "S01" - ], - "demo": "After running a session, the pipeline extracts memories and stores them in sf.db; `/memory view` shows the extracted knowledge", - "goal": "Port the two-phase extraction pipeline from the memory extension to write directly into the unified sf.db store instead of separate agent.db", - "successCriteria": "- Session .jsonl files are scanned and filtered for the current cwd\n- Phase 1: LLM extracts durable knowledge from transcripts, writes to sf.db memories table with source='extracted'\n- Phase 2: Consolidation deduplicates and ranks memories\n- /memory command reads from unified store\n- Pipeline runs fire-and-forget on session start without blocking dispatch\n- Rate limiting prevents store bloat (max 10 memories per session)", - "proofLevel": "integration", - "integrationClosure": "Wires the extraction pipeline into the session lifecycle and replaces the old memory extension storage path.", - "observabilityImpact": "Pipeline logs extraction stats (processed, errors) to the console. 
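S01 above fixes the `MemoryStore` surface (createMemory, reinforceMemory, supersedeMemory, getActiveMemoriesRanked, formatMemoriesForPrompt, and friends) and requires every method to degrade gracefully when the database is unavailable. A minimal sketch of that shape, assuming Node's experimental node:sqlite module; the SQL, the UUID id scheme, and the ranking tweak are illustrative assumptions, not the actual memory-store.js implementation:

```js
// Sketch only: the wrapper shape S01 describes, not the real memory-store.js.
// Assumes Node >= 22.5, where node:sqlite ships the experimental DatabaseSync.
import { DatabaseSync } from 'node:sqlite';
import { randomUUID } from 'node:crypto';

class MemoryStore {
  constructor(dbPath) {
    try {
      this.db = new DatabaseSync(dbPath);
    } catch {
      this.db = null; // graceful degradation: every method becomes a no-op
    }
  }

  createMemory({ category, content, confidence = 0.5, source = 'manual', sessionId = null }) {
    if (!this.db) return null;
    const id = randomUUID();
    this.db.prepare(
      `INSERT INTO memories (id, category, content, confidence, source, session_id)
       VALUES (?, ?, ?, ?, ?, ?)`
    ).run(id, category, content, confidence, source, sessionId);
    return id;
  }

  getActiveMemoriesRanked(limit = 20) {
    if (!this.db) return []; // degrade to empty results rather than throwing
    return this.db.prepare(
      `SELECT * FROM memories
       WHERE superseded_by IS NULL
       ORDER BY confidence * (hit_count + 1) DESC -- +1 keeps unreinforced memories visible
       LIMIT ?`
    ).all(limit);
  }

  formatMemoriesForPrompt(memories) {
    return memories.map((m) => `- [${m.category}] ${m.content}`).join('\n');
  }
}
```

The try/catch-to-null constructor is one way to satisfy the "returns empty arrays when DB unavailable" behavior the test plan calls out.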
Memory store tracks hit_count for reinforcement.", - "isSketch": false, - "sketchScope": "", - "sequence": 2 - }, - { - "id": "S03", - "title": "Migration and Legacy Deprecation", - "status": "pending", - "risk": "low", - "depends": [ - "S02" - ], - "demo": "Running a migration command imports existing KNOWLEDGE.md and agent.db memories into sf.db; old files are left as read-only backups", - "goal": "Provide a one-time migration path from KNOWLEDGE.md and the old agent.db into the unified store, then deprecate the legacy systems", - "successCriteria": "- Migration script can import KNOWLEDGE.md entries as memories with source='migrated'\n- Migration script can import agent.db stage1_outputs as memories with source='extracted'\n- Dry-run mode reports counts without modifying database\n- sf.db is automatically backed up before migration\n- knowledge-compounding.js and knowledge-injector.js are removed or no-op'd\n- Memory extension uses MemoryStore instead of sql.js\n- CHANGELOG documents the consolidation and deprecation", - "proofLevel": "contract", - "integrationClosure": "Cleans up legacy code paths and provides a safe migration for existing users.", - "observabilityImpact": "Migration script reports counts of imported records per source. Deprecation warnings logged for old API usage.", - "isSketch": false, - "sketchScope": "", - "sequence": 3 - }, - { - "id": "S04", - "title": "System Prompt Integration and Cleanup", - "status": "pending", - "risk": "medium", - "depends": [ - "S03" - ], - "demo": "Starting a new session injects relevant ranked memories from sf.db into the system prompt; no references to old memory systems remain in the codebase", - "goal": "Replace all system prompt injection paths to use the unified memory store, remove dead code, and verify end-to-end", - "successCriteria": "Not provided.", - "proofLevel": "Not provided.", - "integrationClosure": "Not provided.", - "observabilityImpact": "Not provided.", - "isSketch": false, - "sketchScope": "", - "sequence": 4 - } - ] + "schemaVersion": 1, + "origin": "db-projection", + "generatedAt": "2026-05-06T21:03:18.498Z", + "sourceDbPath": "/home/mhugo/code/singularity-forge/.sf/sf.db", + "milestone": { + "id": "M001-6377a4", + "title": "Consolidate Memory Systems into Unified node:sqlite Store", + "status": "active", + "vision": "Replace the three fragmented memory systems (KNOWLEDGE.md, memory-store.js/sf.db, and memory extension/sql.js) with a single unified memory store backed by node:sqlite. 
All memory ingestion, querying, and prompt injection flow through one canonical database table with semantic search, confidence ranking, and automatic maintenance.", + "dependsOn": [], + "successCriteria": [ + "A single `memories` table in `sf.db` stores all project memory with categories, confidence, hit_count, and embeddings", + "Session transcript extraction pipeline writes directly to `sf.db` instead of separate `agent.db`", + "KNOWLEDGE.md is retired as a live system; existing entries are optionally migrated", + "Memory query returns ranked results via `getRelevantMemoriesRanked(query, limit)` using node:sqlite", + "System prompt injection uses the unified memory store, not multiple competing sources", + "All better-sqlite3 and sql.js dependencies for memory are removed" + ], + "definitionOfDone": [ + "All three legacy memory systems are removed or deprecated", + "Unified memory store passes all new tests", + "Migration script can import existing KNOWLEDGE.md and old agent.db data", + "Documentation updated to describe the single memory system", + "No references to better-sqlite3 or sql.js in memory-related code" + ], + "requirementCoverage": "Covers: unified memory storage, semantic search, session-based learning. Partially covers: cross-session context persistence. Leaves for later: multi-project memory sharing, cloud-synced memory.", + "boundaryMapMarkdown": "### S01 → S02\n\nProduces:\n- Unified `memories` table schema in `sf.db`\n- `MemoryStore` class with CRUD, query, and ranking API\n- `formatMemoriesForPrompt()` for system prompt injection\n\nConsumes:\n- Existing `sf-db.js` adapter and schema initialization\n\n### S02 → S03\n\nProduces:\n- Session transcript scanner and LLM extraction pipeline\n- Direct write path to `sf.db` memories table\n- `/memory` command reading from unified store\n\nConsumes:\n- `MemoryStore` API from S01\n- `completeSimple` LLM call from pi-ai\n\n### S03 → S04\n\nProduces:\n- KNOWLEDGE.md importer\n- Legacy agent.db migration script\n- Deprecation warnings for old memory paths\n\nConsumes:\n- `MemoryStore` API from S01\n- Existing KNOWLEDGE.md and agent.db files" + }, + "slices": [ + { + "id": "S01", + "title": "Unified Memory Schema and Core API", + "status": "pending", + "risk": "high", + "depends": [], + "demo": "A test creates a memory, queries it by relevance, and formats it for prompt injection — all via node:sqlite", + "goal": "Create the canonical `memories` table in `sf.db` and a `MemoryStore` class that replaces memory-store.js with a node:sqlite implementation", + "successCriteria": "- `sf.db` has a `memories` table with: id, category, content, confidence, hit_count, source, session_id, source_unit_type, source_unit_id, created_at, updated_at, superseded_by\n- `MemoryStore` class exposes: createMemory, updateMemory, reinforceMemory, supersedeMemory, getActiveMemoriesRanked, getRelevantMemoriesRanked, formatMemoriesForPrompt\n- All methods gracefully degrade when DB is unavailable\n- Unit tests cover CRUD, ranking, source filtering, and embedding fallback", + "proofLevel": "contract", + "integrationClosure": "Produces the MemoryStore API that all downstream slices consume.
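The column list in those success criteria maps directly onto a table definition. A sketch of what the S01 DDL could look like under node:sqlite: the columns are taken from the criteria, while the types, defaults, and partial index are assumptions (the planning notes further down add that the live schema also carries a `seq` autoincrement and separate memory_embeddings and memory_relations tables):

```js
import { DatabaseSync } from 'node:sqlite';

// Sketch of the S01 `memories` table; types and defaults are assumed.
const db = new DatabaseSync('.sf/sf.db');
db.exec(`
  CREATE TABLE IF NOT EXISTS memories (
    id               TEXT PRIMARY KEY,
    category         TEXT NOT NULL,
    content          TEXT NOT NULL,
    confidence       REAL NOT NULL DEFAULT 0.5,
    hit_count        INTEGER NOT NULL DEFAULT 0,
    source           TEXT NOT NULL DEFAULT 'manual', -- 'manual' | 'extracted' | 'migrated'
    session_id       TEXT,
    source_unit_type TEXT,
    source_unit_id   TEXT,
    created_at       TEXT NOT NULL DEFAULT (datetime('now')),
    updated_at       TEXT NOT NULL DEFAULT (datetime('now')),
    superseded_by    TEXT REFERENCES memories(id)
  );
  CREATE INDEX IF NOT EXISTS idx_memories_active
    ON memories (category) WHERE superseded_by IS NULL;
`);
db.close();
```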
No external wiring yet.", + "observabilityImpact": "No runtime signals added yet; tests verify table schema and query correctness.", + "isSketch": false, + "sketchScope": "", + "sequence": 1 + }, + { + "id": "S02", + "title": "Session Transcript Extraction Pipeline", + "status": "pending", + "risk": "medium", + "depends": ["S01"], + "demo": "After running a session, the pipeline extracts memories and stores them in sf.db; `/memory view` shows the extracted knowledge", + "goal": "Port the two-phase extraction pipeline from the memory extension to write directly into the unified sf.db store instead of separate agent.db", + "successCriteria": "- Session .jsonl files are scanned and filtered for the current cwd\n- Phase 1: LLM extracts durable knowledge from transcripts, writes to sf.db memories table with source='extracted'\n- Phase 2: Consolidation deduplicates and ranks memories\n- /memory command reads from unified store\n- Pipeline runs fire-and-forget on session start without blocking dispatch\n- Rate limiting prevents store bloat (max 10 memories per session)", + "proofLevel": "integration", + "integrationClosure": "Wires the extraction pipeline into the session lifecycle and replaces the old memory extension storage path.", + "observabilityImpact": "Pipeline logs extraction stats (processed, errors) to the console. Memory store tracks hit_count for reinforcement.", + "isSketch": false, + "sketchScope": "", + "sequence": 2 + }, + { + "id": "S03", + "title": "Migration and Legacy Deprecation", + "status": "pending", + "risk": "low", + "depends": ["S02"], + "demo": "Running a migration command imports existing KNOWLEDGE.md and agent.db memories into sf.db; old files are left as read-only backups", + "goal": "Provide a one-time migration path from KNOWLEDGE.md and the old agent.db into the unified store, then deprecate the legacy systems", + "successCriteria": "- Migration script can import KNOWLEDGE.md entries as memories with source='migrated'\n- Migration script can import agent.db stage1_outputs as memories with source='extracted'\n- Dry-run mode reports counts without modifying database\n- sf.db is automatically backed up before migration\n- knowledge-compounding.js and knowledge-injector.js are removed or no-op'd\n- Memory extension uses MemoryStore instead of sql.js\n- CHANGELOG documents the consolidation and deprecation", + "proofLevel": "contract", + "integrationClosure": "Cleans up legacy code paths and provides a safe migration for existing users.", + "observabilityImpact": "Migration script reports counts of imported records per source. 
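S02 caps extraction at ten memories per session, and the T02 task later in this manifest adds a minimum confidence of 0.6. A sketch of the Phase 1 write path with both limits applied; `store` is the hypothetical S01 MemoryStore and `extracted` stands in for the JSON array the completeSimple LLM call would return:

```js
// Sketch: Phase 1 write path with the rate limits S02 and T02 describe.
const MAX_PER_SESSION = 10; // per S02's success criteria
const MIN_CONFIDENCE = 0.6; // per the T02 task description

function writeExtractedMemories(store, sessionId, extracted) {
  const accepted = extracted
    .filter((m) => typeof m.content === 'string' && m.confidence >= MIN_CONFIDENCE)
    .sort((a, b) => b.confidence - a.confidence) // keep the strongest memories
    .slice(0, MAX_PER_SESSION);

  for (const m of accepted) {
    store.createMemory({
      category: m.category,
      content: m.content,
      confidence: m.confidence,
      source: 'extracted', // the ingestion-path tag S02 specifies
      sessionId,
    });
  }
  return { written: accepted.length, dropped: extracted.length - accepted.length };
}
```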
Deprecation warnings logged for old API usage.", + "isSketch": false, + "sketchScope": "", + "sequence": 3 + }, + { + "id": "S04", + "title": "System Prompt Integration and Cleanup", + "status": "pending", + "risk": "medium", + "depends": ["S03"], + "demo": "Starting a new session injects relevant ranked memories from sf.db into the system prompt; no references to old memory systems remain in the codebase", + "goal": "Replace all system prompt injection paths to use the unified memory store, remove dead code, and verify end-to-end", + "successCriteria": "Not provided.", + "proofLevel": "Not provided.", + "integrationClosure": "Not provided.", + "observabilityImpact": "Not provided.", + "isSketch": false, + "sketchScope": "", + "sequence": 4 + } + ] } diff --git a/.sf.migrating/runtime/sift-index-warmup.json b/.sf.migrating/runtime/sift-index-warmup.json index ab0bbc95c..4d50f992a 100644 --- a/.sf.migrating/runtime/sift-index-warmup.json +++ b/.sf.migrating/runtime/sift-index-warmup.json @@ -1,28 +1,28 @@ { - "schemaVersion": 3, - "status": "warming", - "startedAt": "2026-05-06T21:03:03.705Z", - "command": "/nix/store/vzx1mi9c0xfadmsm9dhd83d005cb1qs9-coreutils-9.8/bin/timeout", - "cwd": "/home/mhugo/code/singularity-forge", - "args": [ - "--kill-after=10", - "600", - "/home/mhugo/.cargo/bin/sift", - "search", - "--json", - "--strategy", - "page-index-hybrid", - "--limit", - "1", - "--retriever-timeout-ms", - "30000", - ".", - "repo architecture source tests entrypoints configuration" - ], - "scope": ".", - "siftBinary": "/home/mhugo/.cargo/bin/sift", - "hardTimeoutSec": 600, - "searchCache": "/home/mhugo/code/singularity-forge/.sf/runtime/sift/search-cache", - "tmpDir": "/home/mhugo/code/singularity-forge/.sf/runtime/sift/tmp", - "pid": 602029 + "schemaVersion": 3, + "status": "warming", + "startedAt": "2026-05-06T21:03:03.705Z", + "command": "/nix/store/vzx1mi9c0xfadmsm9dhd83d005cb1qs9-coreutils-9.8/bin/timeout", + "cwd": "/home/mhugo/code/singularity-forge", + "args": [ + "--kill-after=10", + "600", + "/home/mhugo/.cargo/bin/sift", + "search", + "--json", + "--strategy", + "page-index-hybrid", + "--limit", + "1", + "--retriever-timeout-ms", + "30000", + ".", + "repo architecture source tests entrypoints configuration" + ], + "scope": ".", + "siftBinary": "/home/mhugo/.cargo/bin/sift", + "hardTimeoutSec": 600, + "searchCache": "/home/mhugo/code/singularity-forge/.sf/runtime/sift/search-cache", + "tmpDir": "/home/mhugo/code/singularity-forge/.sf/runtime/sift/tmp", + "pid": 602029 } diff --git a/.sf.migrating/runtime/uok-diagnostics.json b/.sf.migrating/runtime/uok-diagnostics.json index 60a166a91..37a3f5c0d 100644 --- a/.sf.migrating/runtime/uok-diagnostics.json +++ b/.sf.migrating/runtime/uok-diagnostics.json @@ -1,19 +1,19 @@ { - "schemaVersion": 1, - "generatedAt": "2026-05-06T21:03:15.738Z", - "verdict": "clear", - "classification": "healthy", - "signals": { - "lock": "missing", - "parity": "ok", - "ledger": "consistent", - "runtimeProjection": "ok", - "wrapper": "unknown" - }, - "currentUnit": null, - "latestRun": null, - "runtimeUnits": [], - "issues": [], - "recommendations": [], - "reportPath": "/home/mhugo/code/singularity-forge/.sf/runtime/uok-diagnostics.json" + "schemaVersion": 1, + "generatedAt": "2026-05-06T21:03:15.738Z", + "verdict": "clear", + "classification": "healthy", + "signals": { + "lock": "missing", + "parity": "ok", + "ledger": "consistent", + "runtimeProjection": "ok", + "wrapper": "unknown" + }, + "currentUnit": null, + "latestRun": null, + "runtimeUnits": [], + 
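S03's criteria above require a dry-run mode that only reports counts and an automatic sf.db backup before any import. A skeleton of that guard sequence; `parseKnowledgeMd` is a hypothetical parser injected by the caller, and the backup naming is an assumption:

```js
import { copyFileSync } from 'node:fs';

// Sketch: the dry-run and backup guards S03 calls for. parseKnowledgeMd is
// a hypothetical parser returning [{ category, content }] entries.
function migrateKnowledge({ dryRun = false, dbPath = '.sf/sf.db', store, parseKnowledgeMd }) {
  const entries = parseKnowledgeMd('KNOWLEDGE.md');
  if (dryRun) {
    console.log(`[dry-run] would import ${entries.length} KNOWLEDGE.md entries`);
    return { imported: 0, planned: entries.length };
  }
  // Back up before touching anything, per the success criteria.
  copyFileSync(dbPath, `${dbPath}.backup-${Date.now()}`);
  let imported = 0;
  for (const entry of entries) {
    store.createMemory({ ...entry, source: 'migrated' });
    imported += 1;
  }
  return { imported, planned: entries.length };
}
```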
"issues": [], + "recommendations": [], + "reportPath": "/home/mhugo/code/singularity-forge/.sf/runtime/uok-diagnostics.json" } diff --git a/.sf.migrating/runtime/uok-parity-report.json b/.sf.migrating/runtime/uok-parity-report.json index 9774d9961..71449efb1 100644 --- a/.sf.migrating/runtime/uok-parity-report.json +++ b/.sf.migrating/runtime/uok-parity-report.json @@ -1,27 +1,27 @@ { - "generatedAt": "2026-05-06T21:03:18.493Z", - "sourcePath": "/home/mhugo/code/singularity-forge/.sf/runtime/uok-parity.jsonl", - "totalEvents": 0, - "paths": {}, - "statuses": {}, - "criticalMismatches": [], - "historicalCriticalMismatches": [], - "currentErrorEvents": 0, - "historicalErrorEvents": 0, - "enterEvents": 0, - "exitEvents": 0, - "missingExitEvents": 0, - "legacyMissingExitEvents": 0, - "ledgerRunCount": 0, - "unmatchedRuns": [], - "freshUnmatchedRuns": [], - "historicalUnmatchedRuns": [], - "totalDiffs": 0, - "divergencesByPlane": { - "plan": 0, - "graph": 0, - "model-policy": 0, - "audit-envelope": 0, - "gitops": 0 - } + "generatedAt": "2026-05-06T21:03:18.493Z", + "sourcePath": "/home/mhugo/code/singularity-forge/.sf/runtime/uok-parity.jsonl", + "totalEvents": 0, + "paths": {}, + "statuses": {}, + "criticalMismatches": [], + "historicalCriticalMismatches": [], + "currentErrorEvents": 0, + "historicalErrorEvents": 0, + "enterEvents": 0, + "exitEvents": 0, + "missingExitEvents": 0, + "legacyMissingExitEvents": 0, + "ledgerRunCount": 0, + "unmatchedRuns": [], + "freshUnmatchedRuns": [], + "historicalUnmatchedRuns": [], + "totalDiffs": 0, + "divergencesByPlane": { + "plan": 0, + "graph": 0, + "model-policy": 0, + "audit-envelope": 0, + "gitops": 0 + } } diff --git a/.sf.migrating/scaffold-manifest.json b/.sf.migrating/scaffold-manifest.json index 7a940f406..d73f628fe 100644 --- a/.sf.migrating/scaffold-manifest.json +++ b/.sf.migrating/scaffold-manifest.json @@ -1,181 +1,181 @@ { - "schemaVersion": 1, - "applied": [ - { - "path": "src/AGENTS.md", - "template": "src/AGENTS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:ac5f874be887aed0bd29105a7c3b7e9269b4b3c0cc7b5b1506da0994c466cf01" - }, - { - "path": "tests/AGENTS.md", - "template": "tests/AGENTS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:dab2b9a31c2c48e07e4913cd8744be8553f279ea516ebc138730301ae401a683" - }, - { - "path": "docs/AGENTS.md", - "template": "docs/AGENTS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:b35804ce78ca309cab8769719f6e0738141f1121682fbd46490419abd2c6f870" - }, - { - "path": "docs/records/AGENTS.md", - "template": "docs/records/AGENTS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:dc21117dfa7607d7ce4cc6ce5724658348a95e9807673ff526b9cf02e2568de0" - }, - { - "path": "docs/RECORDS_KEEPER.md", - "template": "docs/RECORDS_KEEPER.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:3872de9cd72bd9129814a5e77e3b86abe76bef33f3ca34e04ae7582b4cfd066a" - }, - { - "path": "docs/design-docs/AGENTS.md", - "template": "docs/design-docs/AGENTS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:707d43067edcfd33dc0a11db32d383f89739e187f95da4bda6b94575a9e3d272" - }, - { - "path": 
"docs/exec-plans/AGENTS.md", - "template": "docs/exec-plans/AGENTS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:7bd9f815617cb42b1f2c85e6d557986a2f1e7fbc081fcc4e71f3d14cecefc5f0" - }, - { - "path": "docs/generated/db-schema.md", - "template": "docs/generated/db-schema.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:8488a607c1a2981654a3b030600d2e10627d132ebd0c75700648a08dede93368" - }, - { - "path": "docs/product-specs/index.md", - "template": "docs/product-specs/index.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:ca3477e8d74fe277a2e0b2cdb3f03c235e294015a6ece2f571a82acc7475d31c" - }, - { - "path": "docs/product-specs/AGENTS.md", - "template": "docs/product-specs/AGENTS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:cc7b5687e8b1ea78848ee318ca5a25346e5a6ee0dc33b54506d8f56bc16f13c4" - }, - { - "path": "docs/references/design-system-reference-llms.txt", - "template": "docs/references/design-system-reference-llms.txt", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:5a5a35a3f80c8b4433ad30c1f155b1e8c7fd245ce2a3def9627daa9f40854eb3" - }, - { - "path": "docs/references/nixpacks-llms.txt", - "template": "docs/references/nixpacks-llms.txt", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:22f9a8549e3ced71d0b0a912c6dcdfb2ec83a573168ee1b44ca266f1eb0307bf" - }, - { - "path": "docs/references/uv-llms.txt", - "template": "docs/references/uv-llms.txt", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:e8a998667c0f830a15b68e207f6b69e6377dd7e82728833f842678f72864e9b6" - }, - { - "path": "docs/FRONTEND.md", - "template": "docs/FRONTEND.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:03087953d690c9902d35297720d1482262c1610e3050084f891db3be711571ef" - }, - { - "path": ".sf/harness/AGENTS.md", - "template": ".sf/harness/AGENTS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:685c41e601340086b8076263a71315c66554efdaeb074bc1b907eebf879174c6" - }, - { - "path": ".sf/harness/specs/AGENTS.md", - "template": ".sf/harness/specs/AGENTS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:0f4fbf4111704d05744e4a4e13a9bf3eada262f0da9517c2010f0b46f4bd3c45" - }, - { - "path": ".sf/harness/specs/bootstrap.md", - "template": ".sf/harness/specs/bootstrap.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:b86ba7cf2cec39a7a9f9d94f885998cfe26eebfc5b76fdd8375ef125e927e0cf" - }, - { - "path": ".sf/harness/evals/AGENTS.md", - "template": ".sf/harness/evals/AGENTS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:6f88bf8a2bad95d8db5985c9b3317b9edd65592c12e98bb0bff1a24ec152d768" - }, - { - "path": ".sf/harness/graders/AGENTS.md", - "template": ".sf/harness/graders/AGENTS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - 
"stateAtApply": "pending", - "contentHash": "sha256:2db17feae1acfe62d85aafbe32d016873c3036d4d76e9dd0db478375fae0794e" - }, - { - "path": ".sf/PRINCIPLES.md", - "template": ".sf/PRINCIPLES.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:9d5c50cb3d602f66468a33a4324068fab8a022fab0fd6940c371a5986af2947e" - }, - { - "path": ".sf/TASTE.md", - "template": ".sf/TASTE.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:ee9ea8ade6f8434e9425a989bab9735d2068395978d61940e18679b93b1f11b0" - }, - { - "path": ".sf/ANTI-GOALS.md", - "template": ".sf/ANTI-GOALS.md", - "version": "2.75.3", - "appliedAt": "2026-05-06T19:53:05.336Z", - "stateAtApply": "pending", - "contentHash": "sha256:56b4f7d20d49e4558ac5aea5e967cf3475ef98d68dfa8985ba42b3e2b0d72e25" - } - ] + "schemaVersion": 1, + "applied": [ + { + "path": "src/AGENTS.md", + "template": "src/AGENTS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:ac5f874be887aed0bd29105a7c3b7e9269b4b3c0cc7b5b1506da0994c466cf01" + }, + { + "path": "tests/AGENTS.md", + "template": "tests/AGENTS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:dab2b9a31c2c48e07e4913cd8744be8553f279ea516ebc138730301ae401a683" + }, + { + "path": "docs/AGENTS.md", + "template": "docs/AGENTS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:b35804ce78ca309cab8769719f6e0738141f1121682fbd46490419abd2c6f870" + }, + { + "path": "docs/records/AGENTS.md", + "template": "docs/records/AGENTS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:dc21117dfa7607d7ce4cc6ce5724658348a95e9807673ff526b9cf02e2568de0" + }, + { + "path": "docs/RECORDS_KEEPER.md", + "template": "docs/RECORDS_KEEPER.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:3872de9cd72bd9129814a5e77e3b86abe76bef33f3ca34e04ae7582b4cfd066a" + }, + { + "path": "docs/design-docs/AGENTS.md", + "template": "docs/design-docs/AGENTS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:707d43067edcfd33dc0a11db32d383f89739e187f95da4bda6b94575a9e3d272" + }, + { + "path": "docs/exec-plans/AGENTS.md", + "template": "docs/exec-plans/AGENTS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:7bd9f815617cb42b1f2c85e6d557986a2f1e7fbc081fcc4e71f3d14cecefc5f0" + }, + { + "path": "docs/generated/db-schema.md", + "template": "docs/generated/db-schema.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:8488a607c1a2981654a3b030600d2e10627d132ebd0c75700648a08dede93368" + }, + { + "path": "docs/product-specs/index.md", + "template": "docs/product-specs/index.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:ca3477e8d74fe277a2e0b2cdb3f03c235e294015a6ece2f571a82acc7475d31c" + }, + { + "path": "docs/product-specs/AGENTS.md", + "template": "docs/product-specs/AGENTS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + 
"contentHash": "sha256:cc7b5687e8b1ea78848ee318ca5a25346e5a6ee0dc33b54506d8f56bc16f13c4" + }, + { + "path": "docs/references/design-system-reference-llms.txt", + "template": "docs/references/design-system-reference-llms.txt", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:5a5a35a3f80c8b4433ad30c1f155b1e8c7fd245ce2a3def9627daa9f40854eb3" + }, + { + "path": "docs/references/nixpacks-llms.txt", + "template": "docs/references/nixpacks-llms.txt", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:22f9a8549e3ced71d0b0a912c6dcdfb2ec83a573168ee1b44ca266f1eb0307bf" + }, + { + "path": "docs/references/uv-llms.txt", + "template": "docs/references/uv-llms.txt", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:e8a998667c0f830a15b68e207f6b69e6377dd7e82728833f842678f72864e9b6" + }, + { + "path": "docs/FRONTEND.md", + "template": "docs/FRONTEND.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:03087953d690c9902d35297720d1482262c1610e3050084f891db3be711571ef" + }, + { + "path": ".sf/harness/AGENTS.md", + "template": ".sf/harness/AGENTS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:685c41e601340086b8076263a71315c66554efdaeb074bc1b907eebf879174c6" + }, + { + "path": ".sf/harness/specs/AGENTS.md", + "template": ".sf/harness/specs/AGENTS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:0f4fbf4111704d05744e4a4e13a9bf3eada262f0da9517c2010f0b46f4bd3c45" + }, + { + "path": ".sf/harness/specs/bootstrap.md", + "template": ".sf/harness/specs/bootstrap.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:b86ba7cf2cec39a7a9f9d94f885998cfe26eebfc5b76fdd8375ef125e927e0cf" + }, + { + "path": ".sf/harness/evals/AGENTS.md", + "template": ".sf/harness/evals/AGENTS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:6f88bf8a2bad95d8db5985c9b3317b9edd65592c12e98bb0bff1a24ec152d768" + }, + { + "path": ".sf/harness/graders/AGENTS.md", + "template": ".sf/harness/graders/AGENTS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:2db17feae1acfe62d85aafbe32d016873c3036d4d76e9dd0db478375fae0794e" + }, + { + "path": ".sf/PRINCIPLES.md", + "template": ".sf/PRINCIPLES.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:9d5c50cb3d602f66468a33a4324068fab8a022fab0fd6940c371a5986af2947e" + }, + { + "path": ".sf/TASTE.md", + "template": ".sf/TASTE.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:ee9ea8ade6f8434e9425a989bab9735d2068395978d61940e18679b93b1f11b0" + }, + { + "path": ".sf/ANTI-GOALS.md", + "template": ".sf/ANTI-GOALS.md", + "version": "2.75.3", + "appliedAt": "2026-05-06T19:53:05.336Z", + "stateAtApply": "pending", + "contentHash": "sha256:56b4f7d20d49e4558ac5aea5e967cf3475ef98d68dfa8985ba42b3e2b0d72e25" + } + ] } diff --git a/.sf.migrating/state-manifest.json b/.sf.migrating/state-manifest.json index 631acdec7..4be4c8d32 100644 --- 
a/.sf.migrating/state-manifest.json +++ b/.sf.migrating/state-manifest.json @@ -1,649 +1,619 @@ { - "version": 1, - "origin": "db-projection", - "source_db_path": "/home/mhugo/code/singularity-forge/.sf/sf.db", - "exported_at": "2026-05-06T21:01:36.517Z", - "milestones": [ - { - "id": "M001-6377a4", - "title": "Consolidate Memory Systems into Unified node:sqlite Store", - "status": "active", - "depends_on": [], - "created_at": "2026-05-06T20:51:08.963Z", - "completed_at": null, - "vision": "Replace the three fragmented memory systems (KNOWLEDGE.md, memory-store.js/sf.db, and memory extension/sql.js) with a single unified memory store backed by node:sqlite. All memory ingestion, querying, and prompt injection flows through one canonical database table with semantic search, confidence ranking, and automatic maintenance.", - "success_criteria": [ - "A single `memories` table in `sf.db` stores all project memory with categories, confidence, hit_count, and embeddings", - "Session transcript extraction pipeline writes directly to `sf.db` instead of separate `agent.db`", - "KNOWLEDGE.md is retired as a live system; existing entries are optionally migrated", - "Memory query returns ranked results via `getRelevantMemoriesRanked(query, limit)` using node:sqlite", - "System prompt injection uses the unified memory store, not multiple competing sources", - "All better-sqlite3 and sql.js dependencies for memory are removed" - ], - "key_risks": [ - { - "risk": "Data loss during migration from existing memory stores", - "whyItMatters": "Users may have valuable accumulated memories in the old systems that would be lost if we don't migrate or preserve them" - }, - { - "risk": "Performance regression from consolidating into a single database with WAL contention", - "whyItMatters": "The sf.db already has a single-writer invariant; adding memory extraction writes could create lock contention during session startup" - }, - { - "risk": "Breaking the memory extension API contract for external consumers", - "whyItMatters": "The memory extension is a Pi extension with hooks and commands; changing its storage backend changes observable behavior" - } - ], - "proof_strategy": [ - { - "riskOrUnknown": "Single database performance under write load", - "retireIn": "S02", - "whatWillBeProven": "Memory extraction during session startup completes without blocking dispatch, proven by timing telemetry and zero timeout errors" - }, - { - "riskOrUnknown": "Migration path preserves existing memories", - "retireIn": "S03", - "whatWillBeProven": "Existing KNOWLEDGE.md entries and memory extension extractions are importable into the new schema with no data loss" - } - ], - "verification_contract": "Unit tests for memory CRUD, ranking, and embedding fallback. Integration test for full pipeline: session scan → extraction → storage → query → prompt injection.", - "verification_integration": "End-to-end test: run a mock session, trigger memory extraction, verify the memory appears in sf.db and is injectable into a prompt.", - "verification_operational": "Verify no memory-related background processes leak or consume excessive resources. Confirm WAL checkpoint behavior under memory write load.", - "verification_uat": "Manual verification: run `/memory view` command and confirm it reads from sf.db. 
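The proof strategy here promises that extraction completes without blocking dispatch, evidenced by timing telemetry. A sketch of the fire-and-forget launch that implies; `runExtractionPipeline` is a hypothetical async entry point for the S02 pipeline:

```js
// Sketch: start extraction without awaiting it, so session dispatch proceeds.
function startMemoryExtraction(runExtractionPipeline, sessionId) {
  const startedAt = Date.now();
  runExtractionPipeline(sessionId)
    .then((stats) =>
      console.log(`[memory] extraction finished in ${Date.now() - startedAt}ms`, stats))
    .catch((err) =>
      console.warn('[memory] extraction failed (non-fatal):', err.message));
  // Deliberately no await: the caller returns immediately.
}
```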
Check that system prompt contains relevant memories from previous sessions.", - "definition_of_done": [ - "All three legacy memory systems are removed or deprecated", - "Unified memory store passes all new tests", - "Migration script can import existing KNOWLEDGE.md and old agent.db data", - "Documentation updated to describe the single memory system", - "No references to better-sqlite3 or sql.js in memory-related code" - ], - "requirement_coverage": "Covers: unified memory storage, semantic search, session-based learning. Partially covers: cross-session context persistence. Leaves for later: multi-project memory sharing, cloud-synced memory.", - "boundary_map_markdown": "### S01 → S02\n\nProduces:\n- Unified `memories` table schema in `sf.db`\n- `MemoryStore` class with CRUD, query, and ranking API\n- `formatMemoriesForPrompt()` for system prompt injection\n\nConsumes:\n- Existing `sf-db.js` adapter and schema initialization\n\n### S02 → S03\n\nProduces:\n- Session transcript scanner and LLM extraction pipeline\n- Direct write path to `sf.db` memories table\n- `/memory` command reading from unified store\n\nConsumes:\n- `MemoryStore` API from S01\n- `completeSimple` LLM call from pi-ai\n\n### S03 → S04\n\nProduces:\n- KNOWLEDGE.md importer\n- Legacy agent.db migration script\n- Deprecation warnings for old memory paths\n\nConsumes:\n- `MemoryStore` API from S01\n- Existing KNOWLEDGE.md and agent.db files", - "vision_meeting": null - } - ], - "slices": [ - { - "milestone_id": "M001-6377a4", - "id": "S01", - "title": "Unified Memory Schema and Core API", - "status": "pending", - "risk": "high", - "depends": [], - "demo": "A test creates a memory, queries it by relevance, and formats it for prompt injection — all via node:sqlite", - "created_at": "2026-05-06T20:52:17.318Z", - "completed_at": null, - "full_summary_md": "", - "full_uat_md": "", - "goal": "Create the canonical `memories` table in `sf.db` and a `MemoryStore` class that replaces memory-store.js with a node:sqlite implementation", - "success_criteria": "- `sf.db` has a `memories` table with: id, category, content, confidence, hit_count, source, session_id, source_unit_type, source_unit_id, created_at, updated_at, superseded_by\n- `MemoryStore` class exposes: createMemory, updateMemory, reinforceMemory, supersedeMemory, getActiveMemoriesRanked, getRelevantMemoriesRanked, formatMemoriesForPrompt\n- All methods gracefully degrade when DB is unavailable\n- Unit tests cover CRUD, ranking, source filtering, and embedding fallback", - "proof_level": "contract", - "integration_closure": "Produces the MemoryStore API that all downstream slices consume. No external wiring yet.", - "observability_impact": "No runtime signals added yet; tests verify table schema and query correctness.", - "adversarial_partner": "Missing partner review.", - "adversarial_combatant": "Missing combatant review.", - "adversarial_architect": "Missing architect review.", - "planning_meeting": { - "trigger": "Need to understand current memory-store.js implementation and sf-db.js schema before designing the unified table", - "pm": "The memory-store.js already has a working API with categories, confidence, hit_count, superseded_by, and embeddings. We should preserve this API surface but ensure it uses node:sqlite natively (not better-sqlite3 wrappers). The sf-db.js already uses node:sqlite DatabaseSync.", - "researcher": "Current memory-store.js uses sf-db.js which uses node:sqlite DatabaseSync. 
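`getRelevantMemoriesRanked(query, limit)` recurs throughout this manifest as the single query entry point, and S01's tests mention an embedding fallback. A sketch of what a keyword fallback could look like when embeddings are unavailable; the LIKE matching and the scoring expression are assumptions standing in for the semantic search the milestone envisions:

```js
// Sketch: keyword fallback for getRelevantMemoriesRanked when embeddings
// are unavailable. db is an open node:sqlite DatabaseSync handle.
function getRelevantMemoriesRanked(db, query, limit = 10) {
  const firstKeyword = query.split(/\s+/)[0] ?? '';
  return db.prepare(
    `SELECT id, category, content, confidence, hit_count
     FROM memories
     WHERE superseded_by IS NULL AND content LIKE ?
     ORDER BY confidence * (hit_count + 1) DESC
     LIMIT ?`
  ).all(`%${firstKeyword}%`, limit);
}
```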
The schema already has memories table with: seq (autoincrement), id, category, content, confidence, source_unit_type, source_unit_id, created_at, updated_at, superseded_by, hit_count. Embeddings are in a separate memory_embeddings table. Relations in memory_relations. Processed units tracked in memory_processed_units.", - "partner": "The existing memory-store.js is already well-designed. S01 should: (1) verify the schema is correct in sf-db.js, (2) create a clean MemoryStore class that wraps the existing functions, (3) add unit tests, (4) ensure graceful degradation. We don't need to redesign the schema — it's already good.", - "combatant": "The current memory-store.js exports individual functions, not a class. Creating a class is an unnecessary abstraction if the functions already work. Also, the existing schema might be missing fields needed for the session extraction pipeline (like thread_id, session_id). We should check if the memory extension's schema has fields we need to add.", - "architect": "Agree with combatant on checking schema compatibility. The memory extension's pipeline stores: thread_id, file_path, file_size, file_mtime, cwd, status, extraction_json. These are pipeline metadata, not memory content. The actual memories are extracted JSON with category/content/confidence. So the existing memories table schema should work. However, we might want to add a `source` column to distinguish manual capture_thought vs auto-extracted vs KNOWLEDGE.md migrated entries.", - "moderator": "Decision: Keep the existing memories table schema (it's already correct). Add a `source` column to distinguish ingestion paths. Create a MemoryStore class that wraps the existing function-based API for cleaner consumption by the pipeline. Write unit tests. Do NOT redesign the schema from scratch.", - "recommendedRoute": "planning", - "confidenceSummary": "High confidence. The existing memory-store.js and sf-db.js are already well-designed. We just need to add a source column, create a class wrapper, and add tests." - }, - "sequence": 1, - "replan_triggered_at": null - }, - { - "milestone_id": "M001-6377a4", - "id": "S02", - "title": "Session Transcript Extraction Pipeline", - "status": "pending", - "risk": "medium", - "depends": [ - "S01" - ], - "demo": "After running a session, the pipeline extracts memories and stores them in sf.db; `/memory view` shows the extracted knowledge", - "created_at": "2026-05-06T20:52:17.319Z", - "completed_at": null, - "full_summary_md": "", - "full_uat_md": "", - "goal": "Port the two-phase extraction pipeline from the memory extension to write directly into the unified sf.db store instead of separate agent.db", - "success_criteria": "- Session .jsonl files are scanned and filtered for the current cwd\n- Phase 1: LLM extracts durable knowledge from transcripts, writes to sf.db memories table with source='extracted'\n- Phase 2: Consolidation deduplicates and ranks memories\n- /memory command reads from unified store\n- Pipeline runs fire-and-forget on session start without blocking dispatch\n- Rate limiting prevents store bloat (max 10 memories per session)", - "proof_level": "integration", - "integration_closure": "Wires the extraction pipeline into the session lifecycle and replaces the old memory extension storage path.", - "observability_impact": "Pipeline logs extraction stats (processed, errors) to the console. 
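The moderator's decision adds a `source` column to a table that already exists in user databases, and the T01 task further down asks for migration SQL that adds columns only when they are missing. One common SQLite pattern for that, sketched under node:sqlite; the pragma-based check is an assumed approach, not confirmed project code:

```js
import { DatabaseSync } from 'node:sqlite';

// Sketch: add a column only when missing, so the migration is idempotent.
// Table and column names come from our own code, so interpolation is safe here.
function ensureColumn(db, table, column, ddl) {
  const cols = db.prepare(`SELECT name FROM pragma_table_info('${table}')`).all();
  if (!cols.some((c) => c.name === column)) {
    db.exec(`ALTER TABLE ${table} ADD COLUMN ${column} ${ddl}`);
  }
}

const db = new DatabaseSync('.sf/sf.db');
ensureColumn(db, 'memories', 'source', `TEXT NOT NULL DEFAULT 'manual'`);
ensureColumn(db, 'memories', 'session_id', 'TEXT');
db.close();
```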
Memory store tracks hit_count for reinforcement.", - "adversarial_partner": "Missing partner review.", - "adversarial_combatant": "Missing combatant review.", - "adversarial_architect": "Missing architect review.", - "planning_meeting": { - "trigger": "Need to port the session transcript extraction pipeline from the memory extension (which uses sql.js/agent.db) to use the unified MemoryStore (sf.db with node:sqlite)", - "pm": "The memory extension's pipeline is well-designed but writes to a separate database. We should port it to use the unified MemoryStore while preserving the two-phase architecture (extract → consolidate). The /memory command should read from sf.db.", - "researcher": "The memory extension pipeline has: scanSessionFiles, filterSessionContent, runPhase1 (LLM extraction), runPhase2 (consolidation). It uses sql.js MemoryStorage class. We need to replace MemoryStorage with MemoryStore and write to sf.db instead of agent.db.", - "partner": "The pipeline should be preserved as-is but write to the unified store. Phase 1 extracts memories from session .jsonl files via LLM. Phase 2 consolidates and deduplicates. Both should write to sf.db memories table with source='extracted'.", - "combatant": "The pipeline might create too many memories and overwhelm the store. We need rate limiting and deduplication. Also, running LLM extraction on every session startup could be expensive and slow.", - "architect": "Agree on rate limiting. The pipeline should: (1) use the existing session scanner, (2) call LLM for extraction, (3) write to MemoryStore with source='extracted', (4) run consolidation as a separate phase. We should add a processed_units check to avoid re-processing the same sessions.", - "moderator": "Decision: Port the pipeline to use MemoryStore. Add session_id tracking to avoid re-processing. Keep the two-phase architecture. Add rate limiting (max memories per session). Run extraction asynchronously without blocking dispatch.", - "recommendedRoute": "planning", - "confidenceSummary": "Medium-high confidence. The pipeline code is well-understood. The main risk is performance and store bloat, which we mitigate with rate limiting and deduplication." - }, - "sequence": 2, - "replan_triggered_at": null - }, - { - "milestone_id": "M001-6377a4", - "id": "S03", - "title": "Migration and Legacy Deprecation", - "status": "pending", - "risk": "low", - "depends": [ - "S02" - ], - "demo": "Running a migration command imports existing KNOWLEDGE.md and agent.db memories into sf.db; old files are left as read-only backups", - "created_at": "2026-05-06T20:52:17.319Z", - "completed_at": null, - "full_summary_md": "", - "full_uat_md": "", - "goal": "Provide a one-time migration path from KNOWLEDGE.md and the old agent.db into the unified store, then deprecate the legacy systems", - "success_criteria": "- Migration script can import KNOWLEDGE.md entries as memories with source='migrated'\n- Migration script can import agent.db stage1_outputs as memories with source='extracted'\n- Dry-run mode reports counts without modifying database\n- sf.db is automatically backed up before migration\n- knowledge-compounding.js and knowledge-injector.js are removed or no-op'd\n- Memory extension uses MemoryStore instead of sql.js\n- CHANGELOG documents the consolidation and deprecation", - "proof_level": "contract", - "integration_closure": "Cleans up legacy code paths and provides a safe migration for existing users.", - "observability_impact": "Migration script reports counts of imported records per source. 
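The architect and moderator both land on a processed-units check so the pipeline never re-extracts the same transcript, and the S02/T01 task below describes the fingerprint as a content hash. A sketch of that check; the memory_processed_units column name `unit_id` is an assumption:

```js
import { createHash } from 'node:crypto';
import { readFileSync } from 'node:fs';

// Sketch: skip transcripts whose content hash is already recorded, per the
// processed-units check the architect proposes.
function shouldProcessSession(db, filePath) {
  const fingerprint = createHash('sha256')
    .update(readFileSync(filePath))
    .digest('hex');
  const seen = db.prepare(
    'SELECT 1 FROM memory_processed_units WHERE unit_id = ? LIMIT 1'
  ).get(fingerprint);
  return seen ? null : fingerprint; // caller records it after Phase 1 succeeds
}
```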
Deprecation warnings logged for old API usage.", - "adversarial_partner": "Missing partner review.", - "adversarial_combatant": "Missing combatant review.", - "adversarial_architect": "Missing architect review.", - "planning_meeting": { - "trigger": "Need to migrate existing KNOWLEDGE.md entries and old agent.db memories into the unified sf.db store before deprecating legacy systems", - "pm": "Users may have valuable accumulated knowledge in KNOWLEDGE.md and extracted memories in agent.db. We need a safe migration path that preserves this data. The migration should be idempotent and leave old files as read-only backups.", - "researcher": "KNOWLEDGE.md format: plain markdown with sections (Rules, Patterns, Lessons Learned). Each entry is a table row. Agent.db format: sql.js database with tables: threads, stage1_outputs, jobs. stage1_outputs contains extraction_json (JSON array of memories).", - "partner": "Migration should: (1) parse KNOWLEDGE.md entries and insert as memories with source='migrated', (2) read agent.db stage1_outputs and insert as memories with source='extracted', (3) deduplicate against existing memories, (4) leave old files untouched as backups.", - "combatant": "Migration scripts are often buggy and can corrupt data. We should validate the migration with dry-run mode and backup the sf.db before running. Also, what if the old agent.db is corrupted or uses an older schema?", - "architect": "Agree on dry-run and backup. The migration script should: (1) create a backup of sf.db, (2) parse KNOWLEDGE.md with regex/table parsing, (3) parse agent.db extractions with JSON.parse, (4) check for duplicates before inserting, (5) report counts. For corrupted agent.db, skip with warning.", - "moderator": "Decision: Create a migration script with dry-run mode and automatic backup. Parse KNOWLEDGE.md entries as individual memories. Parse agent.db stage1_outputs as memory arrays. Deduplicate by content hash. Report migration statistics. Leave old files as read-only backups.", - "recommendedRoute": "planning", - "confidenceSummary": "Medium confidence. Migration scripts are inherently risky, but dry-run and backup mitigate this. The parsing logic is straightforward." - }, - "sequence": 3, - "replan_triggered_at": null - }, - { - "milestone_id": "M001-6377a4", - "id": "S04", - "title": "System Prompt Integration and Cleanup", - "status": "pending", - "risk": "medium", - "depends": [ - "S03" - ], - "demo": "Starting a new session injects relevant ranked memories from sf.db into the system prompt; no references to old memory systems remain in the codebase", - "created_at": "2026-05-06T20:52:17.319Z", - "completed_at": null, - "full_summary_md": "", - "full_uat_md": "", - "goal": "Replace all system prompt injection paths to use the unified memory store, remove dead code, and verify end-to-end", - "success_criteria": "Not provided.", - "proof_level": "Not provided.", - "integration_closure": "Not provided.", - "observability_impact": "Not provided.", - "adversarial_partner": "Missing partner review.", - "adversarial_combatant": "Missing combatant review.", - "adversarial_architect": "Missing architect review.", - "planning_meeting": { - "trigger": "Need to integrate unified memory store into system prompt injection and remove all legacy memory code paths", - "pm": "The system prompt currently loads KNOWLEDGE.md and memory-store.js separately. We need to replace this with a single MemoryStore.getRelevantMemoriesRanked() call that injects ranked memories into the prompt. 
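S04's planning converges on a single injection path through MemoryStore.getRelevantMemoriesRanked(), with a temporary read-only fallback to KNOWLEDGE.md until migration completes. A sketch of that assembly step; `store`, `taskQuery`, and `readKnowledgeMd` are hypothetical stand-ins for pieces the plan names:

```js
// Sketch: the single injection path with the read-only KNOWLEDGE.md fallback
// the S04 meeting decides on. Falls back only when the store returns nothing.
function buildMemoryBlock(store, taskQuery, readKnowledgeMd) {
  const memories = store.getRelevantMemoriesRanked(taskQuery, 15);
  if (memories.length > 0) {
    return store.formatMemoriesForPrompt(memories);
  }
  // Temporary fallback until KNOWLEDGE.md entries are migrated.
  return readKnowledgeMd() ?? '';
}
```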
This simplifies the context assembly and ensures all memory flows through one path.", - "researcher": "system-context.js currently loads: knowledgeBlock (from KNOWLEDGE.md files) and memoryBlock (from memory-store.js getActiveMemoriesRanked). We need to replace both with a single call to MemoryStore.getRelevantMemoriesRanked(query, limit) where query is derived from the current task context.", - "partner": "The integration should: (1) replace knowledgeBlock + memoryBlock with a single memory injection, (2) use task context keywords for relevance scoring, (3) format memories with category headers, (4) respect token budget, (5) remove all imports of old memory systems.", - "combatant": "Removing KNOWLEDGE.md injection might lose important human-curated rules that aren't in the memory store yet. We should ensure migration is complete before removing the old path, or keep a fallback for unmigrated knowledge.", - "architect": "Agree on fallback. The integration should: (1) try MemoryStore first, (2) if empty, fall back to KNOWLEDGE.md (read-only, no new writes), (3) inject formatted memories into system prompt via before_agent_start hook, (4) track memory usage for feedback loop.", - "moderator": "Decision: Replace system prompt injection with MemoryStore.getRelevantMemoriesRanked(). Add a temporary fallback to KNOWLEDGE.md if MemoryStore returns empty (until migration is complete). Remove all old memory system imports. Add memory usage tracking. Run end-to-end test.", - "recommendedRoute": "planning", - "confidenceSummary": "High confidence. The integration is straightforward — replace two injection paths with one. The fallback ensures no regression during migration." - }, - "sequence": 4, - "replan_triggered_at": null - } - ], - "tasks": [ - { - "milestone_id": "M001-6377a4", - "slice_id": "S01", - "id": "T01", - "title": "Audit and extend sf.db memories schema", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. Read sf-db.js schema initialization to verify memories table exists with correct columns\n2. Add `source` column (TEXT, default 'manual') to distinguish: 'manual', 'extracted', 'migrated'\n3. Add `session_id` column (TEXT, nullable) to link extracted memories to source sessions\n4. Bump SCHEMA_VERSION in sf-db.js\n5. Write migration SQL that adds columns if they don't exist (for existing databases)\n6. Verify: run tests that create and query memories with new columns", - "estimate": "1h", - "files": [ - "src/resources/extensions/sf/sf-db.js", - "src/resources/extensions/sf/memory-store.js" - ], - "verify": "npm test -- --grep \"memory schema\"", - "inputs": [ - "src/resources/extensions/sf/sf-db.js" - ], - "expected_output": [ - "src/resources/extensions/sf/sf-db.js (modified)" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - }, - { - "milestone_id": "M001-6377a4", - "slice_id": "S01", - "id": "T02", - "title": "Create MemoryStore class wrapper", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. 
Create src/resources/extensions/sf/MemoryStore.ts (or .js) that wraps existing memory-store.js functions\n2. Expose methods: createMemory, updateMemory, reinforceMemory, supersedeMemory, getActiveMemoriesRanked, getRelevantMemoriesRanked, formatMemoriesForPrompt\n3. Add source parameter to createMemory (default 'manual')\n4. Ensure all methods delegate to existing sf-db.js wrappers\n5. Export both the class and individual functions for backward compatibility\n6. Verify: unit tests pass for class and function APIs", - "estimate": "1.5h", - "files": [ - "src/resources/extensions/sf/MemoryStore.ts", - "src/resources/extensions/sf/memory-store.js" - ], - "verify": "npm test -- --grep \"MemoryStore\"", - "inputs": [ - "src/resources/extensions/sf/memory-store.js", - "src/resources/extensions/sf/sf-db.js" - ], - "expected_output": [ - "src/resources/extensions/sf/MemoryStore.ts" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - },
- { - "milestone_id": "M001-6377a4", - "slice_id": "S01", - "id": "T03", - "title": "Write unit tests for MemoryStore CRUD and ranking", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. Create src/resources/extensions/sf/tests/memory-store.test.ts\n2. Test CRUD: createMemory, getActiveMemories, updateMemory, reinforceMemory, supersedeMemory\n3. Test ranking: getActiveMemoriesRanked returns correct order by confidence * hit_count\n4. Test graceful degradation: returns empty arrays when DB unavailable\n5. Test source filtering: memories created with different sources are queryable\n6. Verify: all tests pass", - "estimate": "1.5h", - "files": [ - "src/resources/extensions/sf/tests/memory-store.test.ts" - ], - "verify": "npm test -- src/resources/extensions/sf/tests/memory-store.test.ts", - "inputs": [ - "src/resources/extensions/sf/MemoryStore.ts", - "src/resources/extensions/sf/memory-store.js" - ], - "expected_output": [ - "src/resources/extensions/sf/tests/memory-store.test.ts" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - },
- { - "milestone_id": "M001-6377a4", - "slice_id": "S02", - "id": "T01", - "title": "Port session scanner to use MemoryStore", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. Copy scanSessionFiles and filterSessionContent from memory extension\n2. Adapt to read from ~/.sf/agent/sessions/ directory structure\n3. Add session fingerprinting (hash of content) to avoid re-processing\n4. Check memory_processed_units table before processing\n5. Verify: scanner finds session files and filters correctly", - "estimate": "1h", - "files": [ - "src/resources/extensions/sf/memory-pipeline.js" - ], - "verify": "npm test -- --grep \"session scanner\"", - "inputs": [ - "packages/pi-coding-agent/src/resources/extensions/memory/pipeline.ts" - ], - "expected_output": [ - "src/resources/extensions/sf/memory-pipeline.js" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - },
- { - "milestone_id": "M001-6377a4", - "slice_id": "S02", - "id": "T02", - "title": "Implement Phase 1: LLM extraction to MemoryStore", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. Port runPhase1 from memory extension\n2. Use completeSimple from pi-ai for LLM calls\n3. Extract memories as JSON array with category/content/confidence\n4. Write each extracted memory to MemoryStore with source='extracted' and session_id\n5. Add rate limiting: max 10 memories per session, min confidence 0.6\n6. Mark session as processed in memory_processed_units\n7. Verify: extraction creates memories in sf.db", - "estimate": "1.5h", - "files": [ - "src/resources/extensions/sf/memory-pipeline.js", - "src/resources/extensions/sf/MemoryStore.ts" - ], - "verify": "npm test -- --grep \"memory extraction\"", - "inputs": [ - "src/resources/extensions/sf/memory-pipeline.js", - "src/resources/extensions/sf/MemoryStore.ts" - ], - "expected_output": [ - "src/resources/extensions/sf/memory-pipeline.js (phase 1)" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - },
- { - "milestone_id": "M001-6377a4", - "slice_id": "S02", - "id": "T03", - "title": "Implement Phase 2: Consolidation and /memory command", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. Port runPhase2 consolidation logic\n2. Query MemoryStore for source='extracted' memories\n3. Group by category, deduplicate similar content (simple string similarity)\n4. Update consolidated memories with higher confidence\n5. Supersede duplicate memories\n6. Implement /memory command that reads from MemoryStore\n7. Verify: consolidation runs and /memory shows extracted memories", - "estimate": "1.5h", - "files": [ - "src/resources/extensions/sf/memory-pipeline.js", - "src/resources/extensions/sf/memory-commands.js" - ], - "verify": "npm test -- --grep \"memory consolidation\"", - "inputs": [ - "src/resources/extensions/sf/memory-pipeline.js", - "src/resources/extensions/sf/MemoryStore.ts" - ], - "expected_output": [ - "src/resources/extensions/sf/memory-commands.js" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - },
- { - "milestone_id": "M001-6377a4", - "slice_id": "S03", - "id": "T01", - "title": "Create KNOWLEDGE.md parser and migrator", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. Create src/resources/extensions/sf/migrate-knowledge.js\n2. Parse KNOWLEDGE.md sections (Rules, Patterns, Lessons Learned)\n3. Extract table rows as memory entries\n4. Map to memory categories: rule→convention, pattern→pattern, lesson→gotcha\n5. Insert into MemoryStore with source='migrated'\n6. Deduplicate by content hash against existing memories\n7. Add dry-run mode (report without inserting)\n8. Verify: dry-run reports correct counts", - "estimate": "1.5h", - "files": [ - "src/resources/extensions/sf/migrate-knowledge.js", - ".sf/KNOWLEDGE.md", - "KNOWLEDGE.md" - ], - "verify": "node src/resources/extensions/sf/migrate-knowledge.js --dry-run", - "inputs": [ - ".sf/KNOWLEDGE.md", - "KNOWLEDGE.md", - "src/resources/extensions/sf/MemoryStore.ts" - ], - "expected_output": [ - "src/resources/extensions/sf/migrate-knowledge.js" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - },
- { - "milestone_id": "M001-6377a4", - "slice_id": "S03", - "id": "T02", - "title": "Create agent.db migrator", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. Create src/resources/extensions/sf/migrate-agent-db.js\n2. Read old agent.db using sql.js\n3. Extract stage1_outputs.extraction_json fields\n4. Parse JSON arrays of memories\n5. Insert into MemoryStore with source='extracted'\n6. Deduplicate by content hash\n7. Handle corrupted databases gracefully (skip with warning)\n8. Add dry-run mode\n9. Verify: dry-run reports correct counts", - "estimate": "1.5h", - "files": [ - "src/resources/extensions/sf/migrate-agent-db.js", - "~/.sf/agent/agent.db" - ], - "verify": "node src/resources/extensions/sf/migrate-agent-db.js --dry-run", - "inputs": [ - "packages/pi-coding-agent/src/resources/extensions/memory/storage.ts", - "src/resources/extensions/sf/MemoryStore.ts" - ], - "expected_output": [ - "src/resources/extensions/sf/migrate-agent-db.js" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - },
- { - "milestone_id": "M001-6377a4", - "slice_id": "S03", - "id": "T03", - "title": "Deprecate legacy memory systems", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. Add deprecation warnings to memory-store.js exports (log warning on first use)\n2. Remove knowledge-compounding.js and knowledge-injector.js or make them no-op\n3. Update memory extension to use MemoryStore instead of sql.js\n4. Remove sql.js dependency from memory extension\n5. Update CHANGELOG.md with migration instructions\n6. Verify: no imports of old memory systems in active code paths", - "estimate": "1h", - "files": [ - "src/resources/extensions/sf/memory-store.js", - "src/resources/extensions/sf/knowledge-compounding.js", - "src/resources/extensions/sf/knowledge-injector.js", - "packages/pi-coding-agent/src/resources/extensions/memory/index.ts", - "packages/pi-coding-agent/src/resources/extensions/memory/storage.ts", - "CHANGELOG.md" - ], - "verify": "grep -r \"knowledge-compounding\\|knowledge-injector\\|sql.js\" src/ packages/ --include=\"*.ts\" --include=\"*.js\" | grep -v \"test\\|spec\\|deprecated\"", - "inputs": [ - "src/resources/extensions/sf/migrate-knowledge.js", - "src/resources/extensions/sf/migrate-agent-db.js" - ], - "expected_output": [ - "src/resources/extensions/sf/memory-store.js (deprecated)", - "CHANGELOG.md (updated)" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - },
- { - "milestone_id": "M001-6377a4", - "slice_id": "S04", - "id": "T01", - "title": "Integrate MemoryStore into system prompt injection", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. Read system-context.js to understand current memory injection\n2. Replace knowledgeBlock + memoryBlock with MemoryStore.getRelevantMemoriesRanked()\n3. Derive query from task context (domain, keywords, technology)\n4. Format memories with category headers\n5. Respect token budget (truncate if needed)\n6. Add fallback to KNOWLEDGE.md if MemoryStore empty\n7. Verify: system prompt contains formatted memories", - "estimate": "1.5h", - "files": [ - "src/resources/extensions/sf/bootstrap/system-context.js", - "src/resources/extensions/sf/MemoryStore.ts" - ], - "verify": "npm test -- --grep \"system context memory\"", - "inputs": [ - "src/resources/extensions/sf/bootstrap/system-context.js", - "src/resources/extensions/sf/MemoryStore.ts" - ], - "expected_output": [ - "src/resources/extensions/sf/bootstrap/system-context.js (modified)" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - },
- { - "milestone_id": "M001-6377a4", - "slice_id": "S04", - "id": "T02", - "title": "Remove legacy memory system imports", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. Remove all imports of memory-store.js from active code\n2. Remove all imports of knowledge-injector.js\n3. Remove all imports of knowledge-compounding.js\n4. Update package.json to remove better-sqlite3 and sql.js from memory-related dependencies\n5. Verify: grep returns no active references to old systems", - "estimate": "1h", - "files": [ - "src/resources/extensions/sf/**/*.js", - "src/resources/extensions/sf/**/*.ts", - "packages/pi-coding-agent/src/resources/extensions/memory/**/*.ts" - ], - "verify": "grep -r \"knowledge-injector\\|knowledge-compounding\\|memory-store\" src/ packages/ --include=\"*.ts\" --include=\"*.js\" | grep -v \"test\\|spec\\|deprecated\"", - "inputs": [ - "src/resources/extensions/sf/bootstrap/system-context.js" - ], - "expected_output": [ - "Clean codebase with no legacy memory imports" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - },
- { - "milestone_id": "M001-6377a4", - "slice_id": "S04", - "id": "T03", - "title": "Write end-to-end integration tests", - "status": "pending", - "one_liner": "", - "narrative": "", - "verification_result": "", - "duration": "", - "completed_at": null, - "blocker_discovered": false, - "deviations": "", - "known_issues": "", - "key_files": [], - "key_decisions": [], - "full_summary_md": "", - "description": "1. Create end-to-end test: mock session with memory-worthy content\n2. Trigger memory extraction pipeline\n3. Start new session with matching context\n4. Verify system prompt contains the extracted memory\n5. Test graceful degradation: empty store returns empty string\n6. Test token budget: large memory set is truncated\n7. Verify: all tests pass", - "estimate": "2h", - "files": [ - "src/resources/extensions/sf/tests/memory-e2e.test.ts" - ], - "verify": "npm test -- src/resources/extensions/sf/tests/memory-e2e.test.ts", - "inputs": [ - "src/resources/extensions/sf/bootstrap/system-context.js", - "src/resources/extensions/sf/memory-pipeline.js", - "src/resources/extensions/sf/MemoryStore.ts" - ], - "expected_output": [ - "src/resources/extensions/sf/tests/memory-e2e.test.ts" - ], - "observability_impact": "", - "full_plan_md": "", - "sequence": 0 - } - ],
- "decisions": [ - { - "seq": 1, - "id": "D001", - "when_context": "", - "scope": "environment", - "decision": "How to handle missing libsecret dependency for keytar native addon on Linux.", - "choice": "Add libsecret-1-0 / libsecret-1-dev to all Linux environment definitions (Docker, Nix, CI).", - "rationale": "@github/keytar (used by @google/gemini-cli-core) requires libsecret on Linux. Without it, keychain initialization fails with a cryptic \"Failed to load keytar native addon\" error. Adding it to the core environment definitions ensures it is present in all official SF environments.", - "revisable": "Yes", - "made_by": "agent", - "superseded_by": null - } - ], - "verification_evidence": [] -} \ No newline at end of file
+ "version": 1, + "origin": "db-projection", + "source_db_path": "/home/mhugo/code/singularity-forge/.sf/sf.db", + "exported_at": "2026-05-06T21:01:36.517Z", + "milestones": [ + { + "id": "M001-6377a4", + "title": "Consolidate Memory Systems into Unified node:sqlite Store", + "status": "active", + "depends_on": [], + "created_at": "2026-05-06T20:51:08.963Z", + "completed_at": null, + "vision": "Replace the three fragmented memory systems (KNOWLEDGE.md, memory-store.js/sf.db, and memory extension/sql.js) with a single unified memory store backed by node:sqlite. All memory ingestion, querying, and prompt injection flows through one canonical database table with semantic search, confidence ranking, and automatic maintenance.", + "success_criteria": [ + "A single `memories` table in `sf.db` stores all project memory with categories, confidence, hit_count, and embeddings", + "Session transcript extraction pipeline writes directly to `sf.db` instead of separate `agent.db`", + "KNOWLEDGE.md is retired as a live system; existing entries are optionally migrated", + "Memory query returns ranked results via `getRelevantMemoriesRanked(query, limit)` using node:sqlite", + "System prompt injection uses the unified memory store, not multiple competing sources", + "All better-sqlite3 and sql.js dependencies for memory are removed" + ], + "key_risks": [ + { + "risk": "Data loss during migration from existing memory stores", + "whyItMatters": "Users may have valuable accumulated memories in the old systems that would be lost if we don't migrate or preserve them" + }, + { + "risk": "Performance regression from consolidating into a single database with WAL contention", + "whyItMatters": "The sf.db already has a single-writer invariant; adding memory extraction writes could create lock contention during session startup" + }, + { + "risk": "Breaking the memory extension API contract for external consumers", + "whyItMatters": "The memory extension is a Pi extension with hooks and commands; changing its storage backend changes observable behavior" + } + ], + "proof_strategy": [ + { + "riskOrUnknown": "Single database performance under write load", + "retireIn": "S02", + "whatWillBeProven": "Memory extraction during session startup completes without blocking dispatch, proven by timing telemetry and zero timeout errors" + }, + { + "riskOrUnknown": "Migration path preserves existing memories", + "retireIn": "S03", + "whatWillBeProven": "Existing KNOWLEDGE.md entries and memory extension extractions are importable into the new schema with no data loss" + } + ], + "verification_contract": "Unit tests for memory CRUD, ranking, and embedding fallback. Integration test for full pipeline: session scan → extraction → storage → query → prompt injection.", + "verification_integration": "End-to-end test: run a mock session, trigger memory extraction, verify the memory appears in sf.db and is injectable into a prompt.", + "verification_operational": "Verify no memory-related background processes leak or consume excessive resources. Confirm WAL checkpoint behavior under memory write load.", + "verification_uat": "Manual verification: run `/memory view` command and confirm it reads from sf.db. Check that system prompt contains relevant memories from previous sessions.", + "definition_of_done": [ + "All three legacy memory systems are removed or deprecated", + "Unified memory store passes all new tests", + "Migration script can import existing KNOWLEDGE.md and old agent.db data", + "Documentation updated to describe the single memory system", + "No references to better-sqlite3 or sql.js in memory-related code" + ], + "requirement_coverage": "Covers: unified memory storage, semantic search, session-based learning. Partially covers: cross-session context persistence. Leaves for later: multi-project memory sharing, cloud-synced memory.", + "boundary_map_markdown": "### S01 → S02\n\nProduces:\n- Unified `memories` table schema in `sf.db`\n- `MemoryStore` class with CRUD, query, and ranking API\n- `formatMemoriesForPrompt()` for system prompt injection\n\nConsumes:\n- Existing `sf-db.js` adapter and schema initialization\n\n### S02 → S03\n\nProduces:\n- Session transcript scanner and LLM extraction pipeline\n- Direct write path to `sf.db` memories table\n- `/memory` command reading from unified store\n\nConsumes:\n- `MemoryStore` API from S01\n- `completeSimple` LLM call from pi-ai\n\n### S03 → S04\n\nProduces:\n- KNOWLEDGE.md importer\n- Legacy agent.db migration script\n- Deprecation warnings for old memory paths\n\nConsumes:\n- `MemoryStore` API from S01\n- Existing KNOWLEDGE.md and agent.db files", + "vision_meeting": null + } + ],
+ "slices": [ + { + "milestone_id": "M001-6377a4", + "id": "S01", + "title": "Unified Memory Schema and Core API", + "status": "pending", + "risk": "high", + "depends": [], + "demo": "A test creates a memory, queries it by relevance, and formats it for prompt injection — all via node:sqlite", + "created_at": "2026-05-06T20:52:17.318Z", + "completed_at": null, + "full_summary_md": "", + "full_uat_md": "", + "goal": "Create the canonical `memories` table in `sf.db` and a `MemoryStore` class that replaces memory-store.js with a node:sqlite implementation", + "success_criteria": "- `sf.db` has a `memories` table with: id, category, content, confidence, hit_count, source, session_id, source_unit_type, source_unit_id, created_at, updated_at, superseded_by\n- `MemoryStore` class exposes: createMemory, updateMemory, reinforceMemory, supersedeMemory, getActiveMemoriesRanked, getRelevantMemoriesRanked, formatMemoriesForPrompt\n- All methods gracefully degrade when DB is unavailable\n- Unit tests cover CRUD, ranking, source filtering, and embedding fallback", + "proof_level": "contract", + "integration_closure": "Produces the MemoryStore API that all downstream slices consume. No external wiring yet.", + "observability_impact": "No runtime signals added yet; tests verify table schema and query correctness.", + "adversarial_partner": "Missing partner review.", + "adversarial_combatant": "Missing combatant review.", + "adversarial_architect": "Missing architect review.", + "planning_meeting": { + "trigger": "Need to understand current memory-store.js implementation and sf-db.js schema before designing the unified table", + "pm": "The memory-store.js already has a working API with categories, confidence, hit_count, superseded_by, and embeddings. We should preserve this API surface but ensure it uses node:sqlite natively (not better-sqlite3 wrappers). The sf-db.js already uses node:sqlite DatabaseSync.", + "researcher": "Current memory-store.js uses sf-db.js which uses node:sqlite DatabaseSync. The schema already has memories table with: seq (autoincrement), id, category, content, confidence, source_unit_type, source_unit_id, created_at, updated_at, superseded_by, hit_count. Embeddings are in a separate memory_embeddings table. Relations in memory_relations. Processed units tracked in memory_processed_units.", + "partner": "The existing memory-store.js is already well-designed. S01 should: (1) verify the schema is correct in sf-db.js, (2) create a clean MemoryStore class that wraps the existing functions, (3) add unit tests, (4) ensure graceful degradation. We don't need to redesign the schema — it's already good.", + "combatant": "The current memory-store.js exports individual functions, not a class. Creating a class is an unnecessary abstraction if the functions already work. Also, the existing schema might be missing fields needed for the session extraction pipeline (like thread_id, session_id). We should check if the memory extension's schema has fields we need to add.", + "architect": "Agree with combatant on checking schema compatibility. The memory extension's pipeline stores: thread_id, file_path, file_size, file_mtime, cwd, status, extraction_json. These are pipeline metadata, not memory content. The actual memories are extracted JSON with category/content/confidence. So the existing memories table schema should work. However, we might want to add a `source` column to distinguish manual capture_thought vs auto-extracted vs KNOWLEDGE.md migrated entries.", + "moderator": "Decision: Keep the existing memories table schema (it's already correct). Add a `source` column to distinguish ingestion paths. Create a MemoryStore class that wraps the existing function-based API for cleaner consumption by the pipeline. Write unit tests. Do NOT redesign the schema from scratch.", + "recommendedRoute": "planning", + "confidenceSummary": "High confidence. The existing memory-store.js and sf-db.js are already well-designed. We just need to add a source column, create a class wrapper, and add tests." + }, + "sequence": 1, + "replan_triggered_at": null + },
+ { + "milestone_id": "M001-6377a4", + "id": "S02", + "title": "Session Transcript Extraction Pipeline", + "status": "pending", + "risk": "medium", + "depends": ["S01"], + "demo": "After running a session, the pipeline extracts memories and stores them in sf.db; `/memory view` shows the extracted knowledge", + "created_at": "2026-05-06T20:52:17.319Z", + "completed_at": null, + "full_summary_md": "", + "full_uat_md": "", + "goal": "Port the two-phase extraction pipeline from the memory extension to write directly into the unified sf.db store instead of separate agent.db", + "success_criteria": "- Session .jsonl files are scanned and filtered for the current cwd\n- Phase 1: LLM extracts durable knowledge from transcripts, writes to sf.db memories table with source='extracted'\n- Phase 2: Consolidation deduplicates and ranks memories\n- /memory command reads from unified store\n- Pipeline runs fire-and-forget on session start without blocking dispatch\n- Rate limiting prevents store bloat (max 10 memories per session)", + "proof_level": "integration", + "integration_closure": "Wires the extraction pipeline into the session lifecycle and replaces the old memory extension storage path.", + "observability_impact": "Pipeline logs extraction stats (processed, errors) to the console. Memory store tracks hit_count for reinforcement.", + "adversarial_partner": "Missing partner review.", + "adversarial_combatant": "Missing combatant review.", + "adversarial_architect": "Missing architect review.", + "planning_meeting": { + "trigger": "Need to port the session transcript extraction pipeline from the memory extension (which uses sql.js/agent.db) to use the unified MemoryStore (sf.db with node:sqlite)", + "pm": "The memory extension's pipeline is well-designed but writes to a separate database. We should port it to use the unified MemoryStore while preserving the two-phase architecture (extract → consolidate). The /memory command should read from sf.db.", + "researcher": "The memory extension pipeline has: scanSessionFiles, filterSessionContent, runPhase1 (LLM extraction), runPhase2 (consolidation). It uses sql.js MemoryStorage class. We need to replace MemoryStorage with MemoryStore and write to sf.db instead of agent.db.", + "partner": "The pipeline should be preserved as-is but write to the unified store. Phase 1 extracts memories from session .jsonl files via LLM. Phase 2 consolidates and deduplicates. Both should write to sf.db memories table with source='extracted'.", + "combatant": "The pipeline might create too many memories and overwhelm the store. We need rate limiting and deduplication. Also, running LLM extraction on every session startup could be expensive and slow.", + "architect": "Agree on rate limiting. The pipeline should: (1) use the existing session scanner, (2) call LLM for extraction, (3) write to MemoryStore with source='extracted', (4) run consolidation as a separate phase. We should add a processed_units check to avoid re-processing the same sessions.", + "moderator": "Decision: Port the pipeline to use MemoryStore. Add session_id tracking to avoid re-processing. Keep the two-phase architecture. Add rate limiting (max memories per session). Run extraction asynchronously without blocking dispatch.", + "recommendedRoute": "planning", + "confidenceSummary": "Medium-high confidence. The pipeline code is well-understood. The main risk is performance and store bloat, which we mitigate with rate limiting and deduplication." + }, + "sequence": 2, + "replan_triggered_at": null + },
+ { + "milestone_id": "M001-6377a4", + "id": "S03", + "title": "Migration and Legacy Deprecation", + "status": "pending", + "risk": "low", + "depends": ["S02"], + "demo": "Running a migration command imports existing KNOWLEDGE.md and agent.db memories into sf.db; old files are left as read-only backups", + "created_at": "2026-05-06T20:52:17.319Z", + "completed_at": null, + "full_summary_md": "", + "full_uat_md": "", + "goal": "Provide a one-time migration path from KNOWLEDGE.md and the old agent.db into the unified store, then deprecate the legacy systems", + "success_criteria": "- Migration script can import KNOWLEDGE.md entries as memories with source='migrated'\n- Migration script can import agent.db stage1_outputs as memories with source='extracted'\n- Dry-run mode reports counts without modifying database\n- sf.db is automatically backed up before migration\n- knowledge-compounding.js and knowledge-injector.js are removed or no-op'd\n- Memory extension uses MemoryStore instead of sql.js\n- CHANGELOG documents the consolidation and deprecation", + "proof_level": "contract", + "integration_closure": "Cleans up legacy code paths and provides a safe migration for existing users.", + "observability_impact": "Migration script reports counts of imported records per source. Deprecation warnings logged for old API usage.", + "adversarial_partner": "Missing partner review.", + "adversarial_combatant": "Missing combatant review.", + "adversarial_architect": "Missing architect review.", + "planning_meeting": { + "trigger": "Need to migrate existing KNOWLEDGE.md entries and old agent.db memories into the unified sf.db store before deprecating legacy systems", + "pm": "Users may have valuable accumulated knowledge in KNOWLEDGE.md and extracted memories in agent.db. We need a safe migration path that preserves this data. The migration should be idempotent and leave old files as read-only backups.", + "researcher": "KNOWLEDGE.md format: plain markdown with sections (Rules, Patterns, Lessons Learned). Each entry is a table row. Agent.db format: sql.js database with tables: threads, stage1_outputs, jobs. stage1_outputs contains extraction_json (JSON array of memories).", + "partner": "Migration should: (1) parse KNOWLEDGE.md entries and insert as memories with source='migrated', (2) read agent.db stage1_outputs and insert as memories with source='extracted', (3) deduplicate against existing memories, (4) leave old files untouched as backups.", + "combatant": "Migration scripts are often buggy and can corrupt data. We should validate the migration with dry-run mode and backup the sf.db before running. Also, what if the old agent.db is corrupted or uses an older schema?", + "architect": "Agree on dry-run and backup. The migration script should: (1) create a backup of sf.db, (2) parse KNOWLEDGE.md with regex/table parsing, (3) parse agent.db extractions with JSON.parse, (4) check for duplicates before inserting, (5) report counts. For corrupted agent.db, skip with warning.", + "moderator": "Decision: Create a migration script with dry-run mode and automatic backup. Parse KNOWLEDGE.md entries as individual memories. Parse agent.db stage1_outputs as memory arrays. Deduplicate by content hash. Report migration statistics. Leave old files as read-only backups.", + "recommendedRoute": "planning", + "confidenceSummary": "Medium confidence. Migration scripts are inherently risky, but dry-run and backup mitigate this. The parsing logic is straightforward." + }, + "sequence": 3, + "replan_triggered_at": null + },
+ { + "milestone_id": "M001-6377a4", + "id": "S04", + "title": "System Prompt Integration and Cleanup", + "status": "pending", + "risk": "medium", + "depends": ["S03"], + "demo": "Starting a new session injects relevant ranked memories from sf.db into the system prompt; no references to old memory systems remain in the codebase", + "created_at": "2026-05-06T20:52:17.319Z", + "completed_at": null, + "full_summary_md": "", + "full_uat_md": "", + "goal": "Replace all system prompt injection paths to use the unified memory store, remove dead code, and verify end-to-end", + "success_criteria": "Not provided.", + "proof_level": "Not provided.", + "integration_closure": "Not provided.", + "observability_impact": "Not provided.", + "adversarial_partner": "Missing partner review.", + "adversarial_combatant": "Missing combatant review.", + "adversarial_architect": "Missing architect review.", + "planning_meeting": { + "trigger": "Need to integrate unified memory store into system prompt injection and remove all legacy memory code paths", + "pm": "The system prompt currently loads KNOWLEDGE.md and memory-store.js separately. We need to replace this with a single MemoryStore.getRelevantMemoriesRanked() call that injects ranked memories into the prompt. This simplifies the context assembly and ensures all memory flows through one path.", + "researcher": "system-context.js currently loads: knowledgeBlock (from KNOWLEDGE.md files) and memoryBlock (from memory-store.js getActiveMemoriesRanked). We need to replace both with a single call to MemoryStore.getRelevantMemoriesRanked(query, limit) where query is derived from the current task context.", + "partner": "The integration should: (1) replace knowledgeBlock + memoryBlock with a single memory injection, (2) use task context keywords for relevance scoring, (3) format memories with category headers, (4) respect token budget, (5) remove all imports of old memory systems.", + "combatant": "Removing KNOWLEDGE.md injection might lose important human-curated rules that aren't in the memory store yet. We should ensure migration is complete before removing the old path, or keep a fallback for unmigrated knowledge.", + "architect": "Agree on fallback. The integration should: (1) try MemoryStore first, (2) if empty, fall back to KNOWLEDGE.md (read-only, no new writes), (3) inject formatted memories into system prompt via before_agent_start hook, (4) track memory usage for feedback loop.", + "moderator": "Decision: Replace system prompt injection with MemoryStore.getRelevantMemoriesRanked(). Add a temporary fallback to KNOWLEDGE.md if MemoryStore returns empty (until migration is complete). Remove all old memory system imports. Add memory usage tracking. Run end-to-end test.", + "recommendedRoute": "planning", + "confidenceSummary": "High confidence. The integration is straightforward — replace two injection paths with one. The fallback ensures no regression during migration." + }, + "sequence": 4, + "replan_triggered_at": null + } + ],
+ "tasks": [ + { + "milestone_id": "M001-6377a4", + "slice_id": "S01", + "id": "T01", + "title": "Audit and extend sf.db memories schema", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Read sf-db.js schema initialization to verify memories table exists with correct columns\n2. Add `source` column (TEXT, default 'manual') to distinguish: 'manual', 'extracted', 'migrated'\n3. Add `session_id` column (TEXT, nullable) to link extracted memories to source sessions\n4. Bump SCHEMA_VERSION in sf-db.js\n5. Write migration SQL that adds columns if they don't exist (for existing databases)\n6. Verify: run tests that create and query memories with new columns", + "estimate": "1h", + "files": [ + "src/resources/extensions/sf/sf-db.js", + "src/resources/extensions/sf/memory-store.js" + ], + "verify": "npm test -- --grep \"memory schema\"", + "inputs": ["src/resources/extensions/sf/sf-db.js"], + "expected_output": ["src/resources/extensions/sf/sf-db.js (modified)"], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + },
+ { + "milestone_id": "M001-6377a4", + "slice_id": "S01", + "id": "T02", + "title": "Create MemoryStore class wrapper", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Create src/resources/extensions/sf/MemoryStore.ts (or .js) that wraps existing memory-store.js functions\n2. Expose methods: createMemory, updateMemory, reinforceMemory, supersedeMemory, getActiveMemoriesRanked, getRelevantMemoriesRanked, formatMemoriesForPrompt\n3. Add source parameter to createMemory (default 'manual')\n4. Ensure all methods delegate to existing sf-db.js wrappers\n5. Export both the class and individual functions for backward compatibility\n6. Verify: unit tests pass for class and function APIs", + "estimate": "1.5h", + "files": [ + "src/resources/extensions/sf/MemoryStore.ts", + "src/resources/extensions/sf/memory-store.js" + ], + "verify": "npm test -- --grep \"MemoryStore\"", + "inputs": [ + "src/resources/extensions/sf/memory-store.js", + "src/resources/extensions/sf/sf-db.js" + ], + "expected_output": ["src/resources/extensions/sf/MemoryStore.ts"], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + },
+ { + "milestone_id": "M001-6377a4", + "slice_id": "S01", + "id": "T03", + "title": "Write unit tests for MemoryStore CRUD and ranking", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Create src/resources/extensions/sf/tests/memory-store.test.ts\n2. Test CRUD: createMemory, getActiveMemories, updateMemory, reinforceMemory, supersedeMemory\n3. Test ranking: getActiveMemoriesRanked returns correct order by confidence * hit_count\n4. Test graceful degradation: returns empty arrays when DB unavailable\n5. Test source filtering: memories created with different sources are queryable\n6. Verify: all tests pass", + "estimate": "1.5h", + "files": ["src/resources/extensions/sf/tests/memory-store.test.ts"], + "verify": "npm test -- src/resources/extensions/sf/tests/memory-store.test.ts", + "inputs": [ + "src/resources/extensions/sf/MemoryStore.ts", + "src/resources/extensions/sf/memory-store.js" + ], + "expected_output": [ + "src/resources/extensions/sf/tests/memory-store.test.ts" + ], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + },
+ { + "milestone_id": "M001-6377a4", + "slice_id": "S02", + "id": "T01", + "title": "Port session scanner to use MemoryStore", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Copy scanSessionFiles and filterSessionContent from memory extension\n2. Adapt to read from ~/.sf/agent/sessions/ directory structure\n3. Add session fingerprinting (hash of content) to avoid re-processing\n4. Check memory_processed_units table before processing\n5. Verify: scanner finds session files and filters correctly", + "estimate": "1h", + "files": ["src/resources/extensions/sf/memory-pipeline.js"], + "verify": "npm test -- --grep \"session scanner\"", + "inputs": [ + "packages/pi-coding-agent/src/resources/extensions/memory/pipeline.ts" + ], + "expected_output": ["src/resources/extensions/sf/memory-pipeline.js"], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + },
+ { + "milestone_id": "M001-6377a4", + "slice_id": "S02", + "id": "T02", + "title": "Implement Phase 1: LLM extraction to MemoryStore", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Port runPhase1 from memory extension\n2. Use completeSimple from pi-ai for LLM calls\n3. Extract memories as JSON array with category/content/confidence\n4. Write each extracted memory to MemoryStore with source='extracted' and session_id\n5. Add rate limiting: max 10 memories per session, min confidence 0.6\n6. Mark session as processed in memory_processed_units\n7. Verify: extraction creates memories in sf.db", + "estimate": "1.5h", + "files": [ + "src/resources/extensions/sf/memory-pipeline.js", + "src/resources/extensions/sf/MemoryStore.ts" + ], + "verify": "npm test -- --grep \"memory extraction\"", + "inputs": [ + "src/resources/extensions/sf/memory-pipeline.js", + "src/resources/extensions/sf/MemoryStore.ts" + ], + "expected_output": [ + "src/resources/extensions/sf/memory-pipeline.js (phase 1)" + ], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + },
+ { + "milestone_id": "M001-6377a4", + "slice_id": "S02", + "id": "T03", + "title": "Implement Phase 2: Consolidation and /memory command", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Port runPhase2 consolidation logic\n2. Query MemoryStore for source='extracted' memories\n3. Group by category, deduplicate similar content (simple string similarity)\n4. Update consolidated memories with higher confidence\n5. Supersede duplicate memories\n6. Implement /memory command that reads from MemoryStore\n7. Verify: consolidation runs and /memory shows extracted memories", + "estimate": "1.5h", + "files": [ + "src/resources/extensions/sf/memory-pipeline.js", + "src/resources/extensions/sf/memory-commands.js" + ], + "verify": "npm test -- --grep \"memory consolidation\"", + "inputs": [ + "src/resources/extensions/sf/memory-pipeline.js", + "src/resources/extensions/sf/MemoryStore.ts" + ], + "expected_output": ["src/resources/extensions/sf/memory-commands.js"], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + },
+ { + "milestone_id": "M001-6377a4", + "slice_id": "S03", + "id": "T01", + "title": "Create KNOWLEDGE.md parser and migrator", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Create src/resources/extensions/sf/migrate-knowledge.js\n2. Parse KNOWLEDGE.md sections (Rules, Patterns, Lessons Learned)\n3. Extract table rows as memory entries\n4. Map to memory categories: rule→convention, pattern→pattern, lesson→gotcha\n5. Insert into MemoryStore with source='migrated'\n6. Deduplicate by content hash against existing memories\n7. Add dry-run mode (report without inserting)\n8. Verify: dry-run reports correct counts", + "estimate": "1.5h", + "files": [ + "src/resources/extensions/sf/migrate-knowledge.js", + ".sf/KNOWLEDGE.md", + "KNOWLEDGE.md" + ], + "verify": "node src/resources/extensions/sf/migrate-knowledge.js --dry-run", + "inputs": [ + ".sf/KNOWLEDGE.md", + "KNOWLEDGE.md", + "src/resources/extensions/sf/MemoryStore.ts" + ], + "expected_output": ["src/resources/extensions/sf/migrate-knowledge.js"], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + },
+ { + "milestone_id": "M001-6377a4", + "slice_id": "S03", + "id": "T02", + "title": "Create agent.db migrator", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Create src/resources/extensions/sf/migrate-agent-db.js\n2. Read old agent.db using sql.js\n3. Extract stage1_outputs.extraction_json fields\n4. Parse JSON arrays of memories\n5. Insert into MemoryStore with source='extracted'\n6. Deduplicate by content hash\n7. Handle corrupted databases gracefully (skip with warning)\n8. Add dry-run mode\n9. Verify: dry-run reports correct counts", + "estimate": "1.5h", + "files": [ + "src/resources/extensions/sf/migrate-agent-db.js", + "~/.sf/agent/agent.db" + ], + "verify": "node src/resources/extensions/sf/migrate-agent-db.js --dry-run", + "inputs": [ + "packages/pi-coding-agent/src/resources/extensions/memory/storage.ts", + "src/resources/extensions/sf/MemoryStore.ts" + ], + "expected_output": ["src/resources/extensions/sf/migrate-agent-db.js"], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + },
+ { + "milestone_id": "M001-6377a4", + "slice_id": "S03", + "id": "T03", + "title": "Deprecate legacy memory systems", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Add deprecation warnings to memory-store.js exports (log warning on first use)\n2. Remove knowledge-compounding.js and knowledge-injector.js or make them no-op\n3. Update memory extension to use MemoryStore instead of sql.js\n4. Remove sql.js dependency from memory extension\n5. Update CHANGELOG.md with migration instructions\n6. Verify: no imports of old memory systems in active code paths", + "estimate": "1h", + "files": [ + "src/resources/extensions/sf/memory-store.js", + "src/resources/extensions/sf/knowledge-compounding.js", + "src/resources/extensions/sf/knowledge-injector.js", + "packages/pi-coding-agent/src/resources/extensions/memory/index.ts", + "packages/pi-coding-agent/src/resources/extensions/memory/storage.ts", + "CHANGELOG.md" + ], + "verify": "grep -r \"knowledge-compounding\\|knowledge-injector\\|sql.js\" src/ packages/ --include=\"*.ts\" --include=\"*.js\" | grep -v \"test\\|spec\\|deprecated\"", + "inputs": [ + "src/resources/extensions/sf/migrate-knowledge.js", + "src/resources/extensions/sf/migrate-agent-db.js" + ], + "expected_output": [ + "src/resources/extensions/sf/memory-store.js (deprecated)", + "CHANGELOG.md (updated)" + ], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + },
+ { + "milestone_id": "M001-6377a4", + "slice_id": "S04", + "id": "T01", + "title": "Integrate MemoryStore into system prompt injection", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Read system-context.js to understand current memory injection\n2. Replace knowledgeBlock + memoryBlock with MemoryStore.getRelevantMemoriesRanked()\n3. Derive query from task context (domain, keywords, technology)\n4. Format memories with category headers\n5. Respect token budget (truncate if needed)\n6. Add fallback to KNOWLEDGE.md if MemoryStore empty\n7. Verify: system prompt contains formatted memories", + "estimate": "1.5h", + "files": [ + "src/resources/extensions/sf/bootstrap/system-context.js", + "src/resources/extensions/sf/MemoryStore.ts" + ], + "verify": "npm test -- --grep \"system context memory\"", + "inputs": [ + "src/resources/extensions/sf/bootstrap/system-context.js", + "src/resources/extensions/sf/MemoryStore.ts" + ], + "expected_output": [ + "src/resources/extensions/sf/bootstrap/system-context.js (modified)" + ], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + },
+ { + "milestone_id": "M001-6377a4", + "slice_id": "S04", + "id": "T02", + "title": "Remove legacy memory system imports", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Remove all imports of memory-store.js from active code\n2. Remove all imports of knowledge-injector.js\n3. Remove all imports of knowledge-compounding.js\n4. Update package.json to remove better-sqlite3 and sql.js from memory-related dependencies\n5. Verify: grep returns no active references to old systems", + "estimate": "1h", + "files": [ + "src/resources/extensions/sf/**/*.js", + "src/resources/extensions/sf/**/*.ts", + "packages/pi-coding-agent/src/resources/extensions/memory/**/*.ts" + ], + "verify": "grep -r \"knowledge-injector\\|knowledge-compounding\\|memory-store\" src/ packages/ --include=\"*.ts\" --include=\"*.js\" | grep -v \"test\\|spec\\|deprecated\"", + "inputs": ["src/resources/extensions/sf/bootstrap/system-context.js"], + "expected_output": ["Clean codebase with no legacy memory imports"], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + },
+ { + "milestone_id": "M001-6377a4", + "slice_id": "S04", + "id": "T03", + "title": "Write end-to-end integration tests", + "status": "pending", + "one_liner": "", + "narrative": "", + "verification_result": "", + "duration": "", + "completed_at": null, + "blocker_discovered": false, + "deviations": "", + "known_issues": "", + "key_files": [], + "key_decisions": [], + "full_summary_md": "", + "description": "1. Create end-to-end test: mock session with memory-worthy content\n2. Trigger memory extraction pipeline\n3. Start new session with matching context\n4. Verify system prompt contains the extracted memory\n5. Test graceful degradation: empty store returns empty string\n6. Test token budget: large memory set is truncated\n7. Verify: all tests pass", + "estimate": "2h", + "files": ["src/resources/extensions/sf/tests/memory-e2e.test.ts"], + "verify": "npm test -- src/resources/extensions/sf/tests/memory-e2e.test.ts", + "inputs": [ + "src/resources/extensions/sf/bootstrap/system-context.js", + "src/resources/extensions/sf/memory-pipeline.js", + "src/resources/extensions/sf/MemoryStore.ts" + ], + "expected_output": [ + "src/resources/extensions/sf/tests/memory-e2e.test.ts" + ], + "observability_impact": "", + "full_plan_md": "", + "sequence": 0 + } + ],
+ "decisions": [ + { + "seq": 1, + "id": "D001", + "when_context": "", + "scope": "environment", + "decision": "How to handle missing libsecret dependency for keytar native addon on Linux.", + "choice": "Add libsecret-1-0 / libsecret-1-dev to all Linux environment definitions (Docker, Nix, CI).", + "rationale": "@github/keytar (used by @google/gemini-cli-core) requires libsecret on Linux. Without it, keychain initialization fails with a cryptic \"Failed to load keytar native addon\" error. Adding it to the core environment definitions ensures it is present in all official SF environments.", + "revisable": "Yes", + "made_by": "agent", + "superseded_by": null + } + ], + "verification_evidence": [] +}