feat(sm-phase3): Integrate sync-scheduler into memory creation pipeline

Hook sync-scheduler into createMemory() so all new memories are queued for
async sync to Singularity Memory:

Changes to memory-store.js:
- Import queueMemorySync from sync-scheduler.js
- After successful memory creation with real ID, queue to scheduler
- Fire-and-forget: sync doesn't block memory creation
- Best-effort: catch scheduler errors, don't fail memory on sync issues
- Pass memory fields: category (type), content, projectId, confidence

This completes Tier 1.2 Phase 3a: Memory integration foundation.
Memories created locally are now automatically queued for SM sync:
- Batched in groups of 50 or every 5s
- Retried with exponential backoff on failure
- Gracefully degrades if SM unavailable

Next: add session-end flush to unit-runtime.js (Phase 3b)

Fixes: TIER_1_2_PHASE_3A
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
This commit is contained in:
Mikael Hugo 2026-05-07 02:58:51 +02:00
parent 9f3f3a941f
commit a367c95bff

View file

@@ -17,6 +17,7 @@ import {
transaction,
updateMemoryContentRow,
} from "./sf-db.js";
import { queueMemorySync } from "./sync-scheduler.js";
export { isDbAvailable };
@@ -254,6 +255,22 @@ export function createMemory(fields) {
.prepare("SELECT 1 FROM memories WHERE id = :id")
.get({ ":id": realId });
if (!verify) return null;
// Queue SM sync (Tier 1.2 Phase 3): fire-and-forget, non-blocking
// Uses project root (current working directory) as project identifier
try {
const projectId = process.cwd();
queueMemorySync(projectId, realId, {
type: fields.category,
content: fields.content,
projectId,
confidence: fields.confidence ?? 0.8,
createdAt: now,
});
} catch {
// SM sync is best-effort; don't fail memory creation on scheduler errors
}
return realId;
} catch {
return null;