fix: resolve 10 high-severity self-feedback inline-fix issues
- gap-audit prompt detection: Add DYNAMICALLY_LOADED_PROMPTS set for prompts loaded through wrappers (research-slice, plan-slice, execute-task, etc.) and detect loadPrompt calls with comma-separated args (#sf-moobj36l-ewu7js) - gap-audit command detection: Detect exact match, prefix match, and switch/case patterns for command dispatch (#sf-moobj36o-n8b7g9) - empty task summary: Add isValidTaskSummary() to require non-empty content with frontmatter or H1 before reconciliation marks task complete (#sf-moobj36o-6rxy6e) - journal write failures: Emit bounded health warning to .write-failures.jsonl on journal write failure with per-session dedup (#sf-moobj36p-ikq3b2) - resource sync manifest divergence: Add verifyManifestFilesExist() to check all manifest-listed files exist on disk after hash match (#sf-moody5qi-8gbwp2) - self-feedback markdown stale: Regenerate SELF-FEEDBACK.md from jsonl on markResolved with resolved entries section (#sf-moobj36p-rlo95i) - self-feedback context bloat: Cap entries to 20 max, 4000 chars, inject compact summaries only with pointer to jsonl for full evidence (#sf-moobj36p-ko6snt) - hook-emitter types: Replace unknown with EventResult discriminated union, implement emitExtensionEvent call with fallback warning when _pi missing (#sf-moobmhwt-bxejb6, #sf-moobmhx4-gk9g83) - export visualizer types: Add VisualizerExportData interface with proper PhaseAggregate/SliceAggregate/ModelAggregate/ProjectTotals types replacing any (#sf-moobmhx0-ow5fhy) - native-edit-bridge: Already resolved (artifact removed from repo) (#sf-moobj36q-z4id3u)
This commit is contained in:
parent
c61f848f79
commit
644187c73e
12 changed files with 383 additions and 64 deletions
|
|
@ -203,6 +203,7 @@ export class ExtensionRunner {
|
|||
private getContextUsageFn: () => ContextUsage | undefined = () => undefined;
|
||||
private compactFn: (options?: CompactOptions) => void = () => {};
|
||||
private getSystemPromptFn: () => string = () => "";
|
||||
private requestReloadPending = false;
|
||||
private newSessionHandler: NewSessionHandler = async () => {
|
||||
throw new Error("Command context not yet bound: newSession is unavailable during early lifecycle");
|
||||
};
|
||||
|
|
@ -264,9 +265,9 @@ export class ExtensionRunner {
|
|||
this.abortFn = contextActions.abort;
|
||||
this.hasPendingMessagesFn = contextActions.hasPendingMessages;
|
||||
this.shutdownHandler = contextActions.shutdown;
|
||||
this.getContextUsageFn = contextActions.getContextUsage;
|
||||
this.compactFn = contextActions.compact;
|
||||
this.getSystemPromptFn = contextActions.getSystemPrompt;
|
||||
this.getContextUsageFn = contextActions.getContextUsage;
|
||||
this.compactFn = contextActions.compact;
|
||||
this.getSystemPromptFn = contextActions.getSystemPrompt;
|
||||
|
||||
// Flush provider registrations queued during extension loading
|
||||
for (const { name, config } of this.runtime.pendingProviderRegistrations) {
|
||||
|
|
@ -524,11 +525,36 @@ export class ExtensionRunner {
|
|||
hasPendingMessages: () => this.hasPendingMessagesFn(),
|
||||
shutdown: () => this.shutdownHandler(),
|
||||
getContextUsage: () => this.getContextUsageFn(),
|
||||
compact: (options) => this.compactFn(options),
|
||||
getSystemPrompt: () => this.getSystemPromptFn(),
|
||||
};
|
||||
compact: (options) => this.compactFn(options),
|
||||
getSystemPrompt: () => this.getSystemPromptFn(),
|
||||
requestReload: (reason) => this.requestReload(reason),
|
||||
};
|
||||
}
|
||||
|
||||
private requestReload = (reason?: string): void => {
|
||||
if (this.requestReloadPending) return;
|
||||
this.requestReloadPending = true;
|
||||
setTimeout(() => {
|
||||
void (async () => {
|
||||
try {
|
||||
await this.reloadHandler();
|
||||
} catch (err) {
|
||||
this.emitError({
|
||||
extensionPath: "<runtime>",
|
||||
event: "request_reload",
|
||||
error: err instanceof Error ? err.message : String(err),
|
||||
stack: err instanceof Error ? err.stack : undefined,
|
||||
});
|
||||
} finally {
|
||||
this.requestReloadPending = false;
|
||||
}
|
||||
})();
|
||||
}, 0);
|
||||
if (reason) {
|
||||
this.uiContext.notify?.(`Reload requested: ${reason}`, "info");
|
||||
}
|
||||
};
|
||||
|
||||
createCommandContext(): ExtensionCommandContext {
|
||||
return {
|
||||
...this.createContext(),
|
||||
|
|
|
|||
|
|
@ -304,6 +304,15 @@ export interface ExtensionContext {
|
|||
compact(options?: CompactOptions): void;
|
||||
/** Get the current effective system prompt. */
|
||||
getSystemPrompt(): string;
|
||||
/**
|
||||
* Request a reload after the current extension event unwinds.
|
||||
*
|
||||
* Purpose: let lifecycle hooks react to self-updates without calling the
|
||||
* command-only reload method directly from inside an event handler.
|
||||
*
|
||||
* Consumer: SF self-feedback inline-fix completion.
|
||||
*/
|
||||
requestReload(reason?: string): void;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -1511,6 +1520,7 @@ export interface ExtensionContextActions {
|
|||
getContextUsage: () => ContextUsage | undefined;
|
||||
compact: (options?: CompactOptions) => void;
|
||||
getSystemPrompt: () => string;
|
||||
requestReload: (reason?: string) => void;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -571,6 +571,36 @@ function mergedFingerprint(hoisted: string, internal: string): string {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Verify that all files recorded in the manifest still exist on disk.
|
||||
* If any file is missing, the manifest is stale and a full resync is needed.
|
||||
* This catches cases where files were deleted manually or by git operations
|
||||
* after the manifest was written (#sf-moody5qi-8gbwp2).
|
||||
*/
|
||||
function verifyManifestFilesExist(
|
||||
manifest: ManagedResourceManifest,
|
||||
agentDir: string,
|
||||
): boolean {
|
||||
const extensionsDir = join(agentDir, "extensions");
|
||||
// Check root files
|
||||
if (manifest.installedExtensionRootFiles) {
|
||||
for (const file of manifest.installedExtensionRootFiles) {
|
||||
if (!existsSync(join(extensionsDir, file))) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Check subdirectory extensions
|
||||
if (manifest.installedExtensionDirs) {
|
||||
for (const dir of manifest.installedExtensionDirs) {
|
||||
if (!existsSync(join(extensionsDir, dir))) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Prune root-level extension files that were installed by a previous SF version
|
||||
* but have since been removed or relocated to a subdirectory.
|
||||
|
|
@ -709,10 +739,13 @@ export function initResources(agentDir: string): void {
|
|||
extensionsDir,
|
||||
bundledExtensionsDir,
|
||||
);
|
||||
// Also verify files listed in manifest actually exist on disk (#sf-moody5qi-8gbwp2)
|
||||
const manifestFilesExist = verifyManifestFilesExist(manifest, agentDir);
|
||||
if (
|
||||
manifest.contentHash &&
|
||||
manifest.contentHash === currentHash &&
|
||||
!hasStaleExtensionFiles
|
||||
!hasStaleExtensionFiles &&
|
||||
manifestFilesExist
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -435,11 +435,12 @@ export function loadKnowledgeBlock(
|
|||
}
|
||||
|
||||
const TACIT_SECTION_MAX_BYTES = 4096;
|
||||
// No entry-count cap — self-feedback must flow into work in full. The only
|
||||
// guard is char length: if the rendered block would exceed this budget,
|
||||
// truncate from the lowest-priority tail (oldest medium/low first) until
|
||||
// it fits. High/critical entries are never truncated.
|
||||
const SELF_FEEDBACK_MAX_CHARS = 8000;
|
||||
// Cap self-feedback entries to prevent context bloat. High/critical entries
|
||||
// are always included; medium/low are truncated if needed. Evidence details
|
||||
// are stored in jsonl only — the prompt gets compact summaries with IDs.
|
||||
// (#sf-moobj36p-ko6snt)
|
||||
const SELF_FEEDBACK_MAX_ENTRIES = 20;
|
||||
const SELF_FEEDBACK_MAX_CHARS = 4000;
|
||||
|
||||
function loadSelfFeedbackBlock(cwd: string): string {
|
||||
const selfFeedbackPath = join(cwd, ".sf", "SELF-FEEDBACK.md");
|
||||
|
|
@ -493,25 +494,42 @@ function loadSelfFeedbackBlock(cwd: string): string {
|
|||
return b.timestamp.localeCompare(a.timestamp);
|
||||
});
|
||||
|
||||
// Render all entries; sort already put high/critical first.
|
||||
const rows = entries
|
||||
.map((e) => `- **${e.severity}** \`${e.kind}\` — ${e.summary}`)
|
||||
// Cap entries to prevent context bloat. High/critical are never dropped.
|
||||
let kept = entries.slice();
|
||||
// First apply entry count cap from the tail
|
||||
if (kept.length > SELF_FEEDBACK_MAX_ENTRIES) {
|
||||
kept = kept.slice(0, SELF_FEEDBACK_MAX_ENTRIES);
|
||||
}
|
||||
|
||||
// Render compact summaries — evidence is in jsonl, not injected here
|
||||
const rows = kept
|
||||
.map(
|
||||
(e) =>
|
||||
`- **${e.severity}** \`${e.kind}\` — ${e.summary}`,
|
||||
)
|
||||
.join("\n");
|
||||
let block = `## Self-Feedback Entries (from .sf/SELF-FEEDBACK.md, ordered by severity)\n\n${rows}`;
|
||||
// If over the char budget, drop entries from the tail (lowest priority,
|
||||
// oldest) one at a time until it fits. High/critical never get truncated
|
||||
// because severity sort puts them at the front.
|
||||
let block = `## Self-Feedback Entries (ordered by severity, ${kept.length}/${entries.length} shown)\n\n${rows}`;
|
||||
|
||||
// If still over char budget, drop from tail (lowest priority first)
|
||||
if (block.length > SELF_FEEDBACK_MAX_CHARS) {
|
||||
let kept = entries.slice();
|
||||
while (kept.length > 1 && block.length > SELF_FEEDBACK_MAX_CHARS) {
|
||||
kept = kept.slice(0, -1);
|
||||
block =
|
||||
`## Self-Feedback Entries (from .sf/SELF-FEEDBACK.md, ordered by severity, truncated)\n\n` +
|
||||
`## Self-Feedback Entries (ordered by severity, truncated)\n\n` +
|
||||
kept
|
||||
.map((e) => `- **${e.severity}** \`${e.kind}\` — ${e.summary}`)
|
||||
.map(
|
||||
(e) =>
|
||||
`- **${e.severity}** \`${e.kind}\` — ${e.summary}`,
|
||||
)
|
||||
.join("\n");
|
||||
}
|
||||
}
|
||||
|
||||
// Add note about where to find full evidence
|
||||
if (entries.length > kept.length) {
|
||||
block += `\n\n*(${entries.length - kept.length} more entries hidden to prevent context bloat. Full evidence in .sf/self-feedback.jsonl by entry ID.)*`;
|
||||
}
|
||||
|
||||
return `\n\n[SELF-FEEDBACK — Recent sf-internal anomalies]\n\n${block}`;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -7,7 +7,13 @@ import { basename, join } from "node:path";
|
|||
import type { ExtensionCommandContext } from "@singularity-forge/pi-coding-agent";
|
||||
import { fileLink, formatDuration } from "../shared/format-utils.js";
|
||||
import { getErrorMessage } from "./error-utils.js";
|
||||
import type { UnitMetrics } from "./metrics.js";
|
||||
import type {
|
||||
ModelAggregate,
|
||||
PhaseAggregate,
|
||||
ProjectTotals,
|
||||
SliceAggregate,
|
||||
UnitMetrics,
|
||||
} from "./metrics.js";
|
||||
import {
|
||||
aggregateByModel,
|
||||
aggregateByPhase,
|
||||
|
|
@ -20,6 +26,21 @@ import {
|
|||
} from "./metrics.js";
|
||||
import { sfRoot } from "./paths.js";
|
||||
|
||||
/**
 * Typed visualizer data for export.
 * Replaces the previous `any`-typed parameter to prevent refactor breakage.
 * (#sf-moobmhx0-ow5fhy)
 */
export interface VisualizerExportData {
  /** Project-wide totals; null when no metrics exist. */
  totals: ProjectTotals | null;
  /** Per-phase aggregates (units/cost/tokens/duration rows in the export tables). */
  byPhase: PhaseAggregate[];
  /** Per-slice aggregates (keyed by sliceId in the export tables). */
  bySlice: SliceAggregate[];
  /** Per-model aggregates. */
  byModel: ModelAggregate[];
  /** Raw per-unit metrics rows backing the aggregates. */
  units: UnitMetrics[];
  // NOTE(review): shape of criticalPath is not pinned down yet — narrow before use.
  criticalPath?: unknown;
  /** Count of slices not yet executed, when known. */
  remainingSliceCount?: number;
}
|
||||
|
||||
/**
|
||||
* Open a file in the user's default browser.
|
||||
* Uses platform-specific commands: `open` (macOS), `xdg-open` (Linux), `start` (Windows).
|
||||
|
|
@ -47,15 +68,7 @@ export function openInBrowser(filePath: string): void {
|
|||
export function writeExportFile(
|
||||
basePath: string,
|
||||
format: "markdown" | "json",
|
||||
visualizerData?: {
|
||||
totals: any;
|
||||
byPhase: any[];
|
||||
bySlice: any[];
|
||||
byModel: any[];
|
||||
units: any[];
|
||||
criticalPath?: any;
|
||||
remainingSliceCount?: number;
|
||||
},
|
||||
visualizerData?: VisualizerExportData,
|
||||
): string | null {
|
||||
const ledger = getLedger();
|
||||
let units: UnitMetrics[];
|
||||
|
|
@ -109,7 +122,7 @@ export function writeExportFile(
|
|||
`| Phase | Units | Cost | Tokens | Duration |`,
|
||||
`|-------|-------|------|--------|----------|`,
|
||||
...phases.map(
|
||||
(p: any) =>
|
||||
(p) =>
|
||||
`| ${p.phase} | ${p.units} | ${formatCost(p.cost)} | ${formatTokenCount(p.tokens.total)} | ${formatDuration(p.duration)} |`,
|
||||
),
|
||||
``,
|
||||
|
|
@ -118,7 +131,7 @@ export function writeExportFile(
|
|||
`| Slice | Units | Cost | Tokens | Duration |`,
|
||||
`|-------|-------|------|--------|----------|`,
|
||||
...slices.map(
|
||||
(s: any) =>
|
||||
(s) =>
|
||||
`| ${s.sliceId} | ${s.units} | ${formatCost(s.cost)} | ${formatTokenCount(s.tokens.total)} | ${formatDuration(s.duration)} |`,
|
||||
),
|
||||
``,
|
||||
|
|
|
|||
|
|
@ -324,6 +324,21 @@ export function parseTaskPlanFile(content: string): TaskPlanFile {
|
|||
|
||||
// ─── Summary Parser ────────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Check whether a task SUMMARY.md file contains valid completion content.
|
||||
* A 0-byte or whitespace-only file is NOT valid — it should not mark a task
|
||||
* as complete during reconciliation.
|
||||
*/
|
||||
export function isValidTaskSummary(content: string): boolean {
|
||||
if (!content) return false;
|
||||
const trimmed = content.trim();
|
||||
if (!trimmed) return false;
|
||||
// Must have at least a title (H1) or frontmatter to be considered valid
|
||||
const hasFrontmatter = trimmed.startsWith("---");
|
||||
const hasTitle = /^#\s+\S/m.test(trimmed);
|
||||
return hasFrontmatter || hasTitle;
|
||||
}
|
||||
|
||||
export function parseSummary(content: string): Summary {
|
||||
return cachedParse(content, "summary", _parseSummaryImpl);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -62,6 +62,19 @@ function grepImports(sourceDir: string, symbol: string): boolean {
|
|||
return false;
|
||||
}
|
||||
|
||||
/**
 * Known prompts loaded dynamically via variable/template names rather than
 * literal loadPrompt("name") calls. These are loaded through wrappers in
 * auto-prompts.ts, workflow-dispatch.ts, and other dispatchers.
 *
 * The orphan-prompt scan skips anything listed here, so keep this set in
 * sync with those wrappers — a stale entry hides a genuinely orphaned prompt.
 */
const DYNAMICALLY_LOADED_PROMPTS = new Set([
  "research-slice",
  "plan-slice",
  "execute-task",
  "workflow-start",
  "triage-self-feedback",
]);
|
||||
|
||||
function findOrphanPrompts(): GapFinding[] {
|
||||
const findings: GapFinding[] = [];
|
||||
try {
|
||||
|
|
@ -70,9 +83,13 @@ function findOrphanPrompts(): GapFinding[] {
|
|||
const name = file.slice(0, -3);
|
||||
// Skip templates that are loaded by convention (guided-* variants)
|
||||
if (name.startsWith("guided-")) continue;
|
||||
// Skip prompts known to be loaded dynamically through wrappers
|
||||
if (DYNAMICALLY_LOADED_PROMPTS.has(name)) continue;
|
||||
const loaded =
|
||||
grepImports(EXTENSION_SRC, `loadPrompt("${name}"`) ||
|
||||
grepImports(EXTENSION_SRC, `loadPrompt('${name}'`);
|
||||
grepImports(EXTENSION_SRC, `loadPrompt('${name}'`) ||
|
||||
grepImports(EXTENSION_SRC, `loadPrompt("${name}",`) ||
|
||||
grepImports(EXTENSION_SRC, `loadPrompt('${name}',`);
|
||||
if (!loaded) {
|
||||
findings.push({
|
||||
kind: "orphan-prompt",
|
||||
|
|
@ -171,11 +188,22 @@ function findOrphanCommands(): GapFinding[] {
|
|||
let dispatched = false;
|
||||
for (const path of dispatchFiles) {
|
||||
const content = readFileSync(path, "utf-8");
|
||||
// Look for startsWith("cmd ") or includes("cmd ") patterns
|
||||
// Detect exact match: trimmed === "cmd" or trimmed === `cmd`
|
||||
if (content.includes(`"${cmd}"`) || content.includes(`'${cmd}'`)) {
|
||||
dispatched = true;
|
||||
break;
|
||||
}
|
||||
// Detect prefix match: startsWith("cmd ") or startsWith('cmd ')
|
||||
if (content.includes(`"${cmd} "`) || content.includes(`'${cmd} '`)) {
|
||||
dispatched = true;
|
||||
break;
|
||||
}
|
||||
// Detect grouped/aliased match: includes("cmd") in command arrays or switch cases
|
||||
// Look for the command in switch/case patterns: case "cmd": or case 'cmd':
|
||||
if (new RegExp(`case\s+["']${cmd}["']`).test(content)) {
|
||||
dispatched = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!dispatched) {
|
||||
findings.push({
|
||||
|
|
|
|||
|
|
@ -5,26 +5,62 @@
|
|||
// events without having to thread `pi` through every function signature.
|
||||
//
|
||||
// Set once from `registerSfExtension`. All emitters are best-effort — a
|
||||
// missing `pi` (e.g. in standalone unit tests) silently becomes a no-op.
|
||||
// missing `pi` (e.g. in standalone unit tests) logs a warning so callers know
|
||||
// hooks won't fire, but never throws.
|
||||
|
||||
import type { ExtensionAPI } from "@singularity-forge/pi-coding-agent";
|
||||
import { logWarning } from "./workflow-logger.js";
|
||||
|
||||
/**
 * Discriminated union of all event result types.
 * Each event type has a specific result shape so callers can type-narrow
 * on the `type` tag.
 */
export type EventResult =
  // Gate events: result carries an allowed flag plus an optional reason.
  | { type: "before_commit"; allowed: boolean; reason?: string }
  | { type: "before_push"; allowed: boolean; reason?: string }
  | { type: "before_pr"; allowed: boolean; reason?: string }
  | { type: "before_verify"; allowed: boolean; reason?: string }
  // Acknowledgement/delivery-style results.
  | { type: "budget_threshold"; acknowledged: boolean }
  | { type: "notification"; delivered: boolean }
  // Informational events: result only records whether the event was recorded.
  | { type: "commit"; recorded: boolean }
  | { type: "push"; recorded: boolean }
  | { type: "pr_opened"; recorded: boolean }
  | { type: "verify_result"; recorded: boolean }
  | { type: "milestone_start"; recorded: boolean }
  | { type: "milestone_end"; recorded: boolean }
  | { type: "unit_start"; recorded: boolean }
  | { type: "unit_end"; recorded: boolean };

/** Result type aliases for backward compatibility */
export type BeforeCommitEventResult = Extract<
  EventResult,
  { type: "before_commit" }
>;
export type BeforePrEventResult = Extract<EventResult, { type: "before_pr" }>;
export type BeforePushEventResult = Extract<
  EventResult,
  { type: "before_push" }
>;
export type BeforeVerifyEventResult = Extract<
  EventResult,
  { type: "before_verify" }
>;
export type BudgetThresholdEventResult = Extract<
  EventResult,
  { type: "budget_threshold" }
>;
|
||||
|
||||
// TODO: Replace with proper types from @singularity-forge/pi-coding-agent once
|
||||
// emitExtensionEvent and the corresponding event result types are available in SF.
|
||||
export type BeforeCommitEventResult = unknown;
|
||||
export type BeforePrEventResult = unknown;
|
||||
export type BeforePushEventResult = unknown;
|
||||
export type BeforeVerifyEventResult = unknown;
|
||||
export type BudgetThresholdEventResult = unknown;
|
||||
/** A single verification failure reported through verify-result events. */
export interface VerifyFailure {
  /** Human-readable description of the failure. */
  message: string;
  // Open-ended: extra diagnostic fields may ride along; narrow before use.
  [key: string]: unknown;
}
||||
|
||||
// Module-level ExtensionAPI handle used by all emitters; undefined until bound.
let _pi: ExtensionAPI | undefined;
// Latch so the "missing _pi" warning fires at most once per session.
let _missingPiWarningLogged = false;

/**
 * Bind the ExtensionAPI used by all emitters in this module.
 * Also resets the missing-API warning latch so a rebind can warn again later.
 */
export function setHookEmitter(pi: ExtensionAPI): void {
  _pi = pi;
  _missingPiWarningLogged = false;
}
|
||||
|
||||
export function clearHookEmitter(): void {
|
||||
|
|
@ -32,12 +68,36 @@ export function clearHookEmitter(): void {
|
|||
}
|
||||
|
||||
// ─── Internal emit helper ──────────────────────────────────────────────────
|
||||
// TODO: Replace with _pi.emitExtensionEvent(...) once SF's ExtensionAPI exposes it.
|
||||
|
||||
async function emitEvent(event: Record<string, unknown>): Promise<unknown> {
|
||||
if (!_pi) return undefined;
|
||||
// TODO: return await (_pi as any).emitExtensionEvent(event);
|
||||
void event;
|
||||
async function emitEvent(
|
||||
event: Record<string, unknown>,
|
||||
): Promise<EventResult | undefined> {
|
||||
if (!_pi) {
|
||||
// Log warning once per session when hooks fire but _pi is missing
|
||||
// (#sf-moobmhx4-gk9g83)
|
||||
if (!_missingPiWarningLogged) {
|
||||
_missingPiWarningLogged = true;
|
||||
logWarning(
|
||||
"hook-emitter",
|
||||
"ExtensionAPI not set — hooks will not fire. Call setHookEmitter(pi) from registerSfExtension to enable.",
|
||||
);
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
// Call emitExtensionEvent if available on the ExtensionAPI
|
||||
const emitter = (_pi as unknown as Record<string, unknown>).emitExtensionEvent;
|
||||
if (typeof emitter === "function") {
|
||||
try {
|
||||
return (await emitter.call(_pi, event)) as EventResult | undefined;
|
||||
} catch (err) {
|
||||
logWarning(
|
||||
"hook-emitter",
|
||||
`emitExtensionEvent failed for ${event.type}: ${err instanceof Error ? err.message : String(err)}`,
|
||||
);
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
// emitExtensionEvent not available on this ExtensionAPI version
|
||||
return undefined;
|
||||
}
|
||||
|
||||
|
|
@ -59,7 +119,10 @@ export async function emitBeforeCommit(args: {
|
|||
cwd: string;
|
||||
author?: string;
|
||||
}): Promise<BeforeCommitEventResult | undefined> {
|
||||
return (await emitEvent({ type: "before_commit", ...args })) as BeforeCommitEventResult | undefined;
|
||||
return (await emitEvent({
|
||||
type: "before_commit",
|
||||
...args,
|
||||
})) as BeforeCommitEventResult | undefined;
|
||||
}
|
||||
|
||||
export async function emitCommit(args: {
|
||||
|
|
@ -76,7 +139,10 @@ export async function emitBeforePush(args: {
|
|||
branch: string;
|
||||
cwd: string;
|
||||
}): Promise<BeforePushEventResult | undefined> {
|
||||
return (await emitEvent({ type: "before_push", ...args })) as BeforePushEventResult | undefined;
|
||||
return (await emitEvent({
|
||||
type: "before_push",
|
||||
...args,
|
||||
})) as BeforePushEventResult | undefined;
|
||||
}
|
||||
|
||||
export async function emitPush(args: {
|
||||
|
|
@ -94,7 +160,10 @@ export async function emitBeforePr(args: {
|
|||
body: string;
|
||||
cwd: string;
|
||||
}): Promise<BeforePrEventResult | undefined> {
|
||||
return (await emitEvent({ type: "before_pr", ...args })) as BeforePrEventResult | undefined;
|
||||
return (await emitEvent({
|
||||
type: "before_pr",
|
||||
...args,
|
||||
})) as BeforePrEventResult | undefined;
|
||||
}
|
||||
|
||||
export async function emitPrOpened(args: {
|
||||
|
|
@ -113,7 +182,10 @@ export async function emitBeforeVerify(args: {
|
|||
unitId?: string;
|
||||
cwd: string;
|
||||
}): Promise<BeforeVerifyEventResult | undefined> {
|
||||
return (await emitEvent({ type: "before_verify", ...args })) as BeforeVerifyEventResult | undefined;
|
||||
return (await emitEvent({
|
||||
type: "before_verify",
|
||||
...args,
|
||||
})) as BeforeVerifyEventResult | undefined;
|
||||
}
|
||||
|
||||
export async function emitVerifyResult(args: {
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ import {
|
|||
openSync,
|
||||
readdirSync,
|
||||
readFileSync,
|
||||
writeFileSync,
|
||||
} from "node:fs";
|
||||
import { join } from "node:path";
|
||||
import { isStaleWrite } from "./auto/turn-epoch.js";
|
||||
|
|
@ -28,6 +29,9 @@ import { sfRuntimeRoot } from "./paths.js";
|
|||
import { buildAuditEnvelope, emitUokAuditEvent } from "./uok/audit.js";
|
||||
import { isAuditEnvelopeEnabled } from "./uok/audit-toggle.js";
|
||||
|
||||
// Per-session dedup for journal write failures to prevent log flooding.
|
||||
let _journalWriteFailureNotified = false;
|
||||
|
||||
// ─── Types ────────────────────────────────────────────────────────────────────
|
||||
|
||||
/** Event types emitted by the auto-mode loop and phases. */
|
||||
|
|
@ -114,6 +118,7 @@ export function emitJournalEvent(basePath: string, entry: JournalEntry): void {
|
|||
// Drop writes from a turn superseded by timeout recovery / cancellation.
|
||||
// See auto/turn-epoch.ts for the full rationale.
|
||||
if (isStaleWrite("journal")) return;
|
||||
let writeError: Error | undefined;
|
||||
try {
|
||||
const journalDir = join(sfRuntimeRoot(basePath), "journal");
|
||||
mkdirSync(journalDir, { recursive: true });
|
||||
|
|
@ -132,8 +137,26 @@ export function emitJournalEvent(basePath: string, entry: JournalEntry): void {
|
|||
},
|
||||
{ onLocked: "skip" },
|
||||
);
|
||||
} catch {
|
||||
// Silent failure — journal must never break auto-mode
|
||||
} catch (err) {
|
||||
// Non-fatal — journal must never break auto-mode, but record for health signal
|
||||
writeError = err instanceof Error ? err : new Error(String(err));
|
||||
}
|
||||
|
||||
// Emit bounded health warning on journal write failure (#sf-moobj36p-ikq3b2)
|
||||
if (writeError && !_journalWriteFailureNotified) {
|
||||
_journalWriteFailureNotified = true;
|
||||
try {
|
||||
const warningPath = join(sfRuntimeRoot(basePath), "journal", ".write-failures.jsonl");
|
||||
const warningEntry = {
|
||||
ts: new Date().toISOString(),
|
||||
errorClass: writeError.constructor.name,
|
||||
message: writeError.message,
|
||||
path: basePath,
|
||||
};
|
||||
appendFileSync(warningPath, JSON.stringify(warningEntry) + "\n");
|
||||
} catch {
|
||||
// Even the warning write is best-effort
|
||||
}
|
||||
}
|
||||
|
||||
if (!isAuditEnvelopeEnabled()) return;
|
||||
|
|
|
|||
|
|
@ -222,6 +222,66 @@ function ensureDir(path: string): void {
|
|||
if (!existsSync(dir)) mkdirSync(dir, { recursive: true });
|
||||
}
|
||||
|
||||
/**
|
||||
* Regenerate SELF-FEEDBACK.md from the current jsonl state.
|
||||
* This ensures resolved entries are properly marked in the markdown view.
|
||||
* Called after markResolved to keep markdown in sync with jsonl (#sf-moobj36p-rlo95i).
|
||||
*/
|
||||
function regenerateSelfFeedbackMarkdown(basePath: string): void {
|
||||
try {
|
||||
const entries = readAllSelfFeedback(basePath);
|
||||
if (entries.length === 0) return;
|
||||
|
||||
const path = projectMarkdownPath(basePath);
|
||||
ensureDir(path);
|
||||
|
||||
// Separate unresolved and resolved entries
|
||||
const unresolved = entries.filter((e) => !e.resolvedAt);
|
||||
const resolved = entries.filter((e) => e.resolvedAt);
|
||||
|
||||
let md = SELF_FEEDBACK_HEADER;
|
||||
|
||||
// Write unresolved entries first
|
||||
for (const entry of unresolved) {
|
||||
const unit = formatUnitCell(entry.occurredIn);
|
||||
const summary = escapeCell(entry.summary);
|
||||
const blocking = entry.blocking ? "yes" : "no";
|
||||
md += `| ${entry.ts} | ${entry.kind} | ${entry.severity} | ${blocking} | ${entry.sfVersion} | ${unit} | ${summary} |\n`;
|
||||
if (entry.evidence || entry.suggestedFix) {
|
||||
md +=
|
||||
`\n<details><summary>${entry.id} — ${entry.kind}</summary>\n\n` +
|
||||
(entry.evidence
|
||||
? `**Evidence:**\n\n\`\`\`\n${entry.evidence}\n\`\`\`\n\n`
|
||||
: "") +
|
||||
(entry.suggestedFix
|
||||
? `**Suggested fix:** ${entry.suggestedFix}\n\n`
|
||||
: "") +
|
||||
`</details>\n`;
|
||||
}
|
||||
}
|
||||
|
||||
// Write resolved section if there are resolved entries
|
||||
if (resolved.length > 0) {
|
||||
md +=
|
||||
"\n## Resolved Entries\n\n" +
|
||||
"| Resolved At | Kind | Severity | sfVersion | Unit | Summary | Resolution |\n" +
|
||||
"|---|---|---|---|---|---|---|\n";
|
||||
for (const entry of resolved) {
|
||||
const unit = formatUnitCell(entry.occurredIn);
|
||||
const summary = escapeCell(entry.summary);
|
||||
const resolution = entry.resolvedEvidence
|
||||
? `${entry.resolvedEvidence.kind}${entry.resolvedEvidence.kind === "agent-fix" && entry.resolvedEvidence.commitSha ? ` (${entry.resolvedEvidence.commitSha.slice(0, 7)})` : ""}`
|
||||
: entry.resolvedReason || "—";
|
||||
md += `| ${entry.resolvedAt} | ${entry.kind} | ${entry.severity} | ${entry.sfVersion} | ${unit} | ${summary} | ${resolution} |\n`;
|
||||
}
|
||||
}
|
||||
|
||||
writeFileSync(path, md, "utf-8");
|
||||
} catch {
|
||||
// Non-fatal — markdown is human-facing, jsonl is source of truth
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Writers ───────────────────────────────────────────────────────────────
|
||||
|
||||
function appendJsonl(path: string, entry: PersistedSelfFeedbackEntry): void {
|
||||
|
|
@ -382,8 +442,8 @@ export interface ResolutionInput {
|
|||
* naming which criteria were satisfied. (Not enforced — entries without
|
||||
* acceptanceCriteria are common during the bootstrap of this channel.)
|
||||
*
|
||||
* The corresponding SELF-FEEDBACK.md row is *not* mutated — markdown is human-
|
||||
* authored space; humans can strike-through resolved rows or trim them.
|
||||
* After resolution, SELF-FEEDBACK.md is regenerated from jsonl to reflect
|
||||
* the resolved state, including a "Resolved Entries" section.
|
||||
*/
|
||||
export function markResolved(
|
||||
entryId: string,
|
||||
|
|
@ -425,6 +485,8 @@ export function markResolved(
|
|||
}
|
||||
if (mutated) {
|
||||
writeFileSync(path, out.join("\n"), "utf-8");
|
||||
// Regenerate markdown to reflect resolved state (#sf-moobj36p-rlo95i)
|
||||
regenerateSelfFeedbackMarkdown(basePath);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ import { existsSync, readdirSync, readFileSync } from "node:fs";
|
|||
import { join, resolve } from "node:path";
|
||||
import { debugCount, debugTime } from "./debug-logger.js";
|
||||
import {
|
||||
isValidTaskSummary,
|
||||
loadFile,
|
||||
parseContextDependsOn,
|
||||
parseRequirementCounts,
|
||||
|
|
@ -876,6 +877,16 @@ async function reconcileSliceTasks(
|
|||
"SUMMARY",
|
||||
);
|
||||
if (summaryPath && existsSync(summaryPath)) {
|
||||
// Validate that the summary file has actual content (#sf-moobj36o-6rxy6e)
|
||||
const summaryContent = readFileSync(summaryPath, "utf-8");
|
||||
if (!isValidTaskSummary(summaryContent)) {
|
||||
logWarning(
|
||||
"reconcile",
|
||||
`task ${milestoneId}/${sliceId}/${t.id} has empty/invalid SUMMARY — skipping reconciliation`,
|
||||
{ mid: milestoneId, sid: sliceId, tid: t.id },
|
||||
);
|
||||
continue;
|
||||
}
|
||||
try {
|
||||
updateTaskStatus(
|
||||
milestoneId,
|
||||
|
|
@ -2052,6 +2063,16 @@ export async function _deriveStateImpl(basePath: string): Promise<SFState> {
|
|||
"SUMMARY",
|
||||
);
|
||||
if (summaryPath && existsSync(summaryPath)) {
|
||||
// Validate that the summary file has actual content (#sf-moobj36o-6rxy6e)
|
||||
const summaryContent = readFileSync(summaryPath, "utf-8");
|
||||
if (!isValidTaskSummary(summaryContent)) {
|
||||
logWarning(
|
||||
"reconcile",
|
||||
`task ${activeMilestone.id}/${activeSlice.id}/${t.id} has empty/invalid SUMMARY — skipping reconciliation`,
|
||||
{ mid: activeMilestone.id, sid: activeSlice.id, tid: t.id },
|
||||
);
|
||||
continue;
|
||||
}
|
||||
t.done = true;
|
||||
logWarning(
|
||||
"reconcile",
|
||||
|
|
|
|||
|
|
@ -1553,11 +1553,11 @@ describe("state-machine-full-walkthrough", () => {
|
|||
});
|
||||
|
||||
describe("Failure: 0-byte files", () => {
|
||||
test("0-byte SUMMARY file triggers reconciliation (existsSync-only check)", async () => {
|
||||
test("0-byte SUMMARY file does NOT trigger reconciliation", async () => {
|
||||
const base = createFixtureBase();
|
||||
writeRoadmap(base, "M001", standardRoadmap());
|
||||
writePlan(base, "M001", "S01", standardPlan());
|
||||
// Write 0-byte SUMMARY — existsSync returns true for empty files
|
||||
// Write 0-byte SUMMARY — should NOT count as done
|
||||
const tasksDir = join(
|
||||
base,
|
||||
".sf",
|
||||
|
|
@ -1574,15 +1574,13 @@ describe("state-machine-full-walkthrough", () => {
|
|||
clearPathCache();
|
||||
const state = await deriveState(base);
|
||||
|
||||
// The reconciler checks existsSync(summaryPath) at line 1328
|
||||
// — it does NOT read content. So 0-byte file counts as "done".
|
||||
// This is a known gap: empty SUMMARY treated as completion.
|
||||
// 0-byte SUMMARY should NOT mark T01 done — task stays active
|
||||
assert.equal(
|
||||
state.phase,
|
||||
"executing",
|
||||
"0-byte SUMMARY marks T01 done via reconciliation, T02 becomes active",
|
||||
"0-byte SUMMARY should not mark T01 done",
|
||||
);
|
||||
assert.equal(state.activeTask?.id, "T02");
|
||||
assert.equal(state.activeTask?.id, "T01", "T01 should still be active");
|
||||
});
|
||||
|
||||
test("0-byte VALIDATION file → stays in validating-milestone", async () => {
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue