/**
 * auto/phases-pre-dispatch.js — runPreDispatch phase.
 */
|
|
import { cpSync, existsSync } from "node:fs";
import { basename, dirname, join, parse as parsePath } from "node:path";

import { importExtensionModule } from "@singularity-forge/coding-agent";

import {
  clearCurrentPhase,
  setCurrentPhase,
} from "../../shared/sf-phase-state.js";
import { atomicWriteSync } from "../atomic-write.js";
import { resetCompletionNudgeState } from "../auto-completion-nudge.js";
import {
  isAwaitingUserInput,
  USER_DRIVEN_DEEP_UNITS,
} from "../auto-post-unit.js";
import {
  buildLoopRemediationSteps,
  diagnoseExpectedArtifact,
  verifyExpectedArtifact,
} from "../auto-recovery.js";
import {
  formatToolCallSummary,
  resetToolCallCounts,
} from "../auto-tool-tracking.js";
import {
  appendAutonomousSolverCheckpoint,
  assessAutonomousSolverTurn,
  beginAutonomousSolverIteration,
  buildAutonomousSolverMissingCheckpointRepairPrompt,
  buildAutonomousSolverPromptBlock,
  buildAutonomousSolverSteeringPromptBlock,
  classifyAutonomousSolverMissingCheckpointFailure,
  consumePendingAutonomousSolverSteering,
  getConfiguredAutonomousSolverMaxIterations,
  recordAutonomousSolverMissingCheckpointRetry,
} from "../autonomous-solver.js";
import { resumeAutoAfterProviderDelay } from "../bootstrap/provider-error-resume.js";
import { debugLog } from "../debug-logger.js";
import { PROJECT_FILES } from "../detection.js";
import { getErrorMessage } from "../error-utils.js";
import { MergeConflictError } from "../git-service.js";
import { recordLearnedOutcome } from "../learning/runtime.js";
import { sfRoot } from "../paths.js";
import { resolvePersistModelChanges } from "../preferences.js";
import {
  approveProductionMutationWithLlmPolicy,
  ensureProductionMutationApprovalTemplate,
  readProductionMutationApprovalStatus,
} from "../production-mutation-approval.js";
import { pauseAutoForProviderError } from "../provider-error-pause.js";
import {
  buildReasoningAssistPrompt,
  injectReasoningGuidance,
  isReasoningAssistEnabled,
} from "../reasoning-assist.js";
import {
  loadEvidenceFromDisk,
  resetEvidence,
} from "../safety/evidence-collector.js";
import { getDirtyFiles } from "../safety/file-change-validator.js";
import {
  cleanupCheckpoint,
  createCheckpoint,
  rollbackToCheckpoint,
} from "../safety/git-checkpoint.js";
import { resolveSafetyHarnessConfig } from "../safety/safety-harness.js";
import { recordSelfFeedback } from "../self-feedback.js";
import {
  checkpointWal,
  getMilestoneSlices,
  getSliceTaskCounts,
  getTask,
  isDbAvailable,
} from "../sf-db.js";
import { getEligibleSlices } from "../slice-parallel-eligibility.js";
import { startSliceParallel } from "../slice-parallel-orchestrator.js";
import { handleProductAudit } from "../tools/product-audit-tool.js";
import { parseUnitId } from "../unit-id.js";
import {
  collectSessionTokenUsage,
  collectWorktreeFingerprint,
  countChangedFiles,
  resetRunawayGuardState,
} from "../uok/auto-runaway-guard.js";
import { resolveUokFlags } from "../uok/flags.js";
import { UokGateRunner } from "../uok/gate-runner.js";
import { emitModelAutoResolvedEvent } from "../uok/model-route-evidence.js";
import {
  ensurePlanV2Graph as ensurePlanningFlowGraph,
  isEmptyPlanV2GraphResult,
  isMissingFinalizedContextResult,
} from "../uok/plan.js";
import { buildUokProgressEvent } from "../uok/progress-event.js";
import {
  clearUnitRuntimeRecord,
  writeUnitRuntimeRecord,
} from "../uok/unit-runtime.js";
import {
  _resetLogs,
  drainAndSummarize,
  drainLogs,
  formatForNotification,
  hasAnyIssues,
  logError,
  logWarning,
} from "../workflow-logger.js";
import {
  getRequiredWorkflowToolsForAutoUnit,
  getWorkflowTransportSupportError,
} from "../workflow-tools.js";
import { resolveWorktreeProjectRoot } from "../worktree-root.js";
import { detectStuck } from "./detect-stuck.js";
import {
  FINALIZE_POST_TIMEOUT_MS,
  FINALIZE_PRE_TIMEOUT_MS,
  withTimeout,
} from "./finalize-timeout.js";
import {
  closeoutAndStop,
  generateMilestoneReport,
  maybeFireProductAudit,
  shouldRunPlanningFlowGate,
} from "./phases-helpers.js";
import { runUnit } from "./run-unit.js";
import {
  BUDGET_THRESHOLDS,
  MAX_FINALIZE_TIMEOUTS,
  MAX_RECOVERY_CHARS,
} from "./types.js";
|
|
|
|
// ─── runPreDispatch ───────────────────────────────────────────────────────────

/**
 * Phase 1: Pre-dispatch — resource guard, health gate, state derivation,
 * milestone transition, terminal conditions.
 * Returns break to exit the loop, or next with PreDispatchData on success.
 *
 * Side effects (in order, as applicable): records UOK gate results,
 * invalidates caches, syncs worktree artifacts, merges/enters milestone
 * branches, emits journal events, and may stop or pause the auto loop.
 *
 * @param {object} ic - Iteration context; this function reads ctx, pi, s
 *   (mutable session state), deps (injected services), prefs, flowId,
 *   iteration, and nextSeq().
 * @param {object} loopState - Mutable loop-level state; recentUnits and
 *   stuckRecoveryAttempts are reset here on milestone transition.
 * @returns {Promise<{action: "break", reason: string}
 *   | {action: "next", data: {state: object, mid: string, midTitle: string}}>}
 */
export async function runPreDispatch(ic, loopState) {
  const { ctx, pi, s, deps, prefs } = ic;
  const uokFlags = resolveUokFlags(prefs);
  // Records a single pre-dispatch gate result via UokGateRunner.
  // No-op when UOK gates are disabled in preferences.
  const runPreDispatchGate = async (input) => {
    if (!uokFlags.gates) return;
    const gateRunner = new UokGateRunner();
    gateRunner.register({
      id: input.gateId,
      type: input.gateType,
      // The gate "executes" by replaying the caller-supplied outcome; the
      // runner is used purely for its recording/evidence side effects.
      execute: async () => ({
        outcome: input.outcome,
        failureClass: input.failureClass,
        rationale: input.rationale,
        findings: input.findings ?? "",
      }),
    });
    await gateRunner.run(input.gateId, {
      basePath: s.basePath,
      traceId: `pre-dispatch:${ic.flowId}`,
      turnId: `iter-${ic.iteration}`,
      milestoneId: input.milestoneId ?? s.currentMilestoneId ?? undefined,
      unitType: "pre-dispatch",
      unitId: `iter-${ic.iteration}`,
    });
  };
  // Resource version guard — stop the loop if resources changed underneath us.
  const staleMsg = deps.checkResourcesStale(s.resourceVersionOnStart);
  if (staleMsg) {
    await runPreDispatchGate({
      gateId: "resource-version-guard",
      gateType: "policy",
      outcome: "fail",
      failureClass: "policy",
      rationale: "resource version guard blocked dispatch",
      findings: staleMsg,
    });
    await deps.stopAuto(ctx, pi, staleMsg);
    debugLog("autoLoop", { phase: "exit", reason: "resources-stale" });
    return { action: "break", reason: "resources-stale" };
  }
  await runPreDispatchGate({
    gateId: "resource-version-guard",
    gateType: "policy",
    outcome: "pass",
    failureClass: "none",
    rationale: "resource version guard passed",
  });
  // Fresh caches and prompt-size counters for this iteration.
  deps.invalidateAllCaches();
  s.lastPromptCharCount = undefined;
  s.lastBaselineCharCount = undefined;
  // Pre-dispatch health gate — blocks dispatch and pauses (not stops) on failure.
  try {
    const healthGate = await deps.preDispatchHealthGate(s.basePath);
    if (healthGate.fixesApplied.length > 0) {
      ctx.ui.notify(
        `Pre-dispatch: ${healthGate.fixesApplied.join(", ")}`,
        "info",
      );
    }
    if (!healthGate.proceed) {
      await runPreDispatchGate({
        gateId: "pre-dispatch-health-gate",
        gateType: "execution",
        outcome: "manual-attention",
        failureClass: "manual-attention",
        rationale: "pre-dispatch health gate blocked dispatch",
        findings: healthGate.reason,
      });
      ctx.ui.notify(
        healthGate.reason ||
          "Pre-dispatch health check failed — run /doctor for details.",
        "error",
      );
      await deps.pauseAuto(ctx, pi);
      debugLog("autoLoop", { phase: "exit", reason: "health-gate-failed" });
      return { action: "break", reason: "health-gate-failed" };
    }
    await runPreDispatchGate({
      gateId: "pre-dispatch-health-gate",
      gateType: "execution",
      outcome: "pass",
      failureClass: "none",
      rationale: "pre-dispatch health gate passed",
      findings:
        healthGate.fixesApplied.length > 0
          ? healthGate.fixesApplied.join(", ")
          : "",
    });
  } catch (e) {
    // An unexpected throw from the health gate is non-fatal: record it and
    // continue dispatch rather than blocking the loop.
    await runPreDispatchGate({
      gateId: "pre-dispatch-health-gate",
      gateType: "execution",
      outcome: "manual-attention",
      failureClass: "manual-attention",
      rationale: "pre-dispatch health gate threw unexpectedly",
      findings: String(e),
    });
    logWarning("engine", "Pre-dispatch health gate threw unexpectedly", {
      error: String(e),
    });
  }
  // Sync project root artifacts into worktree (only when running in a
  // worktree distinct from the original project root).
  if (
    s.originalBasePath &&
    s.basePath !== s.originalBasePath &&
    s.currentMilestoneId
  ) {
    deps.syncProjectRootToWorktree(
      s.originalBasePath,
      s.basePath,
      s.currentMilestoneId,
    );
  }
  // Derive state
  let state = await deps.deriveState(s.basePath);
  if (
    uokFlags.planningFlow &&
    isDbAvailable() &&
    shouldRunPlanningFlowGate(state.phase)
  ) {
    let compiled = ensurePlanningFlowGraph(s.basePath, state);
    // Empty-graph recovery: stale DB caches can yield 0 nodes right after a
    // task-complete write. Invalidate caches, re-derive state, and retry once.
    if (isEmptyPlanV2GraphResult(compiled)) {
      deps.invalidateAllCaches();
      state = await deps.deriveState(s.basePath);
      compiled = shouldRunPlanningFlowGate(state.phase)
        ? ensurePlanningFlowGraph(s.basePath, state)
        : {
            ok: true,
            reason: "empty planning-flow graph recovered by state rederive",
            nodeCount: 0,
          };
    }
    if (!compiled.ok) {
      const reason = compiled.reason ?? "Planning flow compilation failed";
      if (isMissingFinalizedContextResult(compiled)) {
        // Missing finalized context is recoverable later in dispatch, so it
        // is recorded as a pass rather than failing the loop closed.
        // NOTE(review): this branch falls through to the unconditional
        // "planning flow compile gate passed" record below, so the gate is
        // recorded twice on this path — confirm that is intended.
        await runPreDispatchGate({
          gateId: "planning-flow-gate",
          gateType: "policy",
          outcome: "pass",
          failureClass: "none",
          rationale: "plan v2 missing context recovery deferred to dispatch",
          findings: reason,
          milestoneId: state.activeMilestone?.id ?? undefined,
        });
      } else {
        // Any other compile failure fails closed: record, notify, pause.
        await runPreDispatchGate({
          gateId: "planning-flow-gate",
          gateType: "policy",
          outcome: "manual-attention",
          failureClass: "manual-attention",
          rationale: "planning flow compile gate failed",
          findings: reason,
          milestoneId: state.activeMilestone?.id ?? undefined,
        });
        ctx.ui.notify(
          `Plan gate failed-closed: ${reason}\n\nIf this keeps happening, try: /doctor heal`,
          "error",
        );
        await deps.pauseAuto(ctx, pi);
        return { action: "break", reason: "planning-flow-gate-failed" };
      }
    }
    await runPreDispatchGate({
      gateId: "planning-flow-gate",
      gateType: "policy",
      outcome: "pass",
      failureClass: "none",
      rationale: "planning flow compile gate passed",
      milestoneId: state.activeMilestone?.id ?? undefined,
    });
  }
  deps.syncCmuxSidebar(prefs, state);
  let mid = state.activeMilestone?.id;
  let midTitle = state.activeMilestone?.title;
  debugLog("autoLoop", {
    phase: "state-derived",
    iteration: ic.iteration,
    mid,
    statePhase: state.phase,
  });
  // ── Slice-level parallelism gate (#2340) ─────────────────────────────
  // When slice_parallel is enabled, check if multiple slices are eligible
  // for parallel execution. If so, dispatch them in parallel and stop the
  // sequential loop. Workers are spawned via slice-parallel-orchestrator.ts.
  // Skipped inside a parallel worker process (SF_PARALLEL_WORKER) to avoid
  // recursive fan-out.
  if (
    prefs?.slice_parallel?.enabled &&
    mid &&
    !process.env.SF_PARALLEL_WORKER &&
    isDbAvailable()
  ) {
    try {
      const dbSlices = getMilestoneSlices(mid);
      if (dbSlices.length > 0) {
        // NOTE(review): both "complete" and "done" are treated as terminal
        // slice statuses — confirm against the DB schema.
        const doneIds = new Set(
          dbSlices
            .filter((sl) => sl.status === "complete" || sl.status === "done")
            .map((sl) => sl.id),
        );
        const sliceInputs = dbSlices.map((sl) => ({
          id: sl.id,
          done: doneIds.has(sl.id),
          depends: sl.depends ?? [],
        }));
        const eligible = getEligibleSlices(sliceInputs, doneIds);
        // Parallel dispatch only pays off with 2+ eligible slices.
        if (eligible.length > 1) {
          debugLog("autoLoop", {
            phase: "slice-parallel-dispatch",
            iteration: ic.iteration,
            mid,
            eligibleSlices: eligible.map((e) => e.id),
          });
          ctx.ui.notify(
            `Slice-parallel: dispatching ${eligible.length} eligible slices for ${mid}.`,
            "info",
          );
          const result = await startSliceParallel(s.basePath, mid, eligible, {
            maxWorkers: prefs.slice_parallel.max_workers ?? 2,
            useExecutionGraph: uokFlags.executionGraph,
            shellWrapper: prefs.shell_wrapper,
          });
          if (result.started.length > 0) {
            ctx.ui.notify(
              `Slice-parallel: started ${result.started.length} worker(s): ${result.started.join(", ")}.`,
              "info",
            );
            // Workers own the milestone now — stop the sequential loop.
            await deps.stopAuto(
              ctx,
              pi,
              `Slice-parallel dispatched for ${mid}`,
            );
            return { action: "break", reason: "slice-parallel-dispatched" };
          }
          // Fall through to sequential if no workers started
        }
      }
    } catch (err) {
      debugLog("autoLoop", {
        phase: "slice-parallel-check-error",
        error: getErrorMessage(err),
      });
      // Non-fatal — fall through to sequential dispatch
    }
  }
  // ── Milestone transition ────────────────────────────────────────────
  // Active milestone differs from the one we were working on: announce the
  // transition, merge the old milestone's worktree, reset per-milestone
  // counters, and enter the new milestone.
  if (mid && s.currentMilestoneId && mid !== s.currentMilestoneId) {
    deps.emitJournalEvent({
      ts: new Date().toISOString(),
      flowId: ic.flowId,
      seq: ic.nextSeq(),
      eventType: "milestone-transition",
      data: { from: s.currentMilestoneId, to: mid },
    });
    ctx.ui.notify(
      `Milestone ${s.currentMilestoneId} complete. Advancing to ${mid}: ${midTitle}.`,
      "info",
    );
    deps.sendDesktopNotification(
      "SF",
      `Milestone ${s.currentMilestoneId} complete!`,
      "success",
      "milestone",
      basename(s.originalBasePath || s.basePath),
    );
    deps.logCmuxEvent(
      prefs,
      `Milestone ${s.currentMilestoneId} complete. Advancing to ${mid}.`,
      "success",
    );
    const vizPrefs = prefs;
    if (vizPrefs?.auto_visualize) {
      ctx.ui.notify("Run /visualize to see progress overview.", "info");
    }
    // auto_report defaults to on — only an explicit `false` disables it.
    if (vizPrefs?.auto_report !== false) {
      try {
        await generateMilestoneReport(s, ctx, s.currentMilestoneId);
      } catch (err) {
        // Report generation is best-effort; a failure must not block the
        // milestone transition.
        ctx.ui.notify(
          `Report generation failed: ${getErrorMessage(err)}`,
          "warning",
        );
      }
    }
    // Reset dispatch counters for new milestone
    s.unitDispatchCount.clear();
    s.unitRecoveryCount.clear();
    s.unitLifetimeDispatches.clear();
    loopState.recentUnits.length = 0;
    loopState.stuckRecoveryAttempts = 0;
    // Worktree lifecycle on milestone transition — merge current, enter next
    try {
      deps.resolver.mergeAndExit(s.currentMilestoneId, ctx.ui);
    } catch (mergeErr) {
      if (mergeErr instanceof MergeConflictError) {
        // Real code conflicts — stop the loop instead of retrying forever (#2330)
        ctx.ui.notify(
          `Merge conflict: ${mergeErr.conflictedFiles.join(", ")}. Resolve conflicts manually and run /autonomous to resume.`,
          "error",
        );
        await deps.stopAuto(
          ctx,
          pi,
          `Merge conflict on milestone ${s.currentMilestoneId}`,
        );
        return { action: "break", reason: "merge-conflict" };
      }
      // Non-conflict merge errors — stop auto to avoid advancing with unmerged work
      logError("engine", "Milestone merge failed with non-conflict error", {
        milestone: s.currentMilestoneId,
        error: String(mergeErr),
      });
      ctx.ui.notify(
        `Merge failed: ${getErrorMessage(mergeErr)}. Resolve and run /autonomous to resume.`,
        "error",
      );
      await deps.stopAuto(
        ctx,
        pi,
        `Merge error on milestone ${s.currentMilestoneId}: ${String(mergeErr)}`,
      );
      return { action: "break", reason: "merge-failed" };
    }
    // Fire product-audit after successful merge (guards against double-fire via s.productAuditMilestoneId)
    await maybeFireProductAudit(s, ctx);
    // PR creation (auto_pr) is handled inside mergeMilestoneToMain (#2302)
    // Re-derive state post-merge: the merge may have changed the registry.
    deps.invalidateAllCaches();
    state = await deps.deriveState(s.basePath);
    mid = state.activeMilestone?.id;
    midTitle = state.activeMilestone?.title;
    if (mid) {
      if (deps.getIsolationMode() !== "none") {
        deps.captureIntegrationBranch(s.basePath, mid);
      }
      deps.resolver.enterMilestone(mid, ctx.ui);
    } else {
      // mid is undefined — no milestone to capture integration branch for
    }
    // Drop completed/parked milestones from the queue order file.
    const pendingIds = state.registry
      .filter((m) => m.status !== "complete" && m.status !== "parked")
      .map((m) => m.id);
    deps.pruneQueueOrder(s.basePath, pendingIds);
    // Archive the old completed-units.json instead of wiping it (#2313).
    // NOTE(review): existsSync/cpSync come from node:fs — confirm the import
    // is present at the top of the file.
    try {
      const completedKeysPath = join(
        sfRoot(s.basePath),
        "completed-units.json",
      );
      if (existsSync(completedKeysPath) && s.currentMilestoneId) {
        const archivePath = join(
          sfRoot(s.basePath),
          `completed-units-${s.currentMilestoneId}.json`,
        );
        cpSync(completedKeysPath, archivePath);
      }
      // Reset the live file to an empty list for the new milestone.
      atomicWriteSync(completedKeysPath, JSON.stringify([], null, 2));
    } catch (e) {
      logWarning(
        "engine",
        "Failed to archive completed-units on milestone transition",
        { error: String(e) },
      );
    }
    // Rebuild STATE.md immediately so it reflects the new active milestone.
    // This bypasses the 30-second throttle in the normal rebuild path —
    // milestone transitions are rare and important enough to warrant an
    // immediate write.
    try {
      await deps.rebuildState(s.basePath);
    } catch (e) {
      logWarning(
        "engine",
        "STATE.md rebuild failed after milestone transition",
        { error: String(e) },
      );
    }
  }
  // Persist the (possibly new) active milestone as current.
  if (mid) {
    s.currentMilestoneId = mid;
    deps.setActiveMilestoneId(s.basePath, mid);
  }
  // ── Terminal conditions ──────────────────────────────────────────────
  // No active milestone: close out any in-flight unit, then decide between
  // "all complete", "empty registry", "blocked", and the inconsistent case.
  if (!mid) {
    if (s.currentUnit) {
      await deps.closeoutUnit(
        ctx,
        s.basePath,
        s.currentUnit.type,
        s.currentUnit.id,
        s.currentUnit.startedAt,
        deps.buildSnapshotOpts(s.currentUnit.type, s.currentUnit.id),
      );
    }
    const incomplete = state.registry.filter(
      (m) => m.status !== "complete" && m.status !== "parked",
    );
    if (incomplete.length === 0 && state.registry.length > 0) {
      // All milestones complete — merge milestone branch before stopping
      if (s.currentMilestoneId) {
        try {
          deps.resolver.mergeAndExit(s.currentMilestoneId, ctx.ui);
          // Prevent stopAuto from attempting the same merge (#2645)
          s.milestoneMergedInPhases = true;
          // Fire product-audit after successful merge (guards against double-fire via s.productAuditMilestoneId)
          await maybeFireProductAudit(s, ctx);
        } catch (mergeErr) {
          if (mergeErr instanceof MergeConflictError) {
            ctx.ui.notify(
              `Merge conflict: ${mergeErr.conflictedFiles.join(", ")}. Resolve conflicts manually and run /autonomous to resume.`,
              "error",
            );
            await deps.stopAuto(
              ctx,
              pi,
              `Merge conflict on milestone ${s.currentMilestoneId}`,
            );
            return { action: "break", reason: "merge-conflict" };
          }
          logError("engine", "Milestone merge failed with non-conflict error", {
            milestone: s.currentMilestoneId,
            error: String(mergeErr),
          });
          ctx.ui.notify(
            `Merge failed: ${getErrorMessage(mergeErr)}. Resolve and run /autonomous to resume.`,
            "error",
          );
          await deps.stopAuto(
            ctx,
            pi,
            `Merge error on milestone ${s.currentMilestoneId}: ${String(mergeErr)}`,
          );
          return { action: "break", reason: "merge-failed" };
        }
        // PR creation (auto_pr) is handled inside mergeMilestoneToMain (#2302)
      }
      deps.sendDesktopNotification(
        "SF",
        "All milestones complete!",
        "success",
        "milestone",
        basename(s.originalBasePath || s.basePath),
      );
      deps.logCmuxEvent(prefs, "All milestones complete.", "success");
      await deps.stopAuto(ctx, pi, "All milestones complete");
    } else if (incomplete.length === 0 && state.registry.length === 0) {
      // Empty registry — no milestones visible, likely a path resolution bug
      const diag = `basePath=${s.basePath}, phase=${state.phase}`;
      ctx.ui.notify(
        `No milestones visible in current scope. Possible path resolution issue.\n Diagnostic: ${diag}`,
        "error",
      );
      await deps.stopAuto(
        ctx,
        pi,
        `No milestones found — check basePath resolution`,
      );
    } else if (state.phase === "blocked") {
      const blockerMsg = `Blocked: ${state.blockers.join(", ")}`;
      // Pause instead of hard-stop so the session is resumable with `/autonomous`.
      // Hard-stop here was causing premature termination when slice dependencies
      // were temporarily unresolvable (e.g. after reassessment added new slices).
      await deps.pauseAuto(ctx, pi);
      ctx.ui.notify(
        `${blockerMsg}. Fix and run /autonomous to resume.`,
        "warning",
      );
      deps.sendDesktopNotification(
        "SF",
        blockerMsg,
        "warning",
        "attention",
        basename(s.originalBasePath || s.basePath),
      );
      deps.logCmuxEvent(prefs, blockerMsg, "warning");
    } else {
      // Inconsistent: incomplete milestones exist but none is active.
      const ids = incomplete.map((m) => m.id).join(", ");
      const diag = `basePath=${s.basePath}, milestones=[${state.registry.map((m) => `${m.id}:${m.status}`).join(", ")}], phase=${state.phase}`;
      ctx.ui.notify(
        `Unexpected: ${incomplete.length} incomplete milestone(s) (${ids}) but no active milestone.\n Diagnostic: ${diag}`,
        "error",
      );
      await deps.stopAuto(
        ctx,
        pi,
        `No active milestone — ${incomplete.length} incomplete (${ids}), see diagnostic above`,
      );
    }
    debugLog("autoLoop", { phase: "exit", reason: "no-active-milestone" });
    deps.emitJournalEvent({
      ts: new Date().toISOString(),
      flowId: ic.flowId,
      seq: ic.nextSeq(),
      eventType: "terminal",
      data: { reason: "no-active-milestone" },
    });
    return { action: "break", reason: "no-active-milestone" };
  }
  // Title fallback — missing roadmap titles degrade to the milestone ID.
  if (!midTitle) {
    midTitle = mid;
    ctx.ui.notify(
      `Milestone ${mid} has no title in roadmap — using ID as fallback.`,
      "warning",
    );
  }
  // Mid-merge safety check
  const mergeReconcileResult = deps.reconcileMergeState(s.basePath, ctx);
  if (mergeReconcileResult === "blocked") {
    await deps.pauseAuto(ctx, pi);
    debugLog("autoLoop", {
      phase: "exit",
      reason: "merge-reconciliation-blocked",
    });
    return { action: "break", reason: "merge-reconciliation-blocked" };
  }
  if (mergeReconcileResult === "reconciled") {
    // Reconciliation may have changed milestone state — re-derive.
    deps.invalidateAllCaches();
    state = await deps.deriveState(s.basePath);
    mid = state.activeMilestone?.id;
    midTitle = state.activeMilestone?.title;
  }
  if (!mid || !midTitle) {
    const noMilestoneReason = !mid
      ? "No active milestone after merge reconciliation"
      : `Milestone ${mid} has no title after reconciliation`;
    await closeoutAndStop(ctx, pi, s, deps, noMilestoneReason);
    debugLog("autoLoop", {
      phase: "exit",
      reason: "no-milestone-after-reconciliation",
    });
    return { action: "break", reason: "no-milestone-after-reconciliation" };
  }
  // Terminal: complete
  if (state.phase === "complete") {
    // Milestone merge on complete (before closeout so branch state is clean)
    if (s.currentMilestoneId) {
      try {
        deps.resolver.mergeAndExit(s.currentMilestoneId, ctx.ui);
        // Prevent stopAuto from attempting the same merge (#2645)
        s.milestoneMergedInPhases = true;
        // Fire product-audit after successful merge (guards against double-fire via s.productAuditMilestoneId)
        await maybeFireProductAudit(s, ctx);
      } catch (mergeErr) {
        if (mergeErr instanceof MergeConflictError) {
          ctx.ui.notify(
            `Merge conflict: ${mergeErr.conflictedFiles.join(", ")}. Resolve conflicts manually and run /autonomous to resume.`,
            "error",
          );
          await deps.stopAuto(
            ctx,
            pi,
            `Merge conflict on milestone ${s.currentMilestoneId}`,
          );
          return { action: "break", reason: "merge-conflict" };
        }
        logError("engine", "Milestone merge failed with non-conflict error", {
          milestone: s.currentMilestoneId,
          error: String(mergeErr),
        });
        ctx.ui.notify(
          `Merge failed: ${getErrorMessage(mergeErr)}. Resolve and run /autonomous to resume.`,
          "error",
        );
        await deps.stopAuto(
          ctx,
          pi,
          `Merge error on milestone ${s.currentMilestoneId}: ${String(mergeErr)}`,
        );
        return { action: "break", reason: "merge-failed" };
      }
      // PR creation (auto_pr) is handled inside mergeMilestoneToMain (#2302)
    }
    deps.sendDesktopNotification(
      "SF",
      `Milestone ${mid} complete!`,
      "success",
      "milestone",
      basename(s.originalBasePath || s.basePath),
    );
    deps.logCmuxEvent(prefs, `Milestone ${mid} complete.`, "success");
    await closeoutAndStop(ctx, pi, s, deps, `Milestone ${mid} complete`);
    debugLog("autoLoop", { phase: "exit", reason: "milestone-complete" });
    deps.emitJournalEvent({
      ts: new Date().toISOString(),
      flowId: ic.flowId,
      seq: ic.nextSeq(),
      eventType: "terminal",
      data: { reason: "milestone-complete", milestoneId: mid },
    });
    return { action: "break", reason: "milestone-complete" };
  }
  // Terminal: blocked — pause instead of hard-stop so the session is resumable.
  if (state.phase === "blocked") {
    const blockerMsg = `Blocked: ${state.blockers.join(", ")}`;
    // Close out any in-flight unit before pausing.
    if (s.currentUnit) {
      await deps.closeoutUnit(
        ctx,
        s.basePath,
        s.currentUnit.type,
        s.currentUnit.id,
        s.currentUnit.startedAt,
        deps.buildSnapshotOpts(s.currentUnit.type, s.currentUnit.id),
      );
    }
    await deps.pauseAuto(ctx, pi);
    ctx.ui.notify(
      `${blockerMsg}. Fix and run /autonomous to resume.`,
      "warning",
    );
    deps.sendDesktopNotification(
      "SF",
      blockerMsg,
      "warning",
      "attention",
      basename(s.originalBasePath || s.basePath),
    );
    deps.logCmuxEvent(prefs, blockerMsg, "warning");
    debugLog("autoLoop", { phase: "exit", reason: "blocked" });
    deps.emitJournalEvent({
      ts: new Date().toISOString(),
      flowId: ic.flowId,
      seq: ic.nextSeq(),
      eventType: "terminal",
      data: { reason: "blocked", blockers: state.blockers },
    });
    return { action: "break", reason: "blocked" };
  }
  // Happy path: an active, titled milestone in a dispatchable phase.
  return { action: "next", data: { state, mid, midTitle } };
}
|