fix: harden startup doctor and tool schemas

This commit is contained in:
Mikael Hugo 2026-05-05 14:03:36 +02:00
parent 00c9a1e0b5
commit 3ba2f8a501
13 changed files with 5748 additions and 4393 deletions

View file

@ -18,6 +18,7 @@ async function getMistralClass(): Promise<typeof Mistral> {
}
return _MistralClass;
}
import { getEnvApiKey } from "../env-api-keys.js";
import { calculateCost } from "../models.js";
import type {
@ -38,7 +39,11 @@ import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { shortHash } from "../utils/hash.js";
import { parseStreamingJson } from "../utils/json-parse.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { buildBaseOptions, clampReasoning, resolveReasoningLevel } from "./simple-options.js";
import {
buildBaseOptions,
clampReasoning,
resolveReasoningLevel,
} from "./simple-options.js";
import { transformMessagesWithReport } from "./transform-messages.js";
const MISTRAL_TOOL_CALL_ID_LENGTH = 9;
@ -48,14 +53,22 @@ const MAX_MISTRAL_ERROR_BODY_CHARS = 4000;
* Provider-specific options for the Mistral API.
*/
export interface MistralOptions extends StreamOptions {
toolChoice?: "auto" | "none" | "any" | "required" | { type: "function"; function: { name: string } };
toolChoice?:
| "auto"
| "none"
| "any"
| "required"
| { type: "function"; function: { name: string } };
promptMode?: "reasoning";
}
/**
* Stream responses from Mistral using `chat.stream`.
*/
export const streamMistral: StreamFunction<"mistral-conversations", MistralOptions> = (
export const streamMistral: StreamFunction<
"mistral-conversations",
MistralOptions
> = (
model: Model<"mistral-conversations">,
context: Context,
options?: MistralOptions,
@ -79,14 +92,27 @@ export const streamMistral: StreamFunction<"mistral-conversations", MistralOptio
});
const normalizeMistralToolCallId = createMistralToolCallIdNormalizer();
const transformedMessages = transformMessagesWithReport(context.messages, model, (id) => normalizeMistralToolCallId(id), "mistral-conversations");
const transformedMessages = transformMessagesWithReport(
context.messages,
model,
(id) => normalizeMistralToolCallId(id),
"mistral-conversations",
);
let payload = buildChatPayload(model, context, transformedMessages, options);
let payload = buildChatPayload(
model,
context,
transformedMessages,
options,
);
const nextPayload = await options?.onPayload?.(payload, model);
if (nextPayload !== undefined) {
payload = nextPayload as ChatCompletionStreamRequest;
}
const mistralStream = await mistral.chat.stream(payload, buildRequestOptions(model, options));
const mistralStream = await mistral.chat.stream(
payload,
buildRequestOptions(model, options),
);
stream.push({ type: "start", partial: output });
await consumeChatStream(model, output, stream, mistralStream);
@ -114,7 +140,10 @@ export const streamMistral: StreamFunction<"mistral-conversations", MistralOptio
/**
* Maps provider-agnostic `SimpleStreamOptions` to Mistral options.
*/
export const streamSimpleMistral: StreamFunction<"mistral-conversations", SimpleStreamOptions> = (
export const streamSimpleMistral: StreamFunction<
"mistral-conversations",
SimpleStreamOptions
> = (
model: Model<"mistral-conversations">,
context: Context,
options?: SimpleStreamOptions,
@ -125,11 +154,15 @@ export const streamSimpleMistral: StreamFunction<"mistral-conversations", Simple
}
const base = buildBaseOptions(model, options, apiKey);
const reasoning = clampReasoning(resolveReasoningLevel(model, options?.reasoning));
const reasoning = clampReasoning(
resolveReasoningLevel(model, options?.reasoning),
);
return streamMistral(model, context, {
...base,
promptMode: shouldUseMistralReasoningPromptMode(model, reasoning) ? "reasoning" : undefined,
promptMode: shouldUseMistralReasoningPromptMode(model, reasoning)
? "reasoning"
: undefined,
} satisfies MistralOptions);
};
@ -186,7 +219,8 @@ function createMistralToolCallIdNormalizer(): (id: string) => string {
function deriveMistralToolCallId(id: string, attempt: number): string {
const normalized = id.replace(/[^a-zA-Z0-9]/g, "");
if (attempt === 0 && normalized.length === MISTRAL_TOOL_CALL_ID_LENGTH) return normalized;
if (attempt === 0 && normalized.length === MISTRAL_TOOL_CALL_ID_LENGTH)
return normalized;
const seedBase = normalized || id;
const seed = attempt === 0 ? seedBase : `${seedBase}:${attempt}`;
return shortHash(seed)
@ -197,12 +231,15 @@ function deriveMistralToolCallId(id: string, attempt: number): string {
function formatMistralError(error: unknown): string {
if (error instanceof Error) {
const sdkError = error as Error & { statusCode?: unknown; body?: unknown };
const statusCode = typeof sdkError.statusCode === "number" ? sdkError.statusCode : undefined;
const bodyText = typeof sdkError.body === "string" ? sdkError.body.trim() : undefined;
const statusCode =
typeof sdkError.statusCode === "number" ? sdkError.statusCode : undefined;
const bodyText =
typeof sdkError.body === "string" ? sdkError.body.trim() : undefined;
if (statusCode !== undefined && bodyText) {
return `Mistral API error (${statusCode}): ${truncateErrorText(bodyText, MAX_MISTRAL_ERROR_BODY_CHARS)}`;
}
if (statusCode !== undefined) return `Mistral API error (${statusCode}): ${error.message}`;
if (statusCode !== undefined)
return `Mistral API error (${statusCode}): ${error.message}`;
return error.message;
}
return safeJsonStringify(error);
@ -222,7 +259,10 @@ function safeJsonStringify(value: unknown): string {
}
}
function buildRequestOptions(model: Model<"mistral-conversations">, options?: MistralOptions): RequestOptions {
function buildRequestOptions(
model: Model<"mistral-conversations">,
options?: MistralOptions,
): RequestOptions {
const requestOptions: RequestOptions = {};
if (options?.signal) requestOptions.signal = options.signal;
requestOptions.retries = { strategy: "none" };
@ -257,9 +297,11 @@ function buildChatPayload(
};
if (context.tools?.length) payload.tools = toFunctionTools(context.tools);
if (options?.temperature !== undefined) payload.temperature = options.temperature;
if (options?.temperature !== undefined)
payload.temperature = options.temperature;
if (options?.maxTokens !== undefined) payload.maxTokens = options.maxTokens;
if (options?.toolChoice) payload.toolChoice = mapToolChoice(options.toolChoice);
if (options?.toolChoice)
payload.toolChoice = mapToolChoice(options.toolChoice);
if (options?.promptMode) payload.promptMode = options.promptMode as any;
if (context.systemPrompt) {
@ -312,7 +354,8 @@ async function consumeChatStream(
output.usage.output = chunk.usage.completionTokens || 0;
output.usage.cacheRead = 0;
output.usage.cacheWrite = 0;
output.usage.totalTokens = chunk.usage.totalTokens || output.usage.input + output.usage.output;
output.usage.totalTokens =
chunk.usage.totalTokens || output.usage.input + output.usage.output;
calculateCost(model, output.usage);
}
@ -325,7 +368,8 @@ async function consumeChatStream(
const delta = choice.delta;
if (delta.content !== null && delta.content !== undefined) {
const contentItems = typeof delta.content === "string" ? [delta.content] : delta.content;
const contentItems =
typeof delta.content === "string" ? [delta.content] : delta.content;
for (const item of contentItems) {
if (typeof item === "string") {
const textDelta = sanitizeSurrogates(item);
@ -333,7 +377,11 @@ async function consumeChatStream(
finishCurrentBlock(currentBlock);
currentBlock = { type: "text", text: "" };
output.content.push(currentBlock);
stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
stream.push({
type: "text_start",
contentIndex: blockIndex(),
partial: output,
});
}
currentBlock.text += textDelta;
stream.push({
@ -356,7 +404,11 @@ async function consumeChatStream(
finishCurrentBlock(currentBlock);
currentBlock = { type: "thinking", thinking: "" };
output.content.push(currentBlock);
stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output });
stream.push({
type: "thinking_start",
contentIndex: blockIndex(),
partial: output,
});
}
currentBlock.thinking += thinkingDelta;
stream.push({
@ -374,7 +426,11 @@ async function consumeChatStream(
finishCurrentBlock(currentBlock);
currentBlock = { type: "text", text: "" };
output.content.push(currentBlock);
stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output });
stream.push({
type: "text_start",
contentIndex: blockIndex(),
partial: output,
});
}
currentBlock.text += textDelta;
stream.push({
@ -418,7 +474,11 @@ async function consumeChatStream(
};
output.content.push(block);
toolBlocksByKey.set(key, output.content.length - 1);
stream.push({ type: "toolcall_start", contentIndex: output.content.length - 1, partial: output });
stream.push({
type: "toolcall_start",
contentIndex: output.content.length - 1,
partial: output,
});
}
const argsDelta =
@ -426,7 +486,9 @@ async function consumeChatStream(
? toolCall.function.arguments
: JSON.stringify(toolCall.function.arguments || {});
block.partialArgs = (block.partialArgs || "") + argsDelta;
block.arguments = parseStreamingJson<Record<string, unknown>>(block.partialArgs);
block.arguments = parseStreamingJson<Record<string, unknown>>(
block.partialArgs,
);
stream.push({
type: "toolcall_delta",
contentIndex: toolBlocksByKey.get(key)!,
@ -441,7 +503,9 @@ async function consumeChatStream(
const block = output.content[index];
if (block.type !== "toolCall") continue;
const toolBlock = block as ToolCall & { partialArgs?: string };
toolBlock.arguments = parseStreamingJson<Record<string, unknown>>(toolBlock.partialArgs);
toolBlock.arguments = parseStreamingJson<Record<string, unknown>>(
toolBlock.partialArgs,
);
delete toolBlock.partialArgs;
stream.push({
type: "toolcall_end",
@ -452,19 +516,61 @@ async function consumeChatStream(
}
}
function toFunctionTools(tools: Tool[]): Array<FunctionTool & { type: "function" }> {
/**
 * Normalize an arbitrary tool-parameter schema into a plain JSON object
 * suitable for the Mistral API. Non-JSON values (functions, symbols,
 * `undefined`) are stripped via `sanitizeJsonSchemaValue`; if the result
 * is not a plain object, an empty object schema is returned instead.
 */
export function sanitizeMistralToolParameters(
  value: unknown,
): Record<string, unknown> {
  const cleaned = sanitizeJsonSchemaValue(value);
  return isPlainRecord(cleaned) ? cleaned : { type: "object", properties: {} };
}
/**
 * Recursively copy a JSON-schema-like value, keeping only data that JSON can
 * represent: null, strings, numbers, booleans, arrays, and plain objects.
 * Only enumerable string keys are visited, so symbol-keyed metadata (e.g.
 * TypeBox kind symbols) is dropped; entries that sanitize to `undefined`
 * are omitted from both arrays and objects.
 */
function sanitizeJsonSchemaValue(value: unknown): unknown {
  if (value === null) return null;
  const kind = typeof value;
  if (kind === "string" || kind === "number" || kind === "boolean") {
    return value;
  }
  if (Array.isArray(value)) {
    const items: unknown[] = [];
    for (const element of value) {
      const cleaned = sanitizeJsonSchemaValue(element);
      if (cleaned !== undefined) items.push(cleaned);
    }
    return items;
  }
  if (isPlainRecord(value)) {
    const record: Record<string, unknown> = {};
    for (const key of Object.keys(value)) {
      const cleaned = sanitizeJsonSchemaValue(value[key]);
      if (cleaned !== undefined) record[key] = cleaned;
    }
    return record;
  }
  // Functions, symbols, bigints, undefined, etc. have no JSON form.
  return undefined;
}

/** Narrow `value` to a non-null, non-array object (a plain record). */
function isPlainRecord(value: unknown): value is Record<string, unknown> {
  return value !== null && typeof value === "object" && !Array.isArray(value);
}
function toFunctionTools(
tools: Tool[],
): Array<FunctionTool & { type: "function" }> {
return tools.map((tool) => ({
type: "function",
function: {
name: tool.name,
description: tool.description,
parameters: tool.parameters as unknown as Record<string, unknown>,
parameters: sanitizeMistralToolParameters(tool.parameters),
strict: false,
},
}));
}
function toChatMessages(messages: Message[], supportsImages: boolean): ChatCompletionStreamRequestMessage[] {
function toChatMessages(
messages: Message[],
supportsImages: boolean,
): ChatCompletionStreamRequestMessage[] {
const result: ChatCompletionStreamRequestMessage[] = [];
for (const msg of messages) {
@ -477,27 +583,41 @@ function toChatMessages(messages: Message[], supportsImages: boolean): ChatCompl
const content: ContentChunk[] = msg.content
.filter((item) => item.type === "text" || supportsImages)
.map((item) => {
if (item.type === "text") return { type: "text", text: sanitizeSurrogates(item.text) };
return { type: "image_url", imageUrl: `data:${item.mimeType};base64,${item.data}` };
if (item.type === "text")
return { type: "text", text: sanitizeSurrogates(item.text) };
return {
type: "image_url",
imageUrl: `data:${item.mimeType};base64,${item.data}`,
};
});
if (content.length > 0) {
result.push({ role: "user", content });
continue;
}
if (hadImages && !supportsImages) {
result.push({ role: "user", content: "(image omitted: model does not support images)" });
result.push({
role: "user",
content: "(image omitted: model does not support images)",
});
}
continue;
}
if (msg.role === "assistant") {
const contentParts: ContentChunk[] = [];
const toolCalls: Array<{ id: string; type: "function"; function: { name: string; arguments: string } }> = [];
const toolCalls: Array<{
id: string;
type: "function";
function: { name: string; arguments: string };
}> = [];
for (const block of msg.content) {
if (block.type === "text") {
if (block.text.trim().length > 0) {
contentParts.push({ type: "text", text: sanitizeSurrogates(block.text) });
contentParts.push({
type: "text",
text: sanitizeSurrogates(block.text),
});
}
continue;
}
@ -505,7 +625,9 @@ function toChatMessages(messages: Message[], supportsImages: boolean): ChatCompl
if (block.thinking.trim().length > 0) {
contentParts.push({
type: "thinking",
thinking: [{ type: "text", text: sanitizeSurrogates(block.thinking) }],
thinking: [
{ type: "text", text: sanitizeSurrogates(block.thinking) },
],
});
}
continue;
@ -516,24 +638,37 @@ function toChatMessages(messages: Message[], supportsImages: boolean): ChatCompl
toolCalls.push({
id: block.id,
type: "function",
function: { name: block.name, arguments: JSON.stringify(block.arguments || {}) },
function: {
name: block.name,
arguments: JSON.stringify(block.arguments || {}),
},
});
}
const assistantMessage: ChatCompletionStreamRequestMessage = { role: "assistant" };
const assistantMessage: ChatCompletionStreamRequestMessage = {
role: "assistant",
};
if (contentParts.length > 0) assistantMessage.content = contentParts;
if (toolCalls.length > 0) assistantMessage.toolCalls = toolCalls;
if (contentParts.length > 0 || toolCalls.length > 0) result.push(assistantMessage);
if (contentParts.length > 0 || toolCalls.length > 0)
result.push(assistantMessage);
continue;
}
const toolContent: ContentChunk[] = [];
const textResult = msg.content
.filter((part) => part.type === "text")
.map((part) => (part.type === "text" ? sanitizeSurrogates(part.text) : ""))
.map((part) =>
part.type === "text" ? sanitizeSurrogates(part.text) : "",
)
.join("\n");
const hasImages = msg.content.some((part) => part.type === "image");
const toolText = buildToolResultText(textResult, hasImages, supportsImages, msg.isError);
const toolText = buildToolResultText(
textResult,
hasImages,
supportsImages,
msg.isError,
);
toolContent.push({ type: "text", text: toolText });
for (const part of msg.content) {
if (!supportsImages) continue;
@ -554,18 +689,28 @@ function toChatMessages(messages: Message[], supportsImages: boolean): ChatCompl
return result;
}
function buildToolResultText(text: string, hasImages: boolean, supportsImages: boolean, isError: boolean): string {
function buildToolResultText(
text: string,
hasImages: boolean,
supportsImages: boolean,
isError: boolean,
): string {
const trimmed = text.trim();
const errorPrefix = isError ? "[tool error] " : "";
if (trimmed.length > 0) {
const imageSuffix = hasImages && !supportsImages ? "\n[tool image omitted: model does not support images]" : "";
const imageSuffix =
hasImages && !supportsImages
? "\n[tool image omitted: model does not support images]"
: "";
return `${errorPrefix}${trimmed}${imageSuffix}`;
}
if (hasImages) {
if (supportsImages) {
return isError ? "[tool error] (see attached image)" : "(see attached image)";
return isError
? "[tool error] (see attached image)"
: "(see attached image)";
}
return isError
? "[tool error] (image omitted: model does not support images)"
@ -577,9 +722,20 @@ function buildToolResultText(text: string, hasImages: boolean, supportsImages: b
function mapToolChoice(
choice: MistralOptions["toolChoice"],
): "auto" | "none" | "any" | "required" | { type: "function"; function: { name: string } } | undefined {
):
| "auto"
| "none"
| "any"
| "required"
| { type: "function"; function: { name: string } }
| undefined {
if (!choice) return undefined;
if (choice === "auto" || choice === "none" || choice === "any" || choice === "required") {
if (
choice === "auto" ||
choice === "none" ||
choice === "any" ||
choice === "required"
) {
return choice as any;
}
return {

View file

@ -1,188 +1,280 @@
// SF — Provider Capabilities Registry Tests (ADR-005 Phase 1)
import { describe, test } from 'vitest';
import assert from "node:assert/strict";
import assert from "node:assert/strict";
import { describe, test } from "vitest";
import {
PROVIDER_CAPABILITIES,
getProviderCapabilities,
getUnsupportedFeatures,
mergeCapabilityOverrides,
getRegisteredApis,
sanitizeMistralToolParameters,
shouldUseMistralReasoningPromptMode,
} from "./mistral.js";
import {
getProviderCapabilities,
getRegisteredApis,
getUnsupportedFeatures,
mergeCapabilityOverrides,
PROVIDER_CAPABILITIES,
} from "./provider-capabilities.js";
import { shouldUseMistralReasoningPromptMode } from "./mistral.js";
// ─── Registry Completeness ──────────────────────────────────────────────────
describe("PROVIDER_CAPABILITIES registry", () => {
const EXPECTED_APIS = [
"anthropic-messages",
"anthropic-vertex",
"openai-responses",
"azure-openai-responses",
"openai-codex-responses",
"openai-completions",
"google-generative-ai",
"google-gemini-cli",
"google-vertex",
"mistral-conversations",
"bedrock-converse-stream",
"ollama-chat",
];
const EXPECTED_APIS = [
"anthropic-messages",
"anthropic-vertex",
"openai-responses",
"azure-openai-responses",
"openai-codex-responses",
"openai-completions",
"google-generative-ai",
"google-gemini-cli",
"google-vertex",
"mistral-conversations",
"bedrock-converse-stream",
"ollama-chat",
];
test("covers all expected API providers", () => {
for (const api of EXPECTED_APIS) {
assert.ok(
PROVIDER_CAPABILITIES[api],
`Missing capability entry for API: ${api}`,
);
}
});
test("covers all expected API providers", () => {
for (const api of EXPECTED_APIS) {
assert.ok(
PROVIDER_CAPABILITIES[api],
`Missing capability entry for API: ${api}`,
);
}
});
test("getRegisteredApis returns all entries", () => {
const registered = getRegisteredApis();
for (const api of EXPECTED_APIS) {
assert.ok(registered.includes(api), `getRegisteredApis missing: ${api}`);
}
});
test("getRegisteredApis returns all entries", () => {
const registered = getRegisteredApis();
for (const api of EXPECTED_APIS) {
assert.ok(registered.includes(api), `getRegisteredApis missing: ${api}`);
}
});
test("all entries have required fields", () => {
for (const [api, caps] of Object.entries(PROVIDER_CAPABILITIES)) {
assert.equal(typeof caps.toolCalling, "boolean", `${api}.toolCalling`);
assert.equal(typeof caps.maxTools, "number", `${api}.maxTools`);
assert.equal(typeof caps.imageToolResults, "boolean", `${api}.imageToolResults`);
assert.equal(typeof caps.structuredOutput, "boolean", `${api}.structuredOutput`);
assert.ok(caps.toolCallIdFormat, `${api}.toolCallIdFormat`);
assert.equal(typeof caps.toolCallIdFormat.maxLength, "number", `${api}.toolCallIdFormat.maxLength`);
assert.ok(caps.toolCallIdFormat.allowedChars instanceof RegExp, `${api}.toolCallIdFormat.allowedChars`);
assert.ok(
["full", "text-only", "none"].includes(caps.thinkingPersistence),
`${api}.thinkingPersistence is "${caps.thinkingPersistence}"`,
);
assert.ok(Array.isArray(caps.unsupportedSchemaFeatures), `${api}.unsupportedSchemaFeatures`);
}
});
test("all entries have required fields", () => {
for (const [api, caps] of Object.entries(PROVIDER_CAPABILITIES)) {
assert.equal(typeof caps.toolCalling, "boolean", `${api}.toolCalling`);
assert.equal(typeof caps.maxTools, "number", `${api}.maxTools`);
assert.equal(
typeof caps.imageToolResults,
"boolean",
`${api}.imageToolResults`,
);
assert.equal(
typeof caps.structuredOutput,
"boolean",
`${api}.structuredOutput`,
);
assert.ok(caps.toolCallIdFormat, `${api}.toolCallIdFormat`);
assert.equal(
typeof caps.toolCallIdFormat.maxLength,
"number",
`${api}.toolCallIdFormat.maxLength`,
);
assert.ok(
caps.toolCallIdFormat.allowedChars instanceof RegExp,
`${api}.toolCallIdFormat.allowedChars`,
);
assert.ok(
["full", "text-only", "none"].includes(caps.thinkingPersistence),
`${api}.thinkingPersistence is "${caps.thinkingPersistence}"`,
);
assert.ok(
Array.isArray(caps.unsupportedSchemaFeatures),
`${api}.unsupportedSchemaFeatures`,
);
}
});
});
// ─── Provider-specific Values ───────────────────────────────────────────────
describe("provider-specific capabilities", () => {
test("Anthropic supports full thinking persistence", () => {
assert.equal(PROVIDER_CAPABILITIES["anthropic-messages"].thinkingPersistence, "full");
});
test("Anthropic supports full thinking persistence", () => {
assert.equal(
PROVIDER_CAPABILITIES["anthropic-messages"].thinkingPersistence,
"full",
);
});
test("Anthropic supports image tool results", () => {
assert.equal(PROVIDER_CAPABILITIES["anthropic-messages"].imageToolResults, true);
});
test("Anthropic supports image tool results", () => {
assert.equal(
PROVIDER_CAPABILITIES["anthropic-messages"].imageToolResults,
true,
);
});
test("Anthropic tool call ID is 64 chars max", () => {
assert.equal(PROVIDER_CAPABILITIES["anthropic-messages"].toolCallIdFormat.maxLength, 64);
});
test("Anthropic tool call ID is 64 chars max", () => {
assert.equal(
PROVIDER_CAPABILITIES["anthropic-messages"].toolCallIdFormat.maxLength,
64,
);
});
test("Mistral tool call ID is 9 chars max", () => {
assert.equal(PROVIDER_CAPABILITIES["mistral-conversations"].toolCallIdFormat.maxLength, 9);
});
test("Mistral tool call ID is 9 chars max", () => {
assert.equal(
PROVIDER_CAPABILITIES["mistral-conversations"].toolCallIdFormat.maxLength,
9,
);
});
test("Mistral has no thinking persistence", () => {
assert.equal(PROVIDER_CAPABILITIES["mistral-conversations"].thinkingPersistence, "none");
});
test("Mistral has no thinking persistence", () => {
assert.equal(
PROVIDER_CAPABILITIES["mistral-conversations"].thinkingPersistence,
"none",
);
});
test("Mistral reasoning prompt mode is limited to Magistral models", () => {
const baseModel = {
id: "mistral-small-latest",
reasoning: true,
} as any;
test("Mistral reasoning prompt mode is limited to Magistral models", () => {
const baseModel = {
id: "mistral-small-latest",
reasoning: true,
} as any;
assert.equal(shouldUseMistralReasoningPromptMode(baseModel, "medium"), false);
assert.equal(
shouldUseMistralReasoningPromptMode({ ...baseModel, id: "magistral-medium-latest" }, "medium"),
true,
);
});
assert.equal(
shouldUseMistralReasoningPromptMode(baseModel, "medium"),
false,
);
assert.equal(
shouldUseMistralReasoningPromptMode(
{ ...baseModel, id: "magistral-medium-latest" },
"medium",
),
true,
);
});
test("Google does not support patternProperties", () => {
assert.ok(
PROVIDER_CAPABILITIES["google-generative-ai"].unsupportedSchemaFeatures.includes("patternProperties"),
);
});
test("Mistral tool schema drops TypeBox symbol metadata", () => {
const kind = Symbol("TypeBox.Kind");
const schema = {
type: "object",
required: ["path"],
properties: {
path: {
type: "string",
[kind]: "String",
},
},
[kind]: "Object",
};
test("Google does not support const", () => {
assert.ok(
PROVIDER_CAPABILITIES["google-generative-ai"].unsupportedSchemaFeatures.includes("const"),
);
});
const sanitized = sanitizeMistralToolParameters(schema);
test("OpenAI Responses does not support image tool results", () => {
assert.equal(PROVIDER_CAPABILITIES["openai-responses"].imageToolResults, false);
});
assert.deepEqual(Object.getOwnPropertySymbols(sanitized), []);
assert.deepEqual(
Object.getOwnPropertySymbols((sanitized.properties as any).path),
[],
);
assert.deepEqual(sanitized, {
type: "object",
required: ["path"],
properties: {
path: {
type: "string",
},
},
});
});
test("OpenAI Responses has text-only thinking persistence", () => {
assert.equal(PROVIDER_CAPABILITIES["openai-responses"].thinkingPersistence, "text-only");
});
test("Google does not support patternProperties", () => {
assert.ok(
PROVIDER_CAPABILITIES[
"google-generative-ai"
].unsupportedSchemaFeatures.includes("patternProperties"),
);
});
test("Google does not support const", () => {
assert.ok(
PROVIDER_CAPABILITIES[
"google-generative-ai"
].unsupportedSchemaFeatures.includes("const"),
);
});
test("OpenAI Responses does not support image tool results", () => {
assert.equal(
PROVIDER_CAPABILITIES["openai-responses"].imageToolResults,
false,
);
});
test("OpenAI Responses has text-only thinking persistence", () => {
assert.equal(
PROVIDER_CAPABILITIES["openai-responses"].thinkingPersistence,
"text-only",
);
});
});
// ─── getProviderCapabilities ────────────────────────────────────────────────
describe("getProviderCapabilities", () => {
test("returns known provider capabilities", () => {
const caps = getProviderCapabilities("anthropic-messages");
assert.equal(caps.toolCalling, true);
assert.equal(caps.thinkingPersistence, "full");
});
test("returns known provider capabilities", () => {
const caps = getProviderCapabilities("anthropic-messages");
assert.equal(caps.toolCalling, true);
assert.equal(caps.thinkingPersistence, "full");
});
test("returns permissive defaults for unknown providers", () => {
const caps = getProviderCapabilities("unknown-provider-xyz");
assert.equal(caps.toolCalling, true);
assert.equal(caps.imageToolResults, true);
assert.deepEqual(caps.unsupportedSchemaFeatures, []);
});
test("returns permissive defaults for unknown providers", () => {
const caps = getProviderCapabilities("unknown-provider-xyz");
assert.equal(caps.toolCalling, true);
assert.equal(caps.imageToolResults, true);
assert.deepEqual(caps.unsupportedSchemaFeatures, []);
});
});
// ─── getUnsupportedFeatures ─────────────────────────────────────────────────
describe("getUnsupportedFeatures", () => {
test("returns unsupported features for Google", () => {
const unsupported = getUnsupportedFeatures("google-generative-ai", ["patternProperties", "const"]);
assert.deepEqual(unsupported, ["patternProperties", "const"]);
});
test("returns unsupported features for Google", () => {
const unsupported = getUnsupportedFeatures("google-generative-ai", [
"patternProperties",
"const",
]);
assert.deepEqual(unsupported, ["patternProperties", "const"]);
});
test("returns empty for Anthropic with any features", () => {
const unsupported = getUnsupportedFeatures("anthropic-messages", ["patternProperties", "const"]);
assert.deepEqual(unsupported, []);
});
test("returns empty for Anthropic with any features", () => {
const unsupported = getUnsupportedFeatures("anthropic-messages", [
"patternProperties",
"const",
]);
assert.deepEqual(unsupported, []);
});
test("returns empty for unknown provider", () => {
const unsupported = getUnsupportedFeatures("unknown-xyz", ["patternProperties"]);
assert.deepEqual(unsupported, []);
});
test("returns empty for unknown provider", () => {
const unsupported = getUnsupportedFeatures("unknown-xyz", [
"patternProperties",
]);
assert.deepEqual(unsupported, []);
});
});
// ─── mergeCapabilityOverrides ───────────────────────────────────────────────
describe("mergeCapabilityOverrides", () => {
test("overrides individual fields", () => {
const merged = mergeCapabilityOverrides("openai-responses", {
imageToolResults: true,
});
assert.equal(merged.imageToolResults, true);
// Non-overridden fields preserved
assert.equal(merged.toolCalling, true);
assert.equal(merged.thinkingPersistence, "text-only");
});
test("overrides individual fields", () => {
const merged = mergeCapabilityOverrides("openai-responses", {
imageToolResults: true,
});
assert.equal(merged.imageToolResults, true);
// Non-overridden fields preserved
assert.equal(merged.toolCalling, true);
assert.equal(merged.thinkingPersistence, "text-only");
});
test("deep-merges toolCallIdFormat", () => {
const merged = mergeCapabilityOverrides("anthropic-messages", {
toolCallIdFormat: { maxLength: 128 },
});
assert.equal(merged.toolCallIdFormat.maxLength, 128);
// allowedChars preserved from base
assert.ok(merged.toolCallIdFormat.allowedChars instanceof RegExp);
});
test("deep-merges toolCallIdFormat", () => {
const merged = mergeCapabilityOverrides("anthropic-messages", {
toolCallIdFormat: { maxLength: 128 },
});
assert.equal(merged.toolCallIdFormat.maxLength, 128);
// allowedChars preserved from base
assert.ok(merged.toolCallIdFormat.allowedChars instanceof RegExp);
});
test("uses permissive defaults for unknown provider", () => {
const merged = mergeCapabilityOverrides("unknown-xyz", {
imageToolResults: false,
});
assert.equal(merged.imageToolResults, false);
assert.equal(merged.toolCalling, true); // from default
});
test("uses permissive defaults for unknown provider", () => {
const merged = mergeCapabilityOverrides("unknown-xyz", {
imageToolResults: false,
});
assert.equal(merged.imageToolResults, false);
assert.equal(merged.toolCalling, true); // from default
});
});

File diff suppressed because it is too large. Load diff

View file

@ -1,10 +1,96 @@
import { existsSync, readdirSync, rmSync, statSync } from "node:fs";
import { existsSync, readdirSync, renameSync, rmSync, statSync } from "node:fs";
import { join } from "node:path";
import { milestonesDir, resolveMilestoneFile } from "./paths.js";
import { _getAdapter, getAllMilestones, isDbAvailable } from "./sf-db.js";
import { deriveState } from "./state.js";
import { readEvents } from "./workflow-events.js";
import { renderAllProjections } from "./workflow-projections.js";
// Legacy plan directories were named "<ID>-<slug>" (e.g. "M3-user-auth",
// "S1-api-layer"); capture group 1 is the bare milestone/slice ID that the
// doctor renames the directory to.
const LEGACY_MILESTONE_DIR_RE = /^(M\d+)-.+$/;
const LEGACY_SLICE_DIR_RE = /^(S\d+)-.+$/;
/**
 * Return the bare unit ID captured from a legacy "<ID>-slug" directory
 * name, or null when the name does not match the legacy pattern.
 */
function legacyBareId(name, pattern) {
  const hit = name.match(pattern);
  if (hit === null) return null;
  return hit[1] ?? null;
}
/**
 * Inspect one directory entry that may use the legacy "<ID>-slug" naming
 * scheme and record a doctor issue for it. When the bare-ID target does not
 * already exist and the caller opts in via `shouldFix`, the directory is
 * renamed in place; rename failures are recorded in `fixesApplied` rather
 * than thrown so the doctor run keeps going.
 *
 * @param parentDir    Directory that contains `entry`.
 * @param entry        Directory name to examine (e.g. "M3-user-auth").
 * @param pattern      Regex whose first capture group is the bare ID.
 * @param unitPrefix   Prefix used when building the issue's unitId.
 * @param issues       Accumulator for doctor issues (mutated).
 * @param fixesApplied Accumulator of human-readable fix outcomes (mutated).
 * @param shouldFix    Optional predicate deciding whether a fix code runs.
 */
function normalizeLegacyDirectory(
  parentDir,
  entry,
  pattern,
  unitPrefix,
  issues,
  fixesApplied,
  shouldFix,
) {
  const bareId = legacyBareId(entry, pattern);
  if (!bareId) return;

  const from = join(parentDir, entry);
  const to = join(parentDir, bareId);
  const blocked = existsSync(to);

  issues.push({
    severity: blocked ? "warning" : "info",
    code: "legacy_plan_slug_directory",
    scope: "project",
    unitId: `${unitPrefix}/${entry}`,
    message: blocked
      ? `Legacy plan directory ${from} should be renamed to ${to}, but the target already exists. Merge manually before running doctor fix.`
      : `Legacy plan directory ${from} should be renamed to bare ID directory ${to}.`,
    file: from,
    fixable: !blocked,
  });

  // A pre-existing target is never auto-fixed; the caller must merge by hand.
  if (blocked) return;
  if (!shouldFix?.("legacy_plan_slug_directory")) return;

  try {
    renameSync(from, to);
    fixesApplied.push(`renamed legacy plan directory ${from} -> ${to}`);
  } catch {
    fixesApplied.push(
      `failed to rename legacy plan directory ${from} -> ${to}`,
    );
  }
}
/**
 * Walk the milestones tree under `basePath` and migrate any legacy
 * "<ID>-slug" directories — slice-level first, then the milestone directory
 * itself — by delegating each candidate to `normalizeLegacyDirectory`.
 * Slice directories are handled before their milestone directory is
 * (potentially) renamed.
 *
 * @param basePath     Project root containing the milestones directory.
 * @param issues       Accumulator for doctor issues (mutated).
 * @param fixesApplied Accumulator of human-readable fix outcomes (mutated).
 * @param shouldFix    Optional predicate deciding whether a fix code runs.
 */
export function normalizeLegacyPlanSlugDirectories(
  basePath,
  issues,
  fixesApplied,
  shouldFix,
) {
  const root = milestonesDir(basePath);
  if (!existsSync(root)) return;

  for (const milestone of readdirSync(root, { withFileTypes: true })) {
    if (!milestone.isDirectory()) continue;

    const slicesDir = join(root, milestone.name, "slices");
    if (existsSync(slicesDir)) {
      for (const slice of readdirSync(slicesDir, { withFileTypes: true })) {
        if (!slice.isDirectory()) continue;
        normalizeLegacyDirectory(
          slicesDir,
          slice.name,
          LEGACY_SLICE_DIR_RE,
          milestone.name,
          issues,
          fixesApplied,
          shouldFix,
        );
      }
    }

    normalizeLegacyDirectory(
      root,
      milestone.name,
      LEGACY_MILESTONE_DIR_RE,
      "milestone",
      issues,
      fixesApplied,
      shouldFix,
    );
  }
}
/**
* Check SF engine health: database constraints, projection drift, and corruption.
*
@ -12,237 +98,258 @@ import { renderAllProjections } from "./workflow-projections.js";
* Re-renders stale markdown projections when event log is newer than cached files.
* Non-fatal: issues are reported but never auto-fixed.
*/
export async function checkEngineHealth(
  basePath,
  issues,
  fixesApplied,
  shouldFix,
) {
  // DB file present but driver unavailable → degraded (filesystem) mode.
  const dbPath = join(basePath, ".sf", "sf.db");
  if (!isDbAvailable() && existsSync(dbPath)) {
    issues.push({
      severity: "warning",
      code: "db_unavailable",
      scope: "project",
      unitId: "project",
      message:
        "Database unavailable — using filesystem state derivation (degraded mode). State queries may be slower and less reliable.",
      file: ".sf/sf.db",
      fixable: false,
    });
  }
  try {
    normalizeLegacyPlanSlugDirectories(
      basePath,
      issues,
      fixesApplied,
      shouldFix,
    );
  } catch {
    // Non-fatal — legacy directory normalization must never block doctor.
  }
  // ── DB constraint violation detection (full doctor only, not pre-dispatch per D-10) ──
  try {
    if (isDbAvailable()) {
      const adapter = _getAdapter();
      // a. Orphaned tasks (task.slice_id points to non-existent slice)
      try {
        const orphanedTasks = adapter
          .prepare(`SELECT t.id, t.slice_id, t.milestone_id
FROM tasks t
LEFT JOIN slices s ON t.milestone_id = s.milestone_id AND t.slice_id = s.id
WHERE s.id IS NULL`)
          .all();
        for (const row of orphanedTasks) {
          issues.push({
            severity: "error",
            code: "db_orphaned_task",
            scope: "task",
            unitId: `${row.milestone_id}/${row.slice_id}/${row.id}`,
            message: `Task ${row.id} references slice ${row.slice_id} in milestone ${row.milestone_id} but no such slice exists in the database`,
            fixable: false,
          });
        }
      } catch {
        // Non-fatal — orphaned task check failed
      }
      // b. Orphaned slices (slice.milestone_id points to non-existent milestone)
      try {
        const orphanedSlices = adapter
          .prepare(`SELECT s.id, s.milestone_id
FROM slices s
LEFT JOIN milestones m ON s.milestone_id = m.id
WHERE m.id IS NULL`)
          .all();
        for (const row of orphanedSlices) {
          issues.push({
            severity: "error",
            code: "db_orphaned_slice",
            scope: "slice",
            unitId: `${row.milestone_id}/${row.id}`,
            message: `Slice ${row.id} references milestone ${row.milestone_id} but no such milestone exists in the database`,
            fixable: false,
          });
        }
      } catch {
        // Non-fatal — orphaned slice check failed
      }
      // c. Tasks marked complete without summaries
      try {
        const doneTasks = adapter
          .prepare(`SELECT id, slice_id, milestone_id FROM tasks
WHERE status = 'done' AND (summary IS NULL OR summary = '')`)
          .all();
        for (const row of doneTasks) {
          issues.push({
            severity: "warning",
            code: "db_done_task_no_summary",
            scope: "task",
            unitId: `${row.milestone_id}/${row.slice_id}/${row.id}`,
            message: `Task ${row.id} is marked done but has no summary in the database`,
            fixable: false,
          });
        }
      } catch {
        // Non-fatal — done-task-no-summary check failed
      }
      // d. Duplicate entity IDs (safety check)
      try {
        const dupMilestones = adapter
          .prepare(
            "SELECT id, COUNT(*) as cnt FROM milestones GROUP BY id HAVING cnt > 1",
          )
          .all();
        for (const row of dupMilestones) {
          issues.push({
            severity: "error",
            code: "db_duplicate_id",
            scope: "milestone",
            unitId: row.id,
            message: `Duplicate milestone ID "${row.id}" appears ${row.cnt} times in the database`,
            fixable: false,
          });
        }
        const dupSlices = adapter
          .prepare(
            "SELECT id, milestone_id, COUNT(*) as cnt FROM slices GROUP BY id, milestone_id HAVING cnt > 1",
          )
          .all();
        for (const row of dupSlices) {
          issues.push({
            severity: "error",
            code: "db_duplicate_id",
            scope: "slice",
            unitId: `${row.milestone_id}/${row.id}`,
            message: `Duplicate slice ID "${row.id}" in milestone ${row.milestone_id} appears ${row.cnt} times`,
            fixable: false,
          });
        }
        const dupTasks = adapter
          .prepare(
            "SELECT id, slice_id, milestone_id, COUNT(*) as cnt FROM tasks GROUP BY id, slice_id, milestone_id HAVING cnt > 1",
          )
          .all();
        for (const row of dupTasks) {
          issues.push({
            severity: "error",
            code: "db_duplicate_id",
            scope: "task",
            unitId: `${row.milestone_id}/${row.slice_id}/${row.id}`,
            message: `Duplicate task ID "${row.id}" in slice ${row.slice_id} appears ${row.cnt} times`,
            fixable: false,
          });
        }
      } catch {
        // Non-fatal — duplicate ID check failed
      }
    }
  } catch {
    // Non-fatal — DB constraint checks failed entirely
  }
  // ── Orphaned milestone directories ─────────────────────────────────────
  // Detect .sf/milestones/* directories that have no corresponding DB row.
  // These are leftover from manual cleanup, failed deletions, or DB resets.
  // When DB is available, DB is authoritative. When DB is unavailable,
  // fall back to filesystem-derived registry (roadmap-based discovery).
  try {
    const msDir = milestonesDir(basePath);
    if (existsSync(msDir)) {
      const validMilestoneIds = new Set();
      if (isDbAvailable()) {
        // DB-authoritative: only DB rows count as valid
        for (const m of getAllMilestones()) {
          validMilestoneIds.add(m.id);
        }
      } else {
        // No DB: fall back to filesystem registry
        const state = await deriveState(basePath);
        for (const m of state.registry) {
          validMilestoneIds.add(m.id);
        }
      }
      for (const entry of readdirSync(msDir)) {
        const fullPath = join(msDir, entry);
        try {
          if (!statSync(fullPath).isDirectory()) continue;
        } catch {
          continue;
        }
        // Extract milestone ID from directory name (handles M001, M001-r5jzab, etc.)
        const milestoneId = entry.split("-")[0];
        if (!milestoneId) continue;
        if (
          !validMilestoneIds.has(milestoneId) &&
          !validMilestoneIds.has(entry)
        ) {
          issues.push({
            severity: "warning",
            code: "orphaned_milestone_directory",
            scope: "project",
            unitId: entry,
            message: `Milestone directory ${fullPath} exists on disk but has no corresponding database entry or roadmap. It may be leftover from manual cleanup or a DB reset.`,
            fixable: true,
          });
          if (shouldFix?.("orphaned_milestone_directory")) {
            try {
              rmSync(fullPath, { recursive: true, force: true });
              fixesApplied.push(
                `removed orphaned milestone directory ${fullPath}`,
              );
            } catch {
              fixesApplied.push(
                `failed to remove orphaned milestone directory ${fullPath}`,
              );
            }
          }
        }
      }
    }
  } catch {
    // Non-fatal — orphaned milestone directory check failed
  }
  // ── Projection drift detection ──────────────────────────────────────────
  // If the DB is available, check whether markdown projections are stale
  // relative to the event log and re-render them.
  try {
    if (isDbAvailable()) {
      const eventLogPath = join(basePath, ".sf", "event-log.jsonl");
      const events = readEvents(eventLogPath);
      if (events.length > 0) {
        const lastEventTs = new Date(events[events.length - 1].ts).getTime();
        const state = await deriveState(basePath);
        for (const milestone of state.registry) {
          if (milestone.status === "complete") continue;
          const roadmapPath = resolveMilestoneFile(
            basePath,
            milestone.id,
            "ROADMAP",
          );
          if (!roadmapPath || !existsSync(roadmapPath)) {
            try {
              await renderAllProjections(basePath, milestone.id);
              fixesApplied.push(
                `re-rendered missing projections for ${milestone.id}`,
              );
            } catch {
              // Non-fatal — projection re-render failed
            }
            continue;
          }
          const projectionMtime = statSync(roadmapPath).mtimeMs;
          if (lastEventTs > projectionMtime) {
            try {
              await renderAllProjections(basePath, milestone.id);
              fixesApplied.push(
                `re-rendered stale projections for ${milestone.id}`,
              );
            } catch {
              // Non-fatal — projection re-render failed
            }
          }
        }
      }
    }
  } catch {
    // Non-fatal — projection drift check must never block doctor
  }
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -15,15 +15,31 @@
*/
import { existsSync, rmSync } from "node:fs";
import { basename, dirname, join } from "node:path";
import { clearLock, isLockProcessAlive, readCrashLock, } from "./crash-recovery.js";
import {
clearLock,
isLockProcessAlive,
readCrashLock,
} from "./crash-recovery.js";
import { rebuildState } from "./doctor.js";
import { runEnvironmentChecks } from "./doctor-environment.js";
import { abortAndReset } from "./git-self-heal.js";
import { resolveMilestoneIntegrationBranch } from "./git-service.js";
import { nativeAddTracked, nativeCommit, nativeGetCurrentBranch, nativeHasChanges, nativeIsRepo, nativeLastCommitEpoch, } from "./native-git-bridge.js";
import {
nativeAddTracked,
nativeCommit,
nativeGetCurrentBranch,
nativeHasChanges,
nativeIsRepo,
nativeLastCommitEpoch,
} from "./native-git-bridge.js";
import { resolveSfRootFile, sfRoot } from "./paths.js";
import { loadEffectiveSFPreferences } from "./preferences.js";
import {
formatProtectedSnapshotDeletionMessage,
listProtectedSnapshotDeletions,
} from "./snapshot-safety.js";
import { deriveState } from "./state.js";
/** In-memory health history for the current auto-mode session. */
let healthHistory = [];
/** Count of consecutive units with unresolved errors. */
let consecutiveErrorUnits = 0;
/** 1-based index of the most recently recorded unit. */
let healthUnitIndex = 0;
/** Last progress level reported to the level-change callback. */
let previousProgressLevel = "green";
/** Optional callback invoked on progress level transitions. */
let onLevelChange = null;
* Called once when auto-mode starts. Pass null to unregister.
*/
export function setLevelChangeCallback(cb) {
  onLevelChange = cb;
  // Reset the baseline so the first transition is measured from "green".
  previousProgressLevel = "green";
}
/**
* Record a health snapshot after a doctor run.
* Called from the post-unit hook in auto-post-unit.ts.
*/
export function recordHealthSnapshot(
  errors,
  warnings,
  fixesApplied,
  issues,
  fixes,
  scope,
) {
  healthUnitIndex++;
  healthHistory.push({
    timestamp: Date.now(),
    errors,
    warnings,
    fixesApplied,
    unitIndex: healthUnitIndex,
    issues: issues ?? [],
    fixes: fixes ?? [],
    scope,
  });
  // Keep only the last 50 snapshots to bound memory
  if (healthHistory.length > 50) {
    healthHistory = healthHistory.slice(-50);
  }
  // Track the run of consecutive units that ended with unresolved errors.
  if (errors > 0) {
    consecutiveErrorUnits++;
  } else {
    consecutiveErrorUnits = 0;
  }
  // Detect progress level transitions and notify
  if (onLevelChange) {
    const newLevel =
      consecutiveErrorUnits >= 3
        ? "red"
        : consecutiveErrorUnits >= 1 || getHealthTrend() === "degrading"
          ? "yellow"
          : "green";
    if (newLevel !== previousProgressLevel) {
      // Surface the most severe issue (first error, else first issue) in the message.
      const topIssue =
        (issues ?? []).find((i) => i.severity === "error") ?? (issues ?? [])[0];
      const detail = topIssue ? `: ${topIssue.message}` : "";
      onLevelChange(
        previousProgressLevel,
        newLevel,
        `Health ${previousProgressLevel}${newLevel}${detail}`,
      );
      previousProgressLevel = newLevel;
    }
  }
}
/**
* Get the current health trend.
* Returns "improving", "stable", "degrading", or "unknown" (not enough data).
*/
export function getHealthTrend() {
  // Need at least 3 snapshots before any trend claim is meaningful.
  if (healthHistory.length < 3) return "unknown";
  const recent = healthHistory.slice(-5);
  const older = healthHistory.slice(-10, -5);
  if (older.length === 0) return "unknown";
  // Compare average (errors + warnings) of the last 5 vs the previous 5.
  const recentAvg =
    recent.reduce((sum, s) => sum + s.errors + s.warnings, 0) / recent.length;
  const olderAvg =
    older.reduce((sum, s) => sum + s.errors + s.warnings, 0) / older.length;
  const delta = recentAvg - olderAvg;
  if (delta > 1) return "degrading";
  if (delta < -1) return "improving";
  return "stable";
}
/**
* Get the number of consecutive units with unresolved errors.
*/
export function getConsecutiveErrorUnits() {
  return consecutiveErrorUnits;
}
/**
* Get health history for display (e.g., dashboard overlay).
*/
export function getHealthHistory() {
  return healthHistory;
}
/**
* Get the latest health issues from the most recent snapshot.
* Returns issues from the last snapshot that had any, for real-time visibility.
*/
export function getLatestHealthIssues() {
  // Scan backwards for the most recent snapshot that carried any issues.
  for (let i = healthHistory.length - 1; i >= 0; i--) {
    if (healthHistory[i].issues.length > 0) return healthHistory[i].issues;
  }
  return [];
}
/**
* Get the latest fixes applied from the most recent snapshot.
*/
export function getLatestHealthFixes() {
  // Scan backwards for the most recent snapshot that carried any fixes.
  for (let i = healthHistory.length - 1; i >= 0; i--) {
    if (healthHistory[i].fixes.length > 0) return healthHistory[i].fixes;
  }
  return [];
}
/**
* Reset health tracking state. Called on auto-mode start/stop.
*/
export function resetHealthTracking() {
  healthHistory = [];
  consecutiveErrorUnits = 0;
  healthUnitIndex = 0;
  previousProgressLevel = "green";
}
/**
* Clear stale auto runtime locks before startup decides whether to resume.
 *
* paused-session state.
*/
export function healAutoStartupRuntime(basePath) {
  const fixesApplied = [];
  // Clear a crash lock whose holder process is no longer alive.
  try {
    const lock = readCrashLock(basePath);
    if (lock && !isLockProcessAlive(lock)) {
      clearLock(basePath);
      fixesApplied.push("cleared stale auto.lock before auto startup");
    }
  } catch {
    // Non-fatal.
  }
  // Remove a stranded <root>.lock directory when no live process holds it.
  try {
    const root = sfRoot(basePath);
    const lockDir = join(dirname(root), `${basename(root)}.lock`);
    if (existsSync(lockDir)) {
      const lock = readCrashLock(basePath);
      const lockHolderAlive = lock ? isLockProcessAlive(lock) : false;
      if (!lockHolderAlive) {
        rmSync(lockDir, { recursive: true, force: true });
        fixesApplied.push("removed stranded session lock directory");
      }
    }
  } catch {
    // Non-fatal.
  }
  return fixesApplied;
}
/**
* Lightweight pre-dispatch health check. Runs fast checks that should
 *
* Returns { proceed: true } if dispatch should continue.
*/
export async function preDispatchHealthGate(basePath) {
  const issues = [];
  const fixesApplied = [];
  // ── Stale crash lock blocks dispatch ──
  // If a stale lock exists, the crash recovery path should handle it,
  // not a new dispatch. This prevents double-dispatch after crashes.
  try {
    const lock = readCrashLock(basePath);
    if (lock && !isLockProcessAlive(lock)) {
      // Auto-clear it since we're about to dispatch anyway
      clearLock(basePath);
      fixesApplied.push("cleared stale auto.lock before dispatch");
    }
  } catch {
    // Non-fatal
  }
  // ── Corrupt merge/rebase state blocks dispatch ──
  // Dispatching a unit with MERGE_HEAD present will cause git operations to fail.
  try {
    const gitDir = join(basePath, ".git");
    if (existsSync(gitDir)) {
      const blockers = ["MERGE_HEAD", "rebase-apply", "rebase-merge"].filter(
        (f) => existsSync(join(gitDir, f)),
      );
      if (blockers.length > 0) {
        // Try to auto-heal
        try {
          const result = abortAndReset(basePath);
          fixesApplied.push(
            `pre-dispatch: cleaned merge state (${result.cleaned.join(", ")})`,
          );
        } catch {
          issues.push(
            `Corrupt git state: ${blockers.join(", ")}. Run /sf doctor fix.`,
          );
        }
      }
    }
  } catch {
    // Non-fatal
  }
  // ── STATE.md existence check ──
  // If STATE.md is missing, attempt to rebuild it for the next unit's context.
  // Non-blocking — fresh worktrees won't have it until the first unit completes (#889).
  try {
    const stateFile = resolveSfRootFile(basePath, "STATE");
    const milestonesDir = join(sfRoot(basePath), "milestones");
    if (existsSync(milestonesDir) && !existsSync(stateFile)) {
      try {
        await rebuildState(basePath);
        fixesApplied.push("rebuilt missing STATE.md before dispatch");
      } catch {
        // Rebuild failed — non-blocking, dispatch continues
        fixesApplied.push(
          "STATE.md missing — will rebuild after first unit completes",
        );
      }
    }
  } catch {
    // Non-fatal — dispatch continues without STATE.md if rebuild fails
  }
  // ── Integration branch existence check ──
  // If the active milestone's recorded integration branch no longer exists in
  // git, the merge-back at the end of the milestone will fail. Block dispatch
  // now to surface this before work is lost.
  try {
    if (nativeIsRepo(basePath)) {
      const state = await deriveState(basePath);
      if (state.activeMilestone) {
        const gitPrefs = loadEffectiveSFPreferences()?.preferences?.git ?? {};
        const resolution = resolveMilestoneIntegrationBranch(
          basePath,
          state.activeMilestone.id,
          gitPrefs,
        );
        if (resolution.status === "fallback" && resolution.effectiveBranch) {
          fixesApplied.push(
            `using fallback integration branch "${resolution.effectiveBranch}" for milestone ${state.activeMilestone.id}; recorded "${resolution.recordedBranch}" no longer exists`,
          );
        } else if (
          resolution.recordedBranch &&
          resolution.status === "missing"
        ) {
          issues.push(
            `${resolution.reason} Restore the branch or update the integration branch before dispatching. Run /sf doctor for details.`,
          );
        }
      }
    }
  } catch {
    // Non-fatal — dispatch continues if state/branch check fails
  }
  // ── Stale uncommitted changes — auto-snapshot before dispatch ──
  // If the working tree is dirty and no commit has happened recently,
  // create a safety snapshot so work isn't lost if the next unit crashes.
  try {
    if (nativeIsRepo(basePath)) {
      const prefs = loadEffectiveSFPreferences()?.preferences ?? {};
      const thresholdMinutes = prefs.stale_commit_threshold_minutes ?? 30;
      if (thresholdMinutes > 0 && nativeHasChanges(basePath)) {
        const branch = nativeGetCurrentBranch(basePath);
        const lastEpoch = nativeLastCommitEpoch(basePath, branch || "HEAD");
        const nowEpoch = Math.floor(Date.now() / 1000);
        const minutesSinceCommit =
          lastEpoch > 0 ? (nowEpoch - lastEpoch) / 60 : Infinity;
        if (minutesSinceCommit >= thresholdMinutes) {
          const mins = Math.floor(minutesSinceCommit);
          // Refuse to snapshot over protected deletions; surface them instead.
          const protectedDeletions = listProtectedSnapshotDeletions(basePath);
          if (protectedDeletions.length > 0) {
            issues.push(
              formatProtectedSnapshotDeletionMessage(protectedDeletions),
            );
          } else {
            try {
              nativeAddTracked(basePath);
              const commitMsg = `sf snapshot: pre-dispatch, uncommitted changes after ${mins}m inactivity`;
              const result = nativeCommit(basePath, commitMsg);
              if (result) {
                fixesApplied.push(
                  `pre-dispatch: created sf snapshot after ${mins}m of uncommitted changes`,
                );
              }
            } catch {
              // Non-blocking — snapshot failed but dispatch can continue
              fixesApplied.push("pre-dispatch: sf snapshot failed");
            }
          }
        }
      }
    }
  } catch {
    // Non-fatal
  }
  // ── Disk space check ──
  // Catches low-disk conditions before dispatch rather than letting the unit
  // fail mid-execution with ENOSPC (which wastes a full LLM turn).
  try {
    const envResults = runEnvironmentChecks(basePath);
    const diskError = envResults.find(
      (r) => r.name === "disk_space" && r.status === "error",
    );
    if (diskError) {
      issues.push(
        `${diskError.message}${diskError.detail ? ` — ${diskError.detail}` : ""}`,
      );
    }
  } catch {
    // Non-fatal — dispatch continues if env check fails
  }
  // If we had critical issues that couldn't be auto-healed, block dispatch
  if (issues.length > 0) {
    return {
      proceed: false,
      reason: `Pre-dispatch health check failed:\n${issues.map((i) => ` - ${i}`).join("\n")}\nRun /sf doctor fix to resolve.`,
      issues,
      fixesApplied,
    };
  }
  return { proceed: true, issues, fixesApplied };
}
// ── Auto-Heal Escalation ──────────────────────────────────────────────────
/** Threshold: escalate to LLM heal after this many consecutive error units. */
let escalationTriggered = false;
/**
 * Check whether auto-heal should escalate to an LLM-driven repair.
 * Returns a shouldEscalate flag plus a human-readable reason when
* escalation is not needed.
*/
export function checkHealEscalation(errors, unresolvedIssues) {
  // NOTE: the original body was present twice (a diff-merge artifact); the
  // second copy after the unconditional return was unreachable dead code and
  // has been removed. Behavior is unchanged.
  //
  // Escalate at most once per session — a second escalation would just loop.
  if (escalationTriggered) {
    return {
      shouldEscalate: false,
      reason: "already escalated this session",
      issues: [],
    };
  }
  // Not enough consecutive failing units yet to justify an expensive heal.
  if (consecutiveErrorUnits < ESCALATION_THRESHOLD) {
    return {
      shouldEscalate: false,
      reason: `${consecutiveErrorUnits}/${ESCALATION_THRESHOLD} consecutive error units`,
      issues: [],
    };
  }
  // Nothing actionable to hand to the healer.
  if (errors === 0) {
    return {
      shouldEscalate: false,
      reason: "no errors to escalate",
      issues: [],
    };
  }
  // If health is already trending better, defer and give the cheap loop time.
  const trend = getHealthTrend();
  if (trend === "improving") {
    return {
      shouldEscalate: false,
      reason: "health is improving — deferring escalation",
      issues: [],
    };
  }
  // Latch so this session never escalates twice.
  escalationTriggered = true;
  return {
    shouldEscalate: true,
    reason: `${consecutiveErrorUnits} consecutive units with unresolved errors (trend: ${trend})`,
    issues: unresolvedIssues,
  };
}
/**
 * Reset escalation state. Called on auto-mode start/stop.
 * (The original assigned the flag twice — redundant duplicate removed.)
 */
export function resetEscalation() {
  escalationTriggered = false;
}
/**
 * Format a health summary for display in the auto-mode dashboard.
 * Human-readable with full words, not abbreviations.
 *
 * Note: the original contained two concatenated copies of this body (a
 * diff-merge artifact); everything after the first `return` was unreachable
 * and has been removed. Behavior is unchanged.
 *
 * @returns A single "·"-separated summary line, or "No health data yet."
 *          when no snapshots have been recorded.
 */
export function formatHealthSummary() {
  if (healthHistory.length === 0) return "No health data yet.";
  const latest = healthHistory[healthHistory.length - 1];
  const trend = getHealthTrend();
  const trendLabel =
    trend === "improving"
      ? "improving"
      : trend === "degrading"
        ? "degrading"
        : trend === "stable"
          ? "stable"
          : "unknown";
  // Total fixes across the whole session, not just the latest snapshot.
  const totalFixes = healthHistory.reduce((sum, s) => sum + s.fixesApplied, 0);
  const parts = [];
  // Error/warning summary from the latest snapshot.
  if (latest.errors === 0 && latest.warnings === 0) {
    parts.push("No issues");
  } else {
    const counts = [];
    if (latest.errors > 0)
      counts.push(`${latest.errors} error${latest.errors > 1 ? "s" : ""}`);
    if (latest.warnings > 0)
      counts.push(
        `${latest.warnings} warning${latest.warnings > 1 ? "s" : ""}`,
      );
    parts.push(counts.join(", "));
  }
  parts.push(`trend ${trendLabel}`);
  if (totalFixes > 0) {
    parts.push(`${totalFixes} fix${totalFixes > 1 ? "es" : ""} applied`);
  }
  // Show progress toward the escalation threshold when errors are streaking.
  if (consecutiveErrorUnits > 0) {
    parts.push(
      `${consecutiveErrorUnits} of ${ESCALATION_THRESHOLD} consecutive errors before escalation`,
    );
  }
  // Include the top issue from the latest snapshot (first error, else first).
  if (latest.issues.length > 0) {
    const topIssue =
      latest.issues.find((i) => i.severity === "error") ?? latest.issues[0];
    parts.push(`latest: ${topIssue.message}`);
  }
  return parts.join(" · ");
}
/**
 * Reset all proactive healing state. Called on auto-mode start/stop.
 * (The original invoked each reset twice — harmless but redundant; deduped.)
 */
export function resetProactiveHealing() {
  resetHealthTracking();
  resetEscalation();
}

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,48 @@
import { execFileSync } from "node:child_process";
import { GIT_NO_PROMPT_ENV } from "./git-constants.js";
// Git pathspecs (":(glob)" syntax) matching hand-written extension declaration
// files whose deletion must block automated snapshots.
// Consumed by listProtectedSnapshotDeletions below.
const PROTECTED_SNAPSHOT_DELETE_PATHS = [
  ":(glob)src/resources/extensions/**/*.d.ts",
];
/**
 * List protected source files that are currently deleted.
 *
 * Purpose: keep automated stale-work snapshots from committing suspicious loss
 * of hand-written extension declaration files.
 *
 * Consumer: pre-dispatch and doctor git stale-change snapshot checks.
 *
 * @param basePath Repository working directory to run `git diff` in.
 * @param opts     `{ cached?: boolean }` — when true, inspect staged deletions.
 * @returns Repo-relative paths of deleted protected files; `[]` on any git
 *          failure (missing git, not a repo, etc.).
 */
export function listProtectedSnapshotDeletions(basePath, opts = {}) {
  const gitArgs = ["diff", "--name-only", "--diff-filter=D"];
  if (opts.cached === true) {
    gitArgs.push("--cached");
  }
  gitArgs.push("--", ...PROTECTED_SNAPSHOT_DELETE_PATHS);
  let stdout;
  try {
    stdout = execFileSync("git", gitArgs, {
      cwd: basePath,
      stdio: ["ignore", "pipe", "pipe"],
      encoding: "utf-8",
      env: GIT_NO_PROMPT_ENV,
    });
  } catch {
    // Best-effort check: treat any git failure as "nothing protected deleted".
    return [];
  }
  const trimmed = stdout.trim();
  if (!trimmed) return [];
  return trimmed.split("\n").filter(Boolean);
}
/**
 * Format a blocking diagnostic for protected snapshot deletions.
 *
 * Purpose: explain why SF refused an automated snapshot and name the files that
 * need an explicit human or task-level decision.
 *
 * Consumer: pre-dispatch and doctor git stale-change snapshot checks.
 *
 * @param paths Repo-relative paths of deleted protected files.
 * @returns One-line message listing at most 8 paths, with a ", and N more"
 *          tail when more were deleted.
 */
export function formatProtectedSnapshotDeletionMessage(paths) {
  const maxShown = 8;
  const visible = paths.slice(0, maxShown);
  const hiddenCount = paths.length - visible.length;
  const tail = hiddenCount > 0 ? `, and ${hiddenCount} more` : "";
  return `Protected declaration deletions detected; refusing automated snapshot: ${visible.join(", ")}${tail}`;
}

View file

@ -0,0 +1,31 @@
import assert from "node:assert/strict";
import { readFileSync } from "node:fs";
import { join } from "node:path";
import { describe, test } from "vitest";
// Static contract test: inspects the auto.js source text rather than
// executing it, so it can assert statement ordering cheaply.
const autoSource = readFileSync(
  join(process.cwd(), "src/resources/extensions/sf/auto.js"),
  "utf-8",
);

describe("auto startup doctor", () => {
  test("startAuto_when_session_not_running_runs_startup_doctor_fix_before_resume_dispatch", () => {
    // Offsets of the three source landmarks whose relative order encodes the
    // contract: running-session guard → doctor fix → paused-resume handling.
    const offsets = {
      runningGuard: autoSource.indexOf('classification === "running"'),
      doctorCall: autoSource.indexOf("await runStartupDoctorFix(ctx, base)"),
      pausedResume: autoSource.indexOf("// If resuming from paused state"),
    };
    assert.ok(offsets.runningGuard !== -1, "running-session guard must exist");
    assert.ok(offsets.doctorCall !== -1, "fresh startup must run doctor fix");
    assert.ok(offsets.pausedResume !== -1, "paused resume marker must exist");
    assert.ok(
      offsets.runningGuard < offsets.doctorCall,
      "doctor must not run while another session is active",
    );
    assert.ok(
      offsets.doctorCall < offsets.pausedResume,
      "doctor must run before resume/dispatch decisions",
    );
  });
});

View file

@ -0,0 +1,100 @@
import assert from "node:assert/strict";
import {
existsSync,
mkdirSync,
mkdtempSync,
rmSync,
writeFileSync,
} from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, describe, test } from "vitest";
import {
applyEnvironmentFixes,
environmentResultsToDoctorIssues,
runEnvironmentChecks,
} from "../doctor-environment.js";
// Temp directories created during the suite; removed in afterEach.
const tmpDirs = [];

/**
 * Create a throwaway project directory containing a minimal package.json and
 * lockfile (deliberately no node_modules), registered for cleanup.
 */
function makeProject() {
  const projectDir = mkdtempSync(join(tmpdir(), "sf-env-fix-"));
  tmpDirs.push(projectDir);
  const manifest = JSON.stringify({ scripts: {} }, null, 2);
  const lockfile = JSON.stringify({ lockfileVersion: 3 }, null, 2);
  writeFileSync(join(projectDir, "package.json"), manifest);
  writeFileSync(join(projectDir, "package-lock.json"), lockfile);
  return projectDir;
}

afterEach(() => {
  // Drain the list so repeated cleanup runs stay idempotent.
  for (let dir = tmpDirs.pop(); dir !== undefined; dir = tmpDirs.pop()) {
    rmSync(dir, { recursive: true, force: true });
  }
});
describe("doctor environment dependency fixes", () => {
  test("environmentResultsToDoctorIssues_when_node_modules_missing_marks_dependencies_fixable", () => {
    const project = makeProject();
    // makeProject writes package.json + lockfile but no node_modules, so the
    // environment check should surface a fixable env_dependencies issue.
    const issues = environmentResultsToDoctorIssues(
      runEnvironmentChecks(project),
    );
    const deps = issues.find((issue) => issue.code === "env_dependencies");
    assert.equal(deps?.fixable, true);
    assert.match(deps?.message ?? "", /node_modules missing/);
  });
  test("applyEnvironmentFixes_when_fix_enabled_runs_detected_package_manager", () => {
    const project = makeProject();
    // Shadow the real npm with a stub shell script that fabricates
    // node_modules, so the fix never hits the network or a real installer.
    const fakeBin = mkdtempSync(join(tmpdir(), "sf-env-bin-"));
    tmpDirs.push(fakeBin);
    writeFileSync(
      join(fakeBin, "npm"),
      "#!/usr/bin/env sh\nmkdir -p node_modules\nprintf '{}\\n' > node_modules/.package-lock.json\n",
      { mode: 0o755 },
    );
    const results = runEnvironmentChecks(project);
    const fixesApplied = [];
    const originalPath = process.env.PATH;
    // Prepend the stub directory so "npm" resolves to the fake binary.
    process.env.PATH = `${fakeBin}:${originalPath ?? ""}`;
    try {
      const fixed = applyEnvironmentFixes(project, results, {
        shouldFix: (code) => code === "env_dependencies",
        fixesApplied,
      });
      assert.equal(fixed, true);
      // The stub's marker file proves the detected package manager actually ran.
      assert.equal(
        existsSync(join(project, "node_modules", ".package-lock.json")),
        true,
      );
      assert.deepEqual(fixesApplied, ["dependencies: ran npm install"]);
    } finally {
      // NOTE(review): if PATH was originally unset, this assigns the string
      // "undefined" — confirm that is acceptable for this test environment.
      process.env.PATH = originalPath;
    }
  });
  test("applyEnvironmentFixes_when_fix_disabled_does_not_install", () => {
    const project = makeProject();
    // Create-then-delete guarantees node_modules is absent at check time.
    mkdirSync(join(project, "node_modules"), { recursive: true });
    rmSync(join(project, "node_modules"), { recursive: true, force: true });
    const results = runEnvironmentChecks(project);
    const fixesApplied = [];
    const fixed = applyEnvironmentFixes(project, results, {
      shouldFix: () => false,
      fixesApplied,
    });
    // With the fix declined, nothing is installed and nothing is recorded.
    assert.equal(fixed, false);
    assert.deepEqual(fixesApplied, []);
    assert.equal(existsSync(join(project, "node_modules")), false);
  });
});

View file

@ -0,0 +1,110 @@
import assert from "node:assert/strict";
import {
existsSync,
mkdirSync,
mkdtempSync,
rmSync,
writeFileSync,
} from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, describe, test } from "vitest";
import { normalizeLegacyPlanSlugDirectories } from "../doctor-engine-checks.js";
// Temp project directories created by makeProject; removed in afterEach.
const tmpDirs = [];

/**
 * Create a throwaway project containing an empty .sf/milestones tree,
 * registered for cleanup.
 */
function makeProject() {
  const projectDir = mkdtempSync(join(tmpdir(), "sf-plan-dir-"));
  tmpDirs.push(projectDir);
  mkdirSync(join(projectDir, ".sf", "milestones"), { recursive: true });
  return projectDir;
}

afterEach(() => {
  // Drain the list so repeated cleanup runs stay idempotent.
  for (let dir = tmpDirs.pop(); dir !== undefined; dir = tmpDirs.pop()) {
    rmSync(dir, { recursive: true, force: true });
  }
});
describe("doctor plan directory normalization", () => {
  test("normalizeLegacyPlanSlugDirectories_when_fix_enabled_renames_milestone_and_slice_dirs", () => {
    const project = makeProject();
    // Legacy layout: slug-suffixed names at both milestone and slice levels.
    const legacySlice = join(
      project,
      ".sf",
      "milestones",
      "M001-long-name",
      "slices",
      "S02-research-work",
    );
    mkdirSync(legacySlice, { recursive: true });
    writeFileSync(join(legacySlice, "S02-RESEARCH.md"), "# Research\n");
    const issues = [];
    const fixesApplied = [];
    normalizeLegacyPlanSlugDirectories(
      project,
      issues,
      fixesApplied,
      (code) => code === "legacy_plan_slug_directory",
    );
    // Both directory levels should now use bare IDs...
    assert.equal(existsSync(join(project, ".sf", "milestones", "M001")), true);
    assert.equal(
      existsSync(join(project, ".sf", "milestones", "M001", "slices", "S02")),
      true,
    );
    // ...and the slug-suffixed originals should be gone.
    assert.equal(
      existsSync(join(project, ".sf", "milestones", "M001-long-name")),
      false,
    );
    assert.equal(
      existsSync(
        join(
          project,
          ".sf",
          "milestones",
          "M001",
          "slices",
          "S02-research-work",
        ),
      ),
      false,
    );
    // One issue and one fix per renamed directory (milestone + slice = 2).
    assert.equal(
      issues.filter((issue) => issue.code === "legacy_plan_slug_directory")
        .length,
      2,
    );
    assert.equal(fixesApplied.length, 2);
  });
  test("normalizeLegacyPlanSlugDirectories_when_target_exists_reports_conflict_without_rename", () => {
    const project = makeProject();
    // Both the bare-ID target and the legacy slug directory exist up front,
    // so renaming would collide.
    mkdirSync(join(project, ".sf", "milestones", "M001"), { recursive: true });
    mkdirSync(join(project, ".sf", "milestones", "M001-long-name"), {
      recursive: true,
    });
    const issues = [];
    const fixesApplied = [];
    normalizeLegacyPlanSlugDirectories(
      project,
      issues,
      fixesApplied,
      (code) => code === "legacy_plan_slug_directory",
    );
    const issue = issues.find(
      (candidate) => candidate.code === "legacy_plan_slug_directory",
    );
    // Conflict is reported as non-fixable; nothing is renamed or applied.
    assert.equal(issue?.fixable, false);
    assert.match(issue?.message ?? "", /target already exists/);
    assert.equal(
      existsSync(join(project, ".sf", "milestones", "M001-long-name")),
      true,
    );
    assert.deepEqual(fixesApplied, []);
  });
});

View file

@ -0,0 +1,111 @@
import assert from "node:assert/strict";
import { execFileSync } from "node:child_process";
import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { afterEach, describe, test } from "vitest";
import { preDispatchHealthGate } from "../doctor-proactive.js";
import {
formatProtectedSnapshotDeletionMessage,
listProtectedSnapshotDeletions,
} from "../snapshot-safety.js";
// Temp repositories created by makeRepo; removed in afterEach.
const tmpDirs = [];

/**
 * Run a git command in `cwd` with a deterministic author/committer identity
 * and return trimmed stdout. Entries in `env` override the defaults.
 */
function git(cwd, args, env = {}) {
  const mergedEnv = {
    ...process.env,
    GIT_AUTHOR_NAME: "SF Test",
    GIT_AUTHOR_EMAIL: "sf-test@example.invalid",
    GIT_COMMITTER_NAME: "SF Test",
    GIT_COMMITTER_EMAIL: "sf-test@example.invalid",
    ...env,
  };
  const stdout = execFileSync("git", args, {
    cwd,
    stdio: ["ignore", "pipe", "pipe"],
    encoding: "utf-8",
    env: mergedEnv,
  });
  return stdout.trim();
}

/** Initialize a fresh repository on branch "main", registered for cleanup. */
function makeRepo() {
  const repoDir = mkdtempSync(join(tmpdir(), "sf-snapshot-safety-"));
  tmpDirs.push(repoDir);
  git(repoDir, ["init", "-b", "main"]);
  return repoDir;
}

afterEach(() => {
  // Drain the list so repeated cleanup runs stay idempotent.
  for (let dir = tmpDirs.pop(); dir !== undefined; dir = tmpDirs.pop()) {
    rmSync(dir, { recursive: true, force: true });
  }
});
describe("snapshot safety", () => {
  test("listProtectedSnapshotDeletions_when_extension_declaration_deleted_returns_path", () => {
    const repo = makeRepo();
    const dts = join(
      repo,
      "src/resources/extensions/sf/code-intelligence.d.ts",
    );
    mkdirSync(join(repo, "src/resources/extensions/sf"), { recursive: true });
    writeFileSync(dts, "export function codebaseSearch(): void;\n");
    git(repo, ["add", "."]);
    git(repo, ["commit", "-m", "seed"]);
    // Deleting a tracked protected declaration must surface its repo path.
    rmSync(dts);
    assert.deepEqual(listProtectedSnapshotDeletions(repo), [
      "src/resources/extensions/sf/code-intelligence.d.ts",
    ]);
  });
  test("listProtectedSnapshotDeletions_when_unrelated_declaration_deleted_ignores_path", () => {
    const repo = makeRepo();
    // A .d.ts outside src/resources/extensions must not match the pathspec.
    const dts = join(repo, "src/generated/types.d.ts");
    mkdirSync(join(repo, "src/generated"), { recursive: true });
    writeFileSync(dts, "export type Generated = string;\n");
    git(repo, ["add", "."]);
    git(repo, ["commit", "-m", "seed"]);
    rmSync(dts);
    assert.deepEqual(listProtectedSnapshotDeletions(repo), []);
  });
  test("formatProtectedSnapshotDeletionMessage_when_many_paths_limits_output", () => {
    // 10 paths: message shows the first 8 and summarizes the remaining 2.
    const paths = Array.from(
      { length: 10 },
      (_, i) => `src/resources/extensions/sf/file-${i}.d.ts`,
    );
    const message = formatProtectedSnapshotDeletionMessage(paths);
    assert.match(message, /refusing automated snapshot/);
    assert.match(message, /file-0\.d\.ts/);
    assert.match(message, /and 2 more/);
  });
  test("preDispatchHealthGate_when_protected_declaration_deleted_blocks_snapshot", async () => {
    const repo = makeRepo();
    const dts = join(
      repo,
      "src/resources/extensions/sf/code-intelligence.d.ts",
    );
    mkdirSync(join(repo, "src/resources/extensions/sf"), { recursive: true });
    writeFileSync(dts, "export function codebaseSearch(): void;\n");
    git(repo, ["add", "."]);
    // Backdate the commit so the gate sees long-stale uncommitted changes.
    git(repo, ["commit", "-m", "seed"], {
      GIT_AUTHOR_DATE: "2020-01-01T00:00:00Z",
      GIT_COMMITTER_DATE: "2020-01-01T00:00:00Z",
    });
    rmSync(dts);
    const result = await preDispatchHealthGate(repo);
    // The gate must refuse to proceed and, crucially, must not have created
    // a snapshot commit — history still contains only the seed commit.
    assert.equal(result.proceed, false);
    assert.match(result.reason, /Protected declaration deletions detected/);
    assert.equal(git(repo, ["log", "--oneline"]).split("\n").length, 1);
  });
});