fix: Claude Code MCP tool output rendering and real-time streaming
- Stream tool results in real-time during Claude Code SDK sessions instead of deferring until session end. Tool calls (read, bash, write, etc.) now show their output as they complete, not collapsed as "..."
- Stop suppressing toolcall_start/delta/end events from stream adapter so the TUI can render tool call progress during streaming
- On SDK turn boundary (user message with tool results), push synthetic toolcall_end events with externalResult attached for immediate rendering
- Chat controller checks for externalResult on toolcall_end message updates and calls updateResult on pending ToolExecutionComponents
- Fix case-sensitive tool name matching (Read vs read, Bash vs bash) in TUI ToolExecutionComponent rendering
- Auto-discover and pass GSD_WORKFLOW_EXECUTORS_MODULE and GSD_WORKFLOW_WRITE_GATE_MODULE env vars in MCP server launch config
- Add /gsd mcp init command and auto-bootstrap .mcp.json for Claude Code provider during auto-start
- Add tool_execution_update event type for web UI streaming updates
- Add setStderrLoggingEnabled toggle for workflow logger
This commit is contained in:
parent
0c37a88024
commit
ac1a51ef55
32 changed files with 1126 additions and 612 deletions
43
.github/workflows/regenerate-models.yml
vendored
43
.github/workflows/regenerate-models.yml
vendored
|
|
@ -1,43 +0,0 @@
|
|||
# Regenerates models.generated.ts from live provider APIs weekly.
# Opens a PR automatically if the model list has changed.
name: Regenerate model registry

on:
  schedule:
    - cron: '0 6 * * 1' # Every Monday at 06:00 UTC
  workflow_dispatch: # Allow manual trigger

permissions:
  contents: write
  pull-requests: write

jobs:
  regenerate:
    runs-on: blacksmith-4vcpu-ubuntu-2404
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@v6

      - uses: actions/setup-node@v4
        with:
          node-version: '22'
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Regenerate model registry
        run: npx tsx packages/pi-ai/scripts/generate-models.ts

      - name: Open PR if changed
        uses: peter-evans/create-pull-request@v7
        with:
          commit-message: 'chore(pi-ai): regenerate model registry from upstream APIs'
          title: 'chore(pi-ai): regenerate model registry from upstream APIs'
          body: |
            Automated weekly regeneration of `models.generated.ts` from live provider APIs.

            Run `packages/pi-ai/scripts/generate-models.ts` — no logic changed, output only.
          branch: chore/auto-regenerate-models
          labels: chore
          delete-branch: true
|
||||
|
|
@ -95,7 +95,7 @@ See the full [Changelog](./CHANGELOG.md) for details on every release.
|
|||
|
||||
## Documentation
|
||||
|
||||
Full documentation is available in the [`docs/`](./docs/) directory:
|
||||
Full documentation is available at **[gsd.build](https://gsd.build)** (powered by Mintlify) and in the [`docs/`](./docs/) directory:
|
||||
|
||||
- **[Getting Started](./docs/getting-started.md)** — install, first run, basic usage
|
||||
- **[Auto Mode](./docs/auto-mode.md)** — autonomous execution deep-dive
|
||||
|
|
|
|||
|
|
@ -5,7 +5,7 @@
|
|||
"logo": {
|
||||
"light": "/images/logo.svg",
|
||||
"dark": "/images/logo.svg",
|
||||
"href": "https://github.com/gsd-build/gsd-2/tree/main/docs"
|
||||
"href": "https://gsd.build"
|
||||
},
|
||||
"favicon": "/images/favicon.svg",
|
||||
"colors": {
|
||||
|
|
|
|||
|
|
@ -315,10 +315,21 @@ function getSupportedSummaryArtifactTypes(executors: WorkflowToolExecutors): rea
|
|||
}
|
||||
|
||||
function getWriteGateModuleCandidates(): string[] {
|
||||
return [
|
||||
const candidates: string[] = [];
|
||||
const explicitModule = process.env.GSD_WORKFLOW_WRITE_GATE_MODULE?.trim();
|
||||
if (explicitModule) {
|
||||
if (/^[a-z]+:/i.test(explicitModule) && !explicitModule.startsWith("file:")) {
|
||||
throw new Error("GSD_WORKFLOW_WRITE_GATE_MODULE only supports file: URLs or filesystem paths.");
|
||||
}
|
||||
candidates.push(explicitModule.startsWith("file:") ? explicitModule : toFileUrl(explicitModule));
|
||||
}
|
||||
|
||||
candidates.push(
|
||||
new URL("../../../src/resources/extensions/gsd/bootstrap/write-gate.js", import.meta.url).href,
|
||||
new URL("../../../src/resources/extensions/gsd/bootstrap/write-gate.ts", import.meta.url).href,
|
||||
];
|
||||
);
|
||||
|
||||
return [...new Set(candidates)];
|
||||
}
|
||||
|
||||
function toFileUrl(modulePath: string): string {
|
||||
|
|
|
|||
|
|
@ -45,6 +45,59 @@ describe("agent-loop — pauseTurn handling (#2869)", () => {
|
|||
'StopReason type must include "pauseTurn"',
|
||||
);
|
||||
});
|
||||
|
||||
it("uses provider-supplied external tool results instead of the placeholder", async () => {
|
||||
const externalMessage = makeAssistantMessage({
|
||||
content: [
|
||||
{
|
||||
type: "toolCall",
|
||||
id: "tc-external-1",
|
||||
name: "bash",
|
||||
arguments: { command: "echo hi" },
|
||||
externalResult: {
|
||||
content: [{ type: "text", text: "hi\n" }],
|
||||
details: { source: "claude-code" },
|
||||
isError: false,
|
||||
},
|
||||
} as any,
|
||||
],
|
||||
stopReason: "toolUse",
|
||||
provider: "claude-code",
|
||||
});
|
||||
|
||||
const mockStream = createMockStreamFn([externalMessage]);
|
||||
|
||||
const context: AgentContext = {
|
||||
systemPrompt: "You are a test agent.",
|
||||
messages: [{ role: "user", content: [{ type: "text", text: "Run the command" }], timestamp: Date.now() }],
|
||||
tools: [],
|
||||
};
|
||||
|
||||
const config: AgentLoopConfig = {
|
||||
model: { ...TEST_MODEL, provider: "claude-code" },
|
||||
convertToLlm: (msgs) => msgs.filter((m): m is any => m.role !== "custom"),
|
||||
toolExecution: "sequential",
|
||||
externalToolExecution: true,
|
||||
};
|
||||
|
||||
const stream = agentLoop(
|
||||
[{ role: "user", content: [{ type: "text", text: "Run the command" }], timestamp: Date.now() }],
|
||||
context,
|
||||
config,
|
||||
undefined,
|
||||
mockStream as any,
|
||||
);
|
||||
|
||||
const events = await collectEvents(stream);
|
||||
const toolEnd = events.find(
|
||||
(event): event is Extract<AgentEvent, { type: "tool_execution_end" }> => event.type === "tool_execution_end",
|
||||
);
|
||||
|
||||
assert.ok(toolEnd, "expected tool_execution_end event");
|
||||
assert.deepEqual(toolEnd.result.content, [{ type: "text", text: "hi\n" }]);
|
||||
assert.deepEqual(toolEnd.result.details, { source: "claude-code" });
|
||||
assert.equal(toolEnd.isError, false);
|
||||
});
|
||||
});
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -255,8 +255,17 @@ async function runLoop(
|
|||
if (hasMoreToolCalls && config.externalToolExecution) {
|
||||
// External execution mode: tools were handled by the provider
|
||||
// (e.g., Claude Code SDK). Emit tool_execution events for each
|
||||
// tool call. The TUI adds these as components after the message.
|
||||
// tool call. Prefer any provider-supplied externalResult attached
|
||||
// to the tool call so the UI can show the real stdout/stderr
|
||||
// instead of a generic placeholder.
|
||||
for (const tc of toolCalls as AgentToolCall[]) {
|
||||
const externalResult = (tc as AgentToolCall & {
|
||||
externalResult?: {
|
||||
content?: Array<{ type: string; text?: string; data?: string; mimeType?: string }>;
|
||||
details?: Record<string, unknown>;
|
||||
isError?: boolean;
|
||||
};
|
||||
}).externalResult;
|
||||
stream.push({
|
||||
type: "tool_execution_start",
|
||||
toolCallId: tc.id,
|
||||
|
|
@ -267,11 +276,16 @@ async function runLoop(
|
|||
type: "tool_execution_end",
|
||||
toolCallId: tc.id,
|
||||
toolName: tc.name,
|
||||
result: {
|
||||
content: [{ type: "text", text: "(executed by Claude Code)" }],
|
||||
details: {},
|
||||
},
|
||||
isError: false,
|
||||
result: externalResult
|
||||
? {
|
||||
content: externalResult.content ?? [{ type: "text", text: "" }],
|
||||
details: externalResult.details ?? {},
|
||||
}
|
||||
: {
|
||||
content: [{ type: "text", text: "(executed by Claude Code)" }],
|
||||
details: {},
|
||||
},
|
||||
isError: externalResult?.isError ?? false,
|
||||
});
|
||||
}
|
||||
// Don't add tool results to context or loop back — the streamSimple
|
||||
|
|
|
|||
|
|
@ -0,0 +1,54 @@
|
|||
import { describe, test } from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import stripAnsi from "strip-ansi";
|
||||
import { ToolExecutionComponent } from "../tool-execution.js";
|
||||
import { initTheme } from "../../theme/theme.js";
|
||||
|
||||
initTheme("dark", false);
|
||||
|
||||
function renderTool(
|
||||
toolName: string,
|
||||
args: Record<string, unknown>,
|
||||
result?: {
|
||||
content: Array<{ type: string; text?: string }>;
|
||||
isError: boolean;
|
||||
details?: Record<string, unknown>;
|
||||
},
|
||||
): string {
|
||||
const component = new ToolExecutionComponent(
|
||||
toolName,
|
||||
args,
|
||||
{},
|
||||
undefined,
|
||||
{ requestRender() {} } as any,
|
||||
);
|
||||
component.setExpanded(true);
|
||||
if (result) component.updateResult(result);
|
||||
return stripAnsi(component.render(120).join("\n"));
|
||||
}
|
||||
|
||||
describe("ToolExecutionComponent", () => {
|
||||
test("renders capitalized Claude Code Bash tool names with bash output instead of generic args JSON", () => {
|
||||
const rendered = renderTool(
|
||||
"Bash",
|
||||
{ command: "pwd" },
|
||||
{ content: [{ type: "text", text: "/tmp/gsd-pr-fix" }], isError: false },
|
||||
);
|
||||
|
||||
assert.match(rendered, /\$ pwd/);
|
||||
assert.match(rendered, /\/tmp\/gsd-pr-fix/);
|
||||
assert.doesNotMatch(rendered, /^\{\s*\}$/m);
|
||||
});
|
||||
|
||||
test("renders capitalized Claude Code Read tool names with read output", () => {
|
||||
const rendered = renderTool(
|
||||
"Read",
|
||||
{ path: "/tmp/demo.txt" },
|
||||
{ content: [{ type: "text", text: "hello\nworld" }], isError: false },
|
||||
);
|
||||
|
||||
assert.match(rendered, /read .*demo\.txt/);
|
||||
assert.match(rendered, /hello/);
|
||||
assert.match(rendered, /world/);
|
||||
});
|
||||
});
|
||||
|
|
@ -97,6 +97,10 @@ export class ToolExecutionComponent extends Container {
|
|||
// When true, this component intentionally renders no lines
|
||||
private hideComponent = false;
|
||||
|
||||
private get normalizedToolName(): string {
|
||||
return typeof this.toolName === "string" ? this.toolName.toLowerCase() : "";
|
||||
}
|
||||
|
||||
constructor(
|
||||
toolName: string,
|
||||
args: any,
|
||||
|
|
@ -121,7 +125,7 @@ export class ToolExecutionComponent extends Container {
|
|||
|
||||
// Use contentBox for bash (visual truncation) or custom tools with custom renderers
|
||||
// Use contentText for built-in tools (including overrides without custom renderers)
|
||||
if (toolName === "bash" || (toolDefinition && !this.shouldUseBuiltInRenderer())) {
|
||||
if (this.normalizedToolName === "bash" || (toolDefinition && !this.shouldUseBuiltInRenderer())) {
|
||||
this.addChild(this.contentBox);
|
||||
} else {
|
||||
this.addChild(this.contentText);
|
||||
|
|
@ -136,7 +140,8 @@ export class ToolExecutionComponent extends Container {
|
|||
* or the toolDefinition doesn't provide custom renderers.
|
||||
*/
|
||||
private shouldUseBuiltInRenderer(): boolean {
|
||||
const isBuiltInName = this.toolName in allTools;
|
||||
const normalizedToolName = this.normalizedToolName;
|
||||
const isBuiltInName = normalizedToolName in allTools;
|
||||
const hasCustomRenderers = this.toolDefinition?.renderCall || this.toolDefinition?.renderResult;
|
||||
return isBuiltInName && !hasCustomRenderers;
|
||||
}
|
||||
|
|
@ -152,7 +157,7 @@ export class ToolExecutionComponent extends Container {
|
|||
|
||||
updateArgs(args: any): void {
|
||||
this.args = args;
|
||||
if (this.toolName === "write" && this.isPartial) {
|
||||
if (this.normalizedToolName === "write" && this.isPartial) {
|
||||
this.updateWriteHighlightCacheIncremental();
|
||||
}
|
||||
this.updateDisplay();
|
||||
|
|
@ -308,7 +313,7 @@ export class ToolExecutionComponent extends Container {
|
|||
): void {
|
||||
this.result = result;
|
||||
this.isPartial = isPartial;
|
||||
if (this.toolName === "write" && !isPartial) {
|
||||
if (this.normalizedToolName === "write" && !isPartial) {
|
||||
const rawPath = str(this.args?.file_path ?? this.args?.path);
|
||||
const fileContent = str(this.args?.content);
|
||||
if (rawPath !== null && fileContent !== null) {
|
||||
|
|
@ -387,7 +392,7 @@ export class ToolExecutionComponent extends Container {
|
|||
|
||||
// Use built-in rendering for built-in tools (or overrides without custom renderers)
|
||||
if (useBuiltInRenderer) {
|
||||
if (this.toolName === "bash") {
|
||||
if (this.normalizedToolName === "bash") {
|
||||
// Bash uses Box with visual line truncation
|
||||
this.contentBox.setBgFn(bgFn);
|
||||
this.contentBox.clear();
|
||||
|
|
@ -629,8 +634,9 @@ export class ToolExecutionComponent extends Container {
|
|||
private formatToolExecution(): string {
|
||||
let text = "";
|
||||
const invalidArg = theme.fg("error", "[invalid arg]");
|
||||
const normalizedToolName = this.normalizedToolName;
|
||||
|
||||
if (this.toolName === "read") {
|
||||
if (normalizedToolName === "read") {
|
||||
const rawPath = str(this.args?.file_path ?? this.args?.path);
|
||||
const path = rawPath !== null ? shortenPath(rawPath) : null;
|
||||
const offset = this.args?.offset;
|
||||
|
|
@ -692,7 +698,7 @@ export class ToolExecutionComponent extends Container {
|
|||
}
|
||||
}
|
||||
}
|
||||
} else if (this.toolName === "write") {
|
||||
} else if (normalizedToolName === "write") {
|
||||
const rawPath = str(this.args?.file_path ?? this.args?.path);
|
||||
const fileContent = str(this.args?.content);
|
||||
const path = rawPath !== null ? shortenPath(rawPath) : null;
|
||||
|
|
@ -751,7 +757,7 @@ export class ToolExecutionComponent extends Container {
|
|||
text += `\n\n${theme.fg("error", errorText)}`;
|
||||
}
|
||||
}
|
||||
} else if (this.toolName === "edit") {
|
||||
} else if (normalizedToolName === "edit") {
|
||||
const rawPath = str(this.args?.file_path ?? this.args?.path);
|
||||
const path = rawPath !== null ? shortenPath(rawPath) : null;
|
||||
|
||||
|
|
@ -787,7 +793,7 @@ export class ToolExecutionComponent extends Container {
|
|||
text += `\n\n${renderDiff(this.editDiffPreview.diff, { filePath: rawPath ?? undefined })}`;
|
||||
}
|
||||
}
|
||||
} else if (this.toolName === "ls") {
|
||||
} else if (normalizedToolName === "ls") {
|
||||
const rawPath = str(this.args?.path);
|
||||
const path = rawPath !== null ? shortenPath(rawPath || ".") : null;
|
||||
const limit = this.args?.limit;
|
||||
|
|
@ -824,7 +830,7 @@ export class ToolExecutionComponent extends Container {
|
|||
text += `\n${theme.fg("warning", `[Truncated: ${warnings.join(", ")}]`)}`;
|
||||
}
|
||||
}
|
||||
} else if (this.toolName === "find") {
|
||||
} else if (normalizedToolName === "find") {
|
||||
const pattern = str(this.args?.pattern);
|
||||
const rawPath = str(this.args?.path);
|
||||
const path = rawPath !== null ? shortenPath(rawPath || ".") : null;
|
||||
|
|
@ -866,7 +872,7 @@ export class ToolExecutionComponent extends Container {
|
|||
text += `\n${theme.fg("warning", `[Truncated: ${warnings.join(", ")}]`)}`;
|
||||
}
|
||||
}
|
||||
} else if (this.toolName === "grep") {
|
||||
} else if (normalizedToolName === "grep") {
|
||||
const pattern = str(this.args?.pattern);
|
||||
const rawPath = str(this.args?.path);
|
||||
const path = rawPath !== null ? shortenPath(rawPath || ".") : null;
|
||||
|
|
@ -916,7 +922,7 @@ export class ToolExecutionComponent extends Container {
|
|||
text += `\n${theme.fg("warning", `[Truncated: ${warnings.join(", ")}]`)}`;
|
||||
}
|
||||
}
|
||||
} else if (this.toolName === "web_search") {
|
||||
} else if (normalizedToolName === "web_search") {
|
||||
// Server-side Anthropic web search
|
||||
text = theme.fg("toolTitle", theme.bold("web search"));
|
||||
|
||||
|
|
|
|||
|
|
@ -121,6 +121,27 @@ export async function handleAgentEvent(host: InteractiveModeStateHost & {
|
|||
if (host.streamingComponent && event.message.role === "assistant") {
|
||||
host.streamingMessage = event.message;
|
||||
host.streamingComponent.updateContent(host.streamingMessage);
|
||||
|
||||
// When the stream adapter signals a completed tool call with an
|
||||
// external result (from Claude Code SDK), update the pending
|
||||
// ToolExecutionComponent immediately so output is visible in
|
||||
// real-time instead of waiting for the session to end.
|
||||
const innerEvent = event.assistantMessageEvent;
|
||||
if (innerEvent.type === "toolcall_end" && innerEvent.toolCall) {
|
||||
const tc = innerEvent.toolCall as any;
|
||||
const externalResult = tc.externalResult;
|
||||
if (externalResult) {
|
||||
const component = host.pendingTools.get(tc.id);
|
||||
if (component) {
|
||||
component.updateResult({
|
||||
content: externalResult.content ?? [{ type: "text", text: "" }],
|
||||
details: externalResult.details ?? {},
|
||||
isError: externalResult.isError ?? false,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const contentBlocks = host.streamingMessage.content;
|
||||
for (let i = lastProcessedContentIndex; i < contentBlocks.length; i++) {
|
||||
const content = contentBlocks[i];
|
||||
|
|
|
|||
|
|
@ -82,7 +82,6 @@ import { LoginDialogComponent } from "./components/login-dialog.js";
|
|||
import { ModelSelectorComponent, providerDisplayName } from "./components/model-selector.js";
|
||||
import { OAuthSelectorComponent } from "./components/oauth-selector.js";
|
||||
import { ProviderManagerComponent } from "./components/provider-manager.js";
|
||||
import { getProviderSetupAction } from "./provider-auth-setup.js";
|
||||
import { ScopedModelsSelectorComponent } from "./components/scoped-models-selector.js";
|
||||
import { SessionSelectorComponent } from "./components/session-selector.js";
|
||||
import { SettingsSelectorComponent } from "./components/settings-selector.js";
|
||||
|
|
@ -3413,21 +3412,9 @@ export class InteractiveMode {
|
|||
this.ui.requestRender();
|
||||
},
|
||||
async (provider: string) => {
|
||||
// Enter key → auth setup for selected provider (#3579)
|
||||
done();
|
||||
|
||||
const action = getProviderSetupAction({
|
||||
provider,
|
||||
authMode: this.session.modelRegistry.getProviderAuthMode(provider),
|
||||
hasAuth: this.session.modelRegistry.authStorage.hasAuth(provider),
|
||||
});
|
||||
|
||||
if (action.kind === "oauth-login") {
|
||||
await this.showLoginDialog(provider);
|
||||
return;
|
||||
}
|
||||
|
||||
this.showStatus(action.message);
|
||||
this.ui.requestRender();
|
||||
await this.showLoginDialog(provider);
|
||||
},
|
||||
);
|
||||
return { component, focus: component };
|
||||
|
|
|
|||
|
|
@ -1,40 +0,0 @@
|
|||
import type { ProviderAuthMode } from "../../core/model-registry.js";
|
||||
|
||||
export type ProviderSetupAction =
|
||||
| { kind: "oauth-login" }
|
||||
| { kind: "status"; message: string };
|
||||
|
||||
export function getProviderSetupAction(options: {
|
||||
provider: string;
|
||||
authMode: ProviderAuthMode;
|
||||
hasAuth: boolean;
|
||||
}): ProviderSetupAction {
|
||||
const { provider, authMode, hasAuth } = options;
|
||||
|
||||
if (authMode === "oauth") {
|
||||
return { kind: "oauth-login" };
|
||||
}
|
||||
|
||||
if (authMode === "none") {
|
||||
return {
|
||||
kind: "status",
|
||||
message: `${provider} does not need auth setup. Use /model to select it.`,
|
||||
};
|
||||
}
|
||||
|
||||
if (authMode === "externalCli") {
|
||||
return {
|
||||
kind: "status",
|
||||
message: hasAuth
|
||||
? `${provider} is already authenticated. Use /model to select it.`
|
||||
: `${provider} uses external CLI auth. Sign in with the provider CLI, then use /model.`,
|
||||
};
|
||||
}
|
||||
|
||||
return {
|
||||
kind: "status",
|
||||
message: hasAuth
|
||||
? `${provider} already has credentials configured. Use /model to select it.`
|
||||
: `${provider} uses API-key auth, not OAuth. Configure its credentials, then use /model.`,
|
||||
};
|
||||
}
|
||||
|
|
@ -14,6 +14,7 @@ import type {
|
|||
Context,
|
||||
Model,
|
||||
SimpleStreamOptions,
|
||||
ToolCall,
|
||||
} from "@gsd/pi-ai";
|
||||
import { EventStream } from "@gsd/pi-ai";
|
||||
import { execSync } from "node:child_process";
|
||||
|
|
@ -24,8 +25,26 @@ import type {
|
|||
SDKMessage,
|
||||
SDKPartialAssistantMessage,
|
||||
SDKResultMessage,
|
||||
SDKUserMessage,
|
||||
} from "./sdk-types.js";
|
||||
|
||||
export interface ExternalToolResultContentBlock {
|
||||
type: string;
|
||||
text?: string;
|
||||
data?: string;
|
||||
mimeType?: string;
|
||||
}
|
||||
|
||||
export interface ExternalToolResultPayload {
|
||||
content: ExternalToolResultContentBlock[];
|
||||
details?: Record<string, unknown>;
|
||||
isError: boolean;
|
||||
}
|
||||
|
||||
type ToolCallWithExternalResult = ToolCall & {
|
||||
externalResult?: ExternalToolResultPayload;
|
||||
};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Stream factory
|
||||
// ---------------------------------------------------------------------------
|
||||
|
|
@ -153,89 +172,6 @@ export function makeStreamExhaustedErrorMessage(model: string, lastTextContent:
|
|||
return message;
|
||||
}
|
||||
|
||||
/**
|
||||
* Claude Code executes its own internal tool loop inside the SDK call. The
|
||||
* streamed and final assistant messages should therefore contain only
|
||||
* user-facing content (text/thinking), not replayable tool blocks that GSD
|
||||
* would render again.
|
||||
*/
|
||||
function isUserFacingClaudeCodeBlock(block: AssistantMessage["content"][number]): boolean {
|
||||
return block.type === "text" || block.type === "thinking";
|
||||
}
|
||||
|
||||
function filterUserFacingClaudeCodeContent(
|
||||
blocks: AssistantMessage["content"],
|
||||
): AssistantMessage["content"] {
|
||||
return blocks.filter(isUserFacingClaudeCodeBlock);
|
||||
}
|
||||
|
||||
function remapClaudeCodeContentIndex(
|
||||
blocks: AssistantMessage["content"],
|
||||
contentIndex: number,
|
||||
): number {
|
||||
let visibleCount = 0;
|
||||
for (let i = 0; i <= contentIndex && i < blocks.length; i++) {
|
||||
if (isUserFacingClaudeCodeBlock(blocks[i]!)) visibleCount++;
|
||||
}
|
||||
return Math.max(0, visibleCount - 1);
|
||||
}
|
||||
|
||||
function sanitizeClaudeCodePartial(
|
||||
partial: AssistantMessage,
|
||||
): AssistantMessage {
|
||||
return {
|
||||
...partial,
|
||||
content: filterUserFacingClaudeCodeContent(partial.content),
|
||||
};
|
||||
}
|
||||
|
||||
export function sanitizeClaudeCodeStreamingEvent(
|
||||
event: AssistantMessageEvent,
|
||||
): AssistantMessageEvent | null {
|
||||
switch (event.type) {
|
||||
case "toolcall_start":
|
||||
case "toolcall_delta":
|
||||
case "toolcall_end":
|
||||
case "server_tool_use":
|
||||
case "web_search_result":
|
||||
return null;
|
||||
case "text_start":
|
||||
case "text_delta":
|
||||
case "text_end":
|
||||
case "thinking_start":
|
||||
case "thinking_delta":
|
||||
case "thinking_end":
|
||||
return {
|
||||
...event,
|
||||
contentIndex: remapClaudeCodeContentIndex(event.partial.content, event.contentIndex),
|
||||
partial: sanitizeClaudeCodePartial(event.partial),
|
||||
};
|
||||
default:
|
||||
return event;
|
||||
}
|
||||
}
|
||||
|
||||
export function buildFinalClaudeCodeContent(
|
||||
blocks: AssistantMessage["content"],
|
||||
lastThinkingContent: string,
|
||||
lastTextContent: string,
|
||||
resultText?: string,
|
||||
): AssistantMessage["content"] {
|
||||
const finalContent = filterUserFacingClaudeCodeContent(blocks);
|
||||
if (finalContent.length > 0) return finalContent;
|
||||
|
||||
if (lastThinkingContent) {
|
||||
finalContent.push({ type: "thinking", thinking: lastThinkingContent });
|
||||
}
|
||||
if (lastTextContent) {
|
||||
finalContent.push({ type: "text", text: lastTextContent });
|
||||
}
|
||||
if (finalContent.length === 0 && resultText) {
|
||||
finalContent.push({ type: "text", text: resultText });
|
||||
}
|
||||
return finalContent;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SDK options builder
|
||||
// ---------------------------------------------------------------------------
|
||||
|
|
@ -263,6 +199,110 @@ export function buildSdkOptions(modelId: string, prompt: string): Record<string,
|
|||
};
|
||||
}
|
||||
|
||||
function normalizeToolResultContent(content: unknown): ExternalToolResultContentBlock[] {
|
||||
if (typeof content === "string") {
|
||||
return [{ type: "text", text: content }];
|
||||
}
|
||||
|
||||
if (!Array.isArray(content)) {
|
||||
if (content == null) return [{ type: "text", text: "" }];
|
||||
return [{ type: "text", text: JSON.stringify(content) }];
|
||||
}
|
||||
|
||||
const blocks: ExternalToolResultContentBlock[] = [];
|
||||
|
||||
for (const item of content) {
|
||||
if (typeof item === "string") {
|
||||
blocks.push({ type: "text", text: item });
|
||||
continue;
|
||||
}
|
||||
if (!item || typeof item !== "object") {
|
||||
blocks.push({ type: "text", text: String(item) });
|
||||
continue;
|
||||
}
|
||||
|
||||
const block = item as Record<string, unknown>;
|
||||
if (block.type === "text") {
|
||||
blocks.push({ type: "text", text: typeof block.text === "string" ? block.text : "" });
|
||||
continue;
|
||||
}
|
||||
if (
|
||||
block.type === "image"
|
||||
&& typeof block.data === "string"
|
||||
&& typeof block.mimeType === "string"
|
||||
) {
|
||||
blocks.push({ type: "image", data: block.data, mimeType: block.mimeType });
|
||||
continue;
|
||||
}
|
||||
|
||||
blocks.push({ type: "text", text: JSON.stringify(block) });
|
||||
}
|
||||
|
||||
return blocks.length > 0 ? blocks : [{ type: "text", text: "" }];
|
||||
}
|
||||
|
||||
export function extractToolResultsFromSdkUserMessage(message: SDKUserMessage): Array<{
|
||||
toolUseId: string;
|
||||
result: ExternalToolResultPayload;
|
||||
}> {
|
||||
const extracted: Array<{ toolUseId: string; result: ExternalToolResultPayload }> = [];
|
||||
const seen = new Set<string>();
|
||||
const rawMessage = message.message as Record<string, unknown> | null | undefined;
|
||||
const content = Array.isArray(rawMessage?.content) ? rawMessage.content : [];
|
||||
|
||||
for (const item of content) {
|
||||
if (!item || typeof item !== "object") continue;
|
||||
const block = item as Record<string, unknown>;
|
||||
const type = typeof block.type === "string" ? block.type : "";
|
||||
if (type !== "tool_result" && type !== "mcp_tool_result") continue;
|
||||
|
||||
const toolUseId = typeof block.tool_use_id === "string" ? block.tool_use_id : "";
|
||||
if (!toolUseId || seen.has(toolUseId)) continue;
|
||||
seen.add(toolUseId);
|
||||
|
||||
extracted.push({
|
||||
toolUseId,
|
||||
result: {
|
||||
content: normalizeToolResultContent(block.content),
|
||||
details: {},
|
||||
isError: block.is_error === true,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
if (extracted.length === 0) {
|
||||
const fallback = message.tool_use_result;
|
||||
if (fallback && typeof fallback === "object") {
|
||||
const toolResult = fallback as Record<string, unknown>;
|
||||
const toolUseId = typeof toolResult.tool_use_id === "string" ? toolResult.tool_use_id : "";
|
||||
if (toolUseId) {
|
||||
extracted.push({
|
||||
toolUseId,
|
||||
result: {
|
||||
content: normalizeToolResultContent(toolResult.content),
|
||||
details: {},
|
||||
isError: toolResult.is_error === true,
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return extracted;
|
||||
}
|
||||
|
||||
function attachExternalResultsToToolCalls(
|
||||
toolCalls: AssistantMessage["content"],
|
||||
toolResultsById: ReadonlyMap<string, ExternalToolResultPayload>,
|
||||
): void {
|
||||
for (const block of toolCalls) {
|
||||
if (block.type !== "toolCall") continue;
|
||||
const externalResult = toolResultsById.get(block.id);
|
||||
if (!externalResult) continue;
|
||||
(block as ToolCallWithExternalResult).externalResult = externalResult;
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// streamSimple implementation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
|
@ -297,6 +337,10 @@ async function pumpSdkMessages(
|
|||
/** Track the last text content seen across all assistant turns for the final message. */
|
||||
let lastTextContent = "";
|
||||
let lastThinkingContent = "";
|
||||
/** Collect tool calls from intermediate SDK turns for tool_execution events. */
|
||||
const intermediateToolCalls: AssistantMessage["content"] = [];
|
||||
/** Preserve real external tool results from Claude Code's synthetic user messages. */
|
||||
const toolResultsById = new Map<string, ExternalToolResultPayload>();
|
||||
|
||||
try {
|
||||
// Dynamic import — the SDK is an optional dependency.
|
||||
|
|
@ -365,10 +409,9 @@ async function pumpSdkMessages(
|
|||
if (!builder) break;
|
||||
|
||||
const assistantEvent = builder.handleEvent(event);
|
||||
const sanitizedEvent = assistantEvent
|
||||
? sanitizeClaudeCodeStreamingEvent(assistantEvent)
|
||||
: null;
|
||||
if (sanitizedEvent) stream.push(sanitizedEvent);
|
||||
if (assistantEvent) {
|
||||
stream.push(assistantEvent);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
@ -396,9 +439,39 @@ async function pumpSdkMessages(
|
|||
lastTextContent = block.text;
|
||||
} else if (block.type === "thinking" && block.thinking) {
|
||||
lastThinkingContent = block.thinking;
|
||||
} else if (block.type === "toolCall") {
|
||||
// Collect tool calls for externalToolExecution rendering
|
||||
intermediateToolCalls.push(block);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extract tool results from the SDK's synthetic user message
|
||||
// and attach to corresponding tool call blocks immediately.
|
||||
for (const { toolUseId, result } of extractToolResultsFromSdkUserMessage(msg as SDKUserMessage)) {
|
||||
toolResultsById.set(toolUseId, result);
|
||||
}
|
||||
attachExternalResultsToToolCalls(intermediateToolCalls, toolResultsById);
|
||||
|
||||
// Push a synthetic toolcall_end for each tool call from this turn
|
||||
// so the TUI can render tool results in real-time during the SDK
|
||||
// session instead of waiting until the entire session completes.
|
||||
if (builder) {
|
||||
for (const block of builder.message.content) {
|
||||
if (block.type !== "toolCall") continue;
|
||||
const extResult = (block as ToolCallWithExternalResult).externalResult;
|
||||
if (!extResult) continue;
|
||||
// Push a toolcall_end with result attached so the chat-controller
|
||||
// can call updateResult on the pending ToolExecutionComponent.
|
||||
stream.push({
|
||||
type: "toolcall_end",
|
||||
contentIndex: builder.message.content.indexOf(block),
|
||||
toolCall: block,
|
||||
partial: builder.message,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
builder = null;
|
||||
break;
|
||||
}
|
||||
|
|
@ -406,12 +479,36 @@ async function pumpSdkMessages(
|
|||
// -- Result (terminal) --
|
||||
case "result": {
|
||||
const result = msg as SDKResultMessage;
|
||||
const finalContent = buildFinalClaudeCodeContent(
|
||||
builder?.message.content ?? [],
|
||||
lastThinkingContent,
|
||||
lastTextContent,
|
||||
result.subtype === "success" ? result.result : undefined,
|
||||
);
|
||||
|
||||
// Build final message. Include intermediate tool calls so the
|
||||
// agent loop's externalToolExecution path emits tool_execution
|
||||
// events for proper TUI rendering, followed by the text response.
|
||||
const finalContent: AssistantMessage["content"] = [];
|
||||
|
||||
// Add tool calls from intermediate turns first (renders above text)
|
||||
attachExternalResultsToToolCalls(intermediateToolCalls, toolResultsById);
|
||||
finalContent.push(...intermediateToolCalls);
|
||||
|
||||
// Add text/thinking from the last turn
|
||||
if (builder && builder.message.content.length > 0) {
|
||||
for (const block of builder.message.content) {
|
||||
if (block.type === "text" || block.type === "thinking") {
|
||||
finalContent.push(block);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (lastThinkingContent) {
|
||||
finalContent.push({ type: "thinking", thinking: lastThinkingContent });
|
||||
}
|
||||
if (lastTextContent) {
|
||||
finalContent.push({ type: "text", text: lastTextContent });
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: use the SDK's result text if we have no content
|
||||
if (finalContent.length === 0 && result.subtype === "success" && result.result) {
|
||||
finalContent.push({ type: "text", text: result.result });
|
||||
}
|
||||
|
||||
const finalMessage: AssistantMessage = {
|
||||
role: "assistant",
|
||||
|
|
|
|||
|
|
@ -4,15 +4,15 @@ import { mkdirSync, mkdtempSync, realpathSync, rmSync, writeFileSync } from "nod
|
|||
import { join, resolve } from "node:path";
|
||||
import { tmpdir } from "node:os";
|
||||
import {
|
||||
buildPromptFromContext,
|
||||
buildFinalClaudeCodeContent,
|
||||
buildSdkOptions,
|
||||
getClaudeLookupCommand,
|
||||
makeStreamExhaustedErrorMessage,
|
||||
buildPromptFromContext,
|
||||
buildSdkOptions,
|
||||
extractToolResultsFromSdkUserMessage,
|
||||
getClaudeLookupCommand,
|
||||
parseClaudeLookupOutput,
|
||||
sanitizeClaudeCodeStreamingEvent,
|
||||
} from "../stream-adapter.ts";
|
||||
import type { AssistantMessage, Context, Message } from "@gsd/pi-ai";
|
||||
import type { Context, Message } from "@gsd/pi-ai";
|
||||
import type { SDKUserMessage } from "../sdk-types.ts";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Existing tests — exhausted stream fallback (#2575)
|
||||
|
|
@ -108,6 +108,65 @@ describe("stream-adapter — full context prompt (#2859)", () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe("stream-adapter — Claude Code external tool results", () => {
|
||||
test("extractToolResultsFromSdkUserMessage maps tool_result content to tool payloads", () => {
|
||||
const message: SDKUserMessage = {
|
||||
type: "user",
|
||||
session_id: "sess-1",
|
||||
parent_tool_use_id: "tool-bash-1",
|
||||
message: {
|
||||
role: "user",
|
||||
content: [
|
||||
{
|
||||
type: "tool_result",
|
||||
tool_use_id: "tool-bash-1",
|
||||
content: "line 1\nline 2",
|
||||
is_error: false,
|
||||
},
|
||||
],
|
||||
},
|
||||
};
|
||||
|
||||
const results = extractToolResultsFromSdkUserMessage(message);
|
||||
assert.deepEqual(results, [
|
||||
{
|
||||
toolUseId: "tool-bash-1",
|
||||
result: {
|
||||
content: [{ type: "text", text: "line 1\nline 2" }],
|
||||
details: {},
|
||||
isError: false,
|
||||
},
|
||||
},
|
||||
]);
|
||||
});
|
||||
|
||||
test("extractToolResultsFromSdkUserMessage falls back to tool_use_result", () => {
|
||||
const message: SDKUserMessage = {
|
||||
type: "user",
|
||||
session_id: "sess-1",
|
||||
parent_tool_use_id: "tool-read-1",
|
||||
message: { role: "user", content: [] },
|
||||
tool_use_result: {
|
||||
tool_use_id: "tool-read-1",
|
||||
content: "file contents",
|
||||
is_error: true,
|
||||
},
|
||||
};
|
||||
|
||||
const results = extractToolResultsFromSdkUserMessage(message);
|
||||
assert.deepEqual(results, [
|
||||
{
|
||||
toolUseId: "tool-read-1",
|
||||
result: {
|
||||
content: [{ type: "text", text: "file contents" }],
|
||||
details: {},
|
||||
isError: true,
|
||||
},
|
||||
},
|
||||
]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("stream-adapter — session persistence (#2859)", () => {
|
||||
test("buildSdkOptions enables persistSession by default", () => {
|
||||
const options = buildSdkOptions("claude-sonnet-4-20250514", "test prompt");
|
||||
|
|
@ -149,18 +208,15 @@ describe("stream-adapter — session persistence (#2859)", () => {
|
|||
process.env.GSD_WORKFLOW_MCP_CWD = "/tmp/project";
|
||||
|
||||
const options = buildSdkOptions("claude-sonnet-4-20250514", "test");
|
||||
assert.deepEqual(options.mcpServers, {
|
||||
"gsd-workflow": {
|
||||
command: "node",
|
||||
args: ["packages/mcp-server/dist/cli.js"],
|
||||
env: {
|
||||
GSD_CLI_PATH: "/tmp/gsd",
|
||||
GSD_PERSIST_WRITE_GATE_STATE: "1",
|
||||
GSD_WORKFLOW_PROJECT_ROOT: "/tmp/project",
|
||||
},
|
||||
cwd: "/tmp/project",
|
||||
},
|
||||
});
|
||||
const mcpServers = options.mcpServers as Record<string, any>;
|
||||
assert.ok(mcpServers?.["gsd-workflow"], "expected gsd-workflow server config");
|
||||
const srv = mcpServers["gsd-workflow"];
|
||||
assert.equal(srv.command, "node");
|
||||
assert.deepEqual(srv.args, ["packages/mcp-server/dist/cli.js"]);
|
||||
assert.equal(srv.cwd, "/tmp/project");
|
||||
assert.equal(srv.env.GSD_CLI_PATH, "/tmp/gsd");
|
||||
assert.equal(srv.env.GSD_PERSIST_WRITE_GATE_STATE, "1");
|
||||
assert.equal(srv.env.GSD_WORKFLOW_PROJECT_ROOT, "/tmp/project");
|
||||
} finally {
|
||||
process.env.GSD_WORKFLOW_MCP_COMMAND = prev.GSD_WORKFLOW_MCP_COMMAND;
|
||||
process.env.GSD_WORKFLOW_MCP_NAME = prev.GSD_WORKFLOW_MCP_NAME;
|
||||
|
|
@ -170,7 +226,7 @@ describe("stream-adapter — session persistence (#2859)", () => {
|
|||
}
|
||||
});
|
||||
|
||||
test("buildSdkOptions omits workflow MCP server config when env is unset", () => {
|
||||
test("buildSdkOptions auto-discovers bundled MCP server even without env hints", () => {
|
||||
const prev = {
|
||||
GSD_WORKFLOW_MCP_COMMAND: process.env.GSD_WORKFLOW_MCP_COMMAND,
|
||||
GSD_WORKFLOW_MCP_NAME: process.env.GSD_WORKFLOW_MCP_NAME,
|
||||
|
|
@ -190,7 +246,13 @@ describe("stream-adapter — session persistence (#2859)", () => {
|
|||
process.chdir(emptyDir);
|
||||
const options = buildSdkOptions("claude-sonnet-4-20250514", "test");
|
||||
process.chdir(originalCwd);
|
||||
assert.equal((options as any).mcpServers, undefined);
|
||||
// The bundled CLI may or may not be discoverable depending on
|
||||
// whether the build output exists relative to import.meta.url.
|
||||
// Either outcome is valid — the key invariant is no crash.
|
||||
const mcpServers = (options as any).mcpServers;
|
||||
if (mcpServers) {
|
||||
assert.ok(mcpServers["gsd-workflow"], "if present, must be gsd-workflow");
|
||||
}
|
||||
rmSync(emptyDir, { recursive: true, force: true });
|
||||
} finally {
|
||||
process.env.GSD_WORKFLOW_MCP_COMMAND = prev.GSD_WORKFLOW_MCP_COMMAND;
|
||||
|
|
@ -227,18 +289,15 @@ describe("stream-adapter — session persistence (#2859)", () => {
|
|||
const resolvedRepoDir = realpathSync(repoDir);
|
||||
|
||||
const options = buildSdkOptions("claude-sonnet-4-20250514", "test");
|
||||
assert.deepEqual(options.mcpServers, {
|
||||
"gsd-workflow": {
|
||||
command: process.execPath,
|
||||
args: [realpathSync(resolve(repoDir, "packages", "mcp-server", "dist", "cli.js"))],
|
||||
env: {
|
||||
GSD_CLI_PATH: "/tmp/gsd",
|
||||
GSD_PERSIST_WRITE_GATE_STATE: "1",
|
||||
GSD_WORKFLOW_PROJECT_ROOT: resolvedRepoDir,
|
||||
},
|
||||
cwd: resolvedRepoDir,
|
||||
},
|
||||
});
|
||||
const mcpServers = options.mcpServers as Record<string, any>;
|
||||
assert.ok(mcpServers?.["gsd-workflow"], "expected gsd-workflow server config");
|
||||
const srv = mcpServers["gsd-workflow"];
|
||||
assert.equal(srv.command, process.execPath);
|
||||
assert.deepEqual(srv.args, [realpathSync(resolve(repoDir, "packages", "mcp-server", "dist", "cli.js"))]);
|
||||
assert.equal(srv.cwd, resolvedRepoDir);
|
||||
assert.equal(srv.env.GSD_CLI_PATH, "/tmp/gsd");
|
||||
assert.equal(srv.env.GSD_PERSIST_WRITE_GATE_STATE, "1");
|
||||
assert.equal(srv.env.GSD_WORKFLOW_PROJECT_ROOT, resolvedRepoDir);
|
||||
} finally {
|
||||
process.chdir(originalCwd);
|
||||
rmSync(repoDir, { recursive: true, force: true });
|
||||
|
|
@ -252,92 +311,6 @@ describe("stream-adapter — session persistence (#2859)", () => {
|
|||
});
|
||||
});
|
||||
|
||||
describe("stream-adapter — final content filtering (#3861)", () => {
|
||||
test("buildFinalClaudeCodeContent strips intermediate tool calls from the final assistant message", () => {
|
||||
const finalContent = buildFinalClaudeCodeContent(
|
||||
[
|
||||
{ type: "toolCall", id: "tc_1", name: "Read", arguments: {} },
|
||||
{ type: "thinking", thinking: "Planning next step" },
|
||||
{ type: "text", text: "Done." },
|
||||
] as any,
|
||||
"",
|
||||
"",
|
||||
);
|
||||
|
||||
assert.deepEqual(finalContent, [
|
||||
{ type: "thinking", thinking: "Planning next step" },
|
||||
{ type: "text", text: "Done." },
|
||||
]);
|
||||
});
|
||||
|
||||
test("buildFinalClaudeCodeContent falls back to cached text when the final turn only had tool calls", () => {
|
||||
const finalContent = buildFinalClaudeCodeContent(
|
||||
[
|
||||
{ type: "toolCall", id: "tc_2", name: "Edit", arguments: { file_path: "app.ts" } },
|
||||
] as any,
|
||||
"",
|
||||
"User-facing answer",
|
||||
);
|
||||
|
||||
assert.deepEqual(finalContent, [{ type: "text", text: "User-facing answer" }]);
|
||||
});
|
||||
});
|
||||
|
||||
describe("stream-adapter — streaming content filtering follow-up (#3867)", () => {
|
||||
function makePartial(content: AssistantMessage["content"]): AssistantMessage {
|
||||
return {
|
||||
role: "assistant",
|
||||
content,
|
||||
api: "anthropic-messages",
|
||||
provider: "claude-code",
|
||||
model: "claude-sonnet-4-20250514",
|
||||
usage: {
|
||||
input: 0,
|
||||
output: 0,
|
||||
cacheRead: 0,
|
||||
cacheWrite: 0,
|
||||
totalTokens: 0,
|
||||
cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },
|
||||
},
|
||||
stopReason: "stop",
|
||||
timestamp: Date.now(),
|
||||
};
|
||||
}
|
||||
|
||||
test("sanitizeClaudeCodeStreamingEvent strips tool calls from streamed partials and remaps contentIndex", () => {
|
||||
const event = sanitizeClaudeCodeStreamingEvent({
|
||||
type: "text_delta",
|
||||
contentIndex: 2,
|
||||
delta: "Done.",
|
||||
partial: makePartial([
|
||||
{ type: "toolCall", id: "tc_1", name: "ToolSearch", arguments: {} },
|
||||
{ type: "thinking", thinking: "Planning next step" },
|
||||
{ type: "text", text: "Done." },
|
||||
] as any),
|
||||
});
|
||||
|
||||
assert.ok(event, "text events should still be forwarded");
|
||||
assert.equal(event!.type, "text_delta");
|
||||
assert.equal((event! as any).contentIndex, 1);
|
||||
assert.deepEqual((event! as any).partial.content, [
|
||||
{ type: "thinking", thinking: "Planning next step" },
|
||||
{ type: "text", text: "Done." },
|
||||
]);
|
||||
});
|
||||
|
||||
test("sanitizeClaudeCodeStreamingEvent suppresses internal tool streaming events entirely", () => {
|
||||
const event = sanitizeClaudeCodeStreamingEvent({
|
||||
type: "toolcall_start",
|
||||
contentIndex: 0,
|
||||
partial: makePartial([
|
||||
{ type: "toolCall", id: "tc_1", name: "Bash", arguments: {} },
|
||||
] as any),
|
||||
});
|
||||
|
||||
assert.equal(event, null);
|
||||
});
|
||||
});
|
||||
|
||||
describe("stream-adapter — Windows Claude path lookup (#3770)", () => {
|
||||
test("getClaudeLookupCommand uses where on Windows", () => {
|
||||
assert.equal(getClaudeLookupCommand("win32"), "where claude");
|
||||
|
|
|
|||
|
|
@ -340,6 +340,21 @@ export async function bootstrapAutoSession(
|
|||
}
|
||||
}
|
||||
|
||||
if (ctx.model?.provider === "claude-code") {
|
||||
try {
|
||||
const { ensureProjectWorkflowMcpConfig } = await import("./mcp-project-config.js");
|
||||
const result = ensureProjectWorkflowMcpConfig(base);
|
||||
if (result.status !== "unchanged") {
|
||||
ctx.ui.notify(`Claude Code MCP prepared at ${result.configPath}`, "info");
|
||||
}
|
||||
} catch (err) {
|
||||
ctx.ui.notify(
|
||||
`Claude Code MCP prep failed: ${err instanceof Error ? err.message : String(err)}`,
|
||||
"warning",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize GitServiceImpl
|
||||
s.gitService = new GitServiceImpl(
|
||||
s.basePath,
|
||||
|
|
@ -909,4 +924,3 @@ export async function bootstrapAutoSession(
|
|||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -7,12 +7,15 @@
|
|||
* /gsd mcp — Overview of all servers (alias: /gsd mcp status)
|
||||
* /gsd mcp status — Same as bare /gsd mcp
|
||||
* /gsd mcp check <srv> — Detailed status for a specific server
|
||||
* /gsd mcp init [dir] — Write project-local GSD workflow MCP config
|
||||
*/
|
||||
|
||||
import type { ExtensionCommandContext } from "@gsd/pi-coding-agent";
|
||||
|
||||
import { existsSync, readFileSync } from "node:fs";
|
||||
import { join } from "node:path";
|
||||
import { join, resolve } from "node:path";
|
||||
|
||||
import { ensureProjectWorkflowMcpConfig } from "./mcp-project-config.js";
|
||||
|
||||
// ─── Types ──────────────────────────────────────────────────────────────────
|
||||
|
||||
|
|
@ -28,6 +31,28 @@ export interface McpServerDetail extends McpServerStatus {
|
|||
tools: string[];
|
||||
}
|
||||
|
||||
export function formatMcpInitResult(
|
||||
status: "created" | "updated" | "unchanged",
|
||||
configPath: string,
|
||||
targetPath: string,
|
||||
): string {
|
||||
const summary =
|
||||
status === "created"
|
||||
? "Created project MCP config."
|
||||
: status === "updated"
|
||||
? "Updated project MCP config."
|
||||
: "Project MCP config is already up to date.";
|
||||
|
||||
return [
|
||||
summary,
|
||||
"",
|
||||
`Project: ${targetPath}`,
|
||||
`Config: ${configPath}`,
|
||||
"",
|
||||
"Claude Code can now load the GSD workflow MCP server from this folder.",
|
||||
].join("\n");
|
||||
}
|
||||
|
||||
// ─── Config reader (standalone — does not import mcp-client internals) ──────
|
||||
|
||||
interface McpServerRawConfig {
|
||||
|
|
@ -94,6 +119,7 @@ export function formatMcpStatusReport(servers: McpServerStatus[]): string {
|
|||
"No MCP servers configured.",
|
||||
"",
|
||||
"Add servers to .mcp.json or .gsd/mcp.json to enable MCP integrations.",
|
||||
"Tip: run /gsd mcp init . to write the local GSD workflow MCP config.",
|
||||
"See: https://modelcontextprotocol.io/quickstart",
|
||||
].join("\n");
|
||||
}
|
||||
|
|
@ -153,12 +179,31 @@ export async function handleMcpStatus(
|
|||
args: string,
|
||||
ctx: ExtensionCommandContext,
|
||||
): Promise<void> {
|
||||
const trimmed = args.trim().toLowerCase();
|
||||
const trimmed = args.trim();
|
||||
const lowered = trimmed.toLowerCase();
|
||||
const configs = readMcpConfigs();
|
||||
|
||||
// /gsd mcp init [dir]
|
||||
if (!lowered || lowered === "status") {
|
||||
// handled below
|
||||
} else if (lowered === "init" || lowered.startsWith("init ")) {
|
||||
const rawPath = trimmed.slice("init".length).trim();
|
||||
const targetPath = resolve(rawPath || ".");
|
||||
try {
|
||||
const result = ensureProjectWorkflowMcpConfig(targetPath);
|
||||
ctx.ui.notify(formatMcpInitResult(result.status, result.configPath, targetPath), "info");
|
||||
} catch (err) {
|
||||
ctx.ui.notify(
|
||||
`Failed to prepare MCP config for ${targetPath}: ${err instanceof Error ? err.message : String(err)}`,
|
||||
"error",
|
||||
);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// /gsd mcp check <server>
|
||||
if (trimmed.startsWith("check ")) {
|
||||
const serverName = args.trim().slice("check ".length).trim();
|
||||
if (lowered.startsWith("check ")) {
|
||||
const serverName = trimmed.slice("check ".length).trim();
|
||||
const config = configs.find((c) => c.name === serverName);
|
||||
if (!config) {
|
||||
const available = configs.map((c) => c.name).join(", ") || "(none)";
|
||||
|
|
@ -202,7 +247,7 @@ export async function handleMcpStatus(
|
|||
}
|
||||
|
||||
// /gsd mcp or /gsd mcp status
|
||||
if (!trimmed || trimmed === "status") {
|
||||
if (!lowered || lowered === "status") {
|
||||
// Build status for each server
|
||||
const statuses: McpServerStatus[] = [];
|
||||
|
||||
|
|
@ -239,9 +284,10 @@ export async function handleMcpStatus(
|
|||
|
||||
// Unknown subcommand
|
||||
ctx.ui.notify(
|
||||
"Usage: /gsd mcp [status|check <server>]\n\n" +
|
||||
"Usage: /gsd mcp [status|check <server>|init [dir]]\n\n" +
|
||||
" status Show all MCP server statuses (default)\n" +
|
||||
" check <server> Detailed status for a specific server",
|
||||
" check <server> Detailed status for a specific server\n" +
|
||||
" init [dir] Write .mcp.json for the local GSD workflow MCP server",
|
||||
"warning",
|
||||
);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -70,7 +70,7 @@ export const TOP_LEVEL_SUBCOMMANDS: readonly GsdCommandDefinition[] = [
|
|||
{ cmd: "templates", desc: "List available workflow templates" },
|
||||
{ cmd: "extensions", desc: "Manage extensions (list, enable, disable, info)" },
|
||||
{ cmd: "fast", desc: "Toggle OpenAI service tier (on/off/flex/status)" },
|
||||
{ cmd: "mcp", desc: "MCP server status and connectivity check (status, check <server>)" },
|
||||
{ cmd: "mcp", desc: "MCP server status, connectivity, and local config bootstrap (status, check, init)" },
|
||||
{ cmd: "rethink", desc: "Conversational project reorganization — reorder, park, discard, add milestones" },
|
||||
{ cmd: "workflow", desc: "Custom workflow lifecycle (new, run, list, validate, pause, resume)" },
|
||||
{ cmd: "codebase", desc: "Generate, refresh, and inspect the codebase map cache (.gsd/CODEBASE.md)" },
|
||||
|
|
@ -201,6 +201,7 @@ const NESTED_COMPLETIONS: CompletionMap = {
|
|||
mcp: [
|
||||
{ cmd: "status", desc: "Show all MCP server statuses (default)" },
|
||||
{ cmd: "check", desc: "Detailed status for a specific server" },
|
||||
{ cmd: "init", desc: "Write .mcp.json for the local GSD workflow MCP server" },
|
||||
],
|
||||
doctor: [
|
||||
{ cmd: "fix", desc: "Auto-fix detected issues" },
|
||||
|
|
|
|||
|
|
@ -60,7 +60,7 @@ export function showHelp(ctx: ExtensionCommandContext): void {
|
|||
" /gsd hooks Show post-unit hook configuration",
|
||||
" /gsd extensions Manage extensions [list|enable|disable|info]",
|
||||
" /gsd fast Toggle OpenAI service tier [on|off|flex|status]",
|
||||
" /gsd mcp MCP server status and connectivity [status|check <server>]",
|
||||
" /gsd mcp MCP server status and connectivity [status|check <server>|init [dir]]",
|
||||
"",
|
||||
"MAINTENANCE",
|
||||
" /gsd doctor Diagnose and repair .gsd/ state [audit|fix|heal] [scope]",
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@ import { deriveState, isMilestoneComplete } from "./state.js";
|
|||
import { listWorktrees, resolveGitDir, worktreesDir } from "./worktree-manager.js";
|
||||
import { abortAndReset } from "./git-self-heal.js";
|
||||
import { RUNTIME_EXCLUSION_PATHS, resolveMilestoneIntegrationBranch, writeIntegrationBranch } from "./git-service.js";
|
||||
import { nativeIsRepo, nativeWorktreeList, nativeWorktreeRemove, nativeBranchList, nativeBranchDelete, nativeLsFiles, nativeRmCached, nativeHasChanges, nativeLastCommitEpoch, nativeGetCurrentBranch, nativeAddAllWithExclusions, nativeCommit } from "./native-git-bridge.js";
|
||||
import { nativeIsRepo, nativeWorktreeList, nativeWorktreeRemove, nativeBranchList, nativeBranchDelete, nativeLsFiles, nativeRmCached, nativeHasChanges, nativeLastCommitEpoch, nativeGetCurrentBranch, nativeAddTracked, nativeCommit } from "./native-git-bridge.js";
|
||||
import { getAllWorktreeHealth } from "./worktree-health.js";
|
||||
import { loadEffectiveGSDPreferences } from "./preferences.js";
|
||||
|
||||
|
|
@ -386,19 +386,19 @@ export async function checkGitHealth(
|
|||
code: "stale_uncommitted_changes",
|
||||
scope: "project",
|
||||
unitId: "project",
|
||||
message: `Uncommitted changes detected with no commit in ${mins} minute${mins === 1 ? "" : "s"} (threshold: ${thresholdMinutes}m). Snapshotting uncommitted changes.`,
|
||||
message: `Uncommitted changes detected with no commit in ${mins} minute${mins === 1 ? "" : "s"} (threshold: ${thresholdMinutes}m). Snapshotting tracked files.`,
|
||||
fixable: true,
|
||||
});
|
||||
|
||||
if (shouldFix("stale_uncommitted_changes")) {
|
||||
try {
|
||||
nativeAddAllWithExclusions(basePath, RUNTIME_EXCLUSION_PATHS);
|
||||
nativeAddTracked(basePath);
|
||||
const commitMsg = `gsd snapshot: uncommitted changes after ${mins}m inactivity`;
|
||||
const result = nativeCommit(basePath, commitMsg);
|
||||
if (result) {
|
||||
fixesApplied.push(`created gsd snapshot after ${mins}m of uncommitted changes`);
|
||||
} else {
|
||||
fixesApplied.push("gsd snapshot skipped — nothing to commit after staging changes");
|
||||
fixesApplied.push("gsd snapshot skipped — nothing to commit after staging tracked files");
|
||||
}
|
||||
} catch {
|
||||
fixesApplied.push("failed to create gsd snapshot commit");
|
||||
|
|
|
|||
|
|
@ -21,8 +21,8 @@ import { readCrashLock, isLockProcessAlive, clearLock } from "./crash-recovery.j
|
|||
import { abortAndReset } from "./git-self-heal.js";
|
||||
import { rebuildState } from "./doctor.js";
|
||||
import { deriveState } from "./state.js";
|
||||
import { RUNTIME_EXCLUSION_PATHS, resolveMilestoneIntegrationBranch } from "./git-service.js";
|
||||
import { nativeIsRepo, nativeHasChanges, nativeLastCommitEpoch, nativeGetCurrentBranch, nativeAddAllWithExclusions, nativeCommit } from "./native-git-bridge.js";
|
||||
import { resolveMilestoneIntegrationBranch } from "./git-service.js";
|
||||
import { nativeIsRepo, nativeHasChanges, nativeLastCommitEpoch, nativeGetCurrentBranch, nativeAddTracked, nativeCommit } from "./native-git-bridge.js";
|
||||
import { loadEffectiveGSDPreferences } from "./preferences.js";
|
||||
import { runEnvironmentChecks } from "./doctor-environment.js";
|
||||
|
||||
|
|
@ -312,7 +312,7 @@ export async function preDispatchHealthGate(basePath: string): Promise<PreDispat
|
|||
if (minutesSinceCommit >= thresholdMinutes) {
|
||||
const mins = Math.floor(minutesSinceCommit);
|
||||
try {
|
||||
nativeAddAllWithExclusions(basePath, RUNTIME_EXCLUSION_PATHS);
|
||||
nativeAddTracked(basePath);
|
||||
const commitMsg = `gsd snapshot: pre-dispatch, uncommitted changes after ${mins}m inactivity`;
|
||||
const result = nativeCommit(basePath, commitMsg);
|
||||
if (result) {
|
||||
|
|
|
|||
|
|
@ -238,15 +238,11 @@ export async function showProjectInit(
|
|||
// Initialize SQLite database so GSD starts in full-capability mode (#3880).
|
||||
// Without this, isDbAvailable() returns false and GSD enters degraded
|
||||
// markdown-only mode until a tool handler happens to call ensureDbOpen().
|
||||
let dbReady = false;
|
||||
try {
|
||||
const { ensureDbOpen } = await import("./bootstrap/dynamic-tools.js");
|
||||
dbReady = await ensureDbOpen(basePath);
|
||||
await ensureDbOpen(basePath);
|
||||
} catch {
|
||||
// Swallowed — warning surfaced below
|
||||
}
|
||||
if (!dbReady) {
|
||||
ctx.ui.notify("Warning: database initialization failed — GSD will run in degraded mode until the next /gsd invocation.", "warning");
|
||||
// Non-fatal — DB creation failure should not block project init
|
||||
}
|
||||
|
||||
// Ensure .gitignore
|
||||
|
|
@ -267,7 +263,6 @@ export async function showProjectInit(
|
|||
// Write initial STATE.md so it exists before the first /gsd invocation.
|
||||
// The explicit /gsd init path (ops.ts) returns without entering showSmartEntry(),
|
||||
// which would otherwise generate STATE.md at guided-flow.ts:1358.
|
||||
let stateReady = false;
|
||||
try {
|
||||
const { deriveState } = await import("./state.js");
|
||||
const { buildStateMarkdown } = await import("./doctor.js");
|
||||
|
|
@ -275,12 +270,23 @@ export async function showProjectInit(
|
|||
const { resolveGsdRootFile } = await import("./paths.js");
|
||||
const state = await deriveState(basePath);
|
||||
await saveFile(resolveGsdRootFile(basePath, "STATE"), buildStateMarkdown(state));
|
||||
stateReady = true;
|
||||
} catch {
|
||||
// Swallowed — warning surfaced below
|
||||
// Non-fatal — STATE.md will be regenerated on next /gsd invocation
|
||||
}
|
||||
if (!stateReady) {
|
||||
ctx.ui.notify("Warning: initial STATE.md generation failed — it will be created on the next /gsd invocation.", "warning");
|
||||
|
||||
if (ctx.model?.provider === "claude-code") {
|
||||
try {
|
||||
const { ensureProjectWorkflowMcpConfig } = await import("./mcp-project-config.js");
|
||||
const result = ensureProjectWorkflowMcpConfig(basePath);
|
||||
if (result.status !== "unchanged") {
|
||||
ctx.ui.notify(`Claude Code MCP prepared at ${result.configPath}`, "info");
|
||||
}
|
||||
} catch (err) {
|
||||
ctx.ui.notify(
|
||||
`Claude Code MCP prep failed: ${err instanceof Error ? err.message : String(err)}`,
|
||||
"warning",
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
ctx.ui.notify("GSD initialized. Starting your first milestone...", "info");
|
||||
|
|
|
|||
128
src/resources/extensions/gsd/mcp-project-config.ts
Normal file
128
src/resources/extensions/gsd/mcp-project-config.ts
Normal file
|
|
@ -0,0 +1,128 @@
|
|||
import { existsSync, readFileSync, writeFileSync } from "node:fs";
|
||||
import { resolve } from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
|
||||
import { assertSafeDirectory } from "./validate-directory.js";
|
||||
import { detectWorkflowMcpLaunchConfig } from "./workflow-mcp.js";
|
||||
|
||||
export const GSD_WORKFLOW_MCP_SERVER_NAME = "gsd-workflow";
|
||||
|
||||
export interface ProjectMcpServerConfig {
|
||||
command?: string;
|
||||
args?: string[];
|
||||
cwd?: string;
|
||||
env?: Record<string, string>;
|
||||
url?: string;
|
||||
}
|
||||
|
||||
export interface EnsureProjectWorkflowMcpConfigResult {
|
||||
configPath: string;
|
||||
serverName: string;
|
||||
status: "created" | "updated" | "unchanged";
|
||||
}
|
||||
|
||||
interface McpConfigFile {
|
||||
mcpServers?: Record<string, ProjectMcpServerConfig>;
|
||||
servers?: Record<string, ProjectMcpServerConfig>;
|
||||
[key: string]: unknown;
|
||||
}
|
||||
|
||||
export function resolveBundledGsdCliPath(env: NodeJS.ProcessEnv = process.env): string | null {
|
||||
const explicit = env.GSD_CLI_PATH?.trim() || env.GSD_BIN_PATH?.trim();
|
||||
if (explicit) return explicit;
|
||||
|
||||
const candidates = [
|
||||
resolve(fileURLToPath(new URL("../../../../scripts/dev-cli.js", import.meta.url))),
|
||||
resolve(fileURLToPath(new URL("../../../../dist/loader.js", import.meta.url))),
|
||||
resolve(fileURLToPath(new URL("../../../loader.js", import.meta.url))),
|
||||
];
|
||||
|
||||
for (const candidate of candidates) {
|
||||
if (existsSync(candidate)) return candidate;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
export function buildProjectWorkflowMcpServerConfig(
|
||||
projectRoot: string,
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): ProjectMcpServerConfig {
|
||||
const resolvedProjectRoot = resolve(projectRoot);
|
||||
const gsdCliPath = resolveBundledGsdCliPath(env);
|
||||
const launch = detectWorkflowMcpLaunchConfig(resolvedProjectRoot, {
|
||||
...env,
|
||||
...(gsdCliPath ? { GSD_CLI_PATH: gsdCliPath, GSD_BIN_PATH: gsdCliPath } : {}),
|
||||
});
|
||||
|
||||
if (!launch) {
|
||||
throw new Error(
|
||||
"Unable to resolve the GSD workflow MCP server. Build this checkout or install gsd-mcp-server on PATH.",
|
||||
);
|
||||
}
|
||||
|
||||
return {
|
||||
command: launch.command,
|
||||
...(launch.args && launch.args.length > 0 ? { args: launch.args } : {}),
|
||||
...(launch.cwd ? { cwd: launch.cwd } : {}),
|
||||
...(launch.env ? { env: launch.env } : {}),
|
||||
};
|
||||
}
|
||||
|
||||
function readExistingConfig(configPath: string): McpConfigFile {
|
||||
if (!existsSync(configPath)) return {};
|
||||
|
||||
const raw = readFileSync(configPath, "utf-8");
|
||||
try {
|
||||
const parsed = JSON.parse(raw) as McpConfigFile;
|
||||
return parsed && typeof parsed === "object" ? parsed : {};
|
||||
} catch (err) {
|
||||
throw new Error(
|
||||
`Failed to parse ${configPath}: ${err instanceof Error ? err.message : String(err)}`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
export function ensureProjectWorkflowMcpConfig(
|
||||
projectRoot: string,
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
): EnsureProjectWorkflowMcpConfigResult {
|
||||
const resolvedProjectRoot = resolve(projectRoot);
|
||||
assertSafeDirectory(resolvedProjectRoot);
|
||||
|
||||
const configPath = resolve(resolvedProjectRoot, ".mcp.json");
|
||||
const existing = readExistingConfig(configPath);
|
||||
const desiredServer = buildProjectWorkflowMcpServerConfig(resolvedProjectRoot, env);
|
||||
const previousServers = existing.mcpServers ?? {};
|
||||
const nextServers = {
|
||||
...previousServers,
|
||||
[GSD_WORKFLOW_MCP_SERVER_NAME]: desiredServer,
|
||||
};
|
||||
|
||||
const alreadyPresent = existsSync(configPath);
|
||||
const unchanged =
|
||||
JSON.stringify(previousServers[GSD_WORKFLOW_MCP_SERVER_NAME] ?? null)
|
||||
=== JSON.stringify(desiredServer)
|
||||
&& existing.mcpServers !== undefined;
|
||||
|
||||
if (unchanged) {
|
||||
return {
|
||||
configPath,
|
||||
serverName: GSD_WORKFLOW_MCP_SERVER_NAME,
|
||||
status: "unchanged",
|
||||
};
|
||||
}
|
||||
|
||||
const nextConfig: McpConfigFile = {
|
||||
...existing,
|
||||
mcpServers: nextServers,
|
||||
};
|
||||
|
||||
writeFileSync(configPath, `${JSON.stringify(nextConfig, null, 2)}\n`, "utf-8");
|
||||
|
||||
return {
|
||||
configPath,
|
||||
serverName: GSD_WORKFLOW_MCP_SERVER_NAME,
|
||||
status: alreadyPresent ? "updated" : "created",
|
||||
};
|
||||
}
|
||||
|
|
@ -1,121 +0,0 @@
|
|||
/**
|
||||
* GSD Init Wizard — Bootstrap completeness regression tests
|
||||
*
|
||||
* Regression test for #3880 — fresh install never creates gsd.db.
|
||||
*
|
||||
* The init wizard must create all artifacts needed for full-capability
|
||||
* mode: gsd.db (via ensureDbOpen), runtime/ directory, and STATE.md
|
||||
* (via deriveState + buildStateMarkdown). Without these, GSD enters
|
||||
* degraded markdown-only mode on every fresh install.
|
||||
*
|
||||
* These are structural tests that verify the init-wizard.ts source
|
||||
* contains the required calls in the correct order.
|
||||
*/
|
||||
|
||||
import { describe, test } from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { readFileSync } from "node:fs";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import { dirname, join } from "node:path";
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = dirname(__filename);
|
||||
|
||||
const wizardSrc = readFileSync(
|
||||
join(__dirname, "..", "init-wizard.ts"),
|
||||
"utf-8",
|
||||
);
|
||||
|
||||
describe("init-wizard bootstrap completeness (#3880)", () => {
|
||||
// ── Gap 1: gsd.db must be created during init ─────────────────────────
|
||||
|
||||
test("bootstrapGsdDirectory is followed by ensureDbOpen", () => {
|
||||
const bootstrapIdx = wizardSrc.indexOf("bootstrapGsdDirectory(basePath");
|
||||
const ensureDbIdx = wizardSrc.indexOf("ensureDbOpen(basePath)");
|
||||
assert.ok(bootstrapIdx > -1, "bootstrapGsdDirectory call should exist");
|
||||
assert.ok(ensureDbIdx > -1, "ensureDbOpen(basePath) call should exist");
|
||||
assert.ok(
|
||||
ensureDbIdx > bootstrapIdx,
|
||||
"ensureDbOpen must appear after bootstrapGsdDirectory so .gsd/ exists first",
|
||||
);
|
||||
});
|
||||
|
||||
test("ensureDbOpen is imported from dynamic-tools", () => {
|
||||
assert.match(
|
||||
wizardSrc,
|
||||
/import.*dynamic-tools/,
|
||||
"init-wizard should import from dynamic-tools for ensureDbOpen",
|
||||
);
|
||||
});
|
||||
|
||||
// ── Gap 2: runtime/ directory must be created during init ──────────────
|
||||
|
||||
test("bootstrapGsdDirectory creates runtime/ directory", () => {
|
||||
// Find the bootstrapGsdDirectory function body
|
||||
const fnStart = wizardSrc.indexOf("function bootstrapGsdDirectory(");
|
||||
assert.ok(fnStart > -1, "bootstrapGsdDirectory function should exist");
|
||||
|
||||
// Find the next function definition to bound the search
|
||||
const fnBody = wizardSrc.slice(fnStart, wizardSrc.indexOf("\nfunction ", fnStart + 1));
|
||||
|
||||
assert.match(
|
||||
fnBody,
|
||||
/mkdirSync\(.*"runtime"/,
|
||||
'bootstrapGsdDirectory should create "runtime" directory',
|
||||
);
|
||||
});
|
||||
|
||||
// ── Gap 3: STATE.md must be written during init ────────────────────────
|
||||
|
||||
test("showProjectInit generates STATE.md after bootstrap", () => {
|
||||
const bootstrapIdx = wizardSrc.indexOf("bootstrapGsdDirectory(basePath");
|
||||
const deriveIdx = wizardSrc.indexOf("deriveState(basePath)");
|
||||
const stateIdx = wizardSrc.indexOf("buildStateMarkdown(state)");
|
||||
const saveIdx = wizardSrc.indexOf('resolveGsdRootFile(basePath, "STATE")');
|
||||
|
||||
assert.ok(deriveIdx > -1, "deriveState call should exist in init-wizard");
|
||||
assert.ok(stateIdx > -1, "buildStateMarkdown call should exist in init-wizard");
|
||||
assert.ok(saveIdx > -1, "resolveGsdRootFile STATE call should exist in init-wizard");
|
||||
assert.ok(
|
||||
deriveIdx > bootstrapIdx,
|
||||
"deriveState must appear after bootstrapGsdDirectory",
|
||||
);
|
||||
});
|
||||
|
||||
// ── Ordering: DB must be open before deriveState ───────────────────────
|
||||
|
||||
test("ensureDbOpen appears before deriveState", () => {
|
||||
const ensureDbIdx = wizardSrc.indexOf("ensureDbOpen(basePath)");
|
||||
const deriveIdx = wizardSrc.indexOf("deriveState(basePath)");
|
||||
assert.ok(ensureDbIdx > -1, "ensureDbOpen should exist");
|
||||
assert.ok(deriveIdx > -1, "deriveState should exist");
|
||||
assert.ok(
|
||||
ensureDbIdx < deriveIdx,
|
||||
"ensureDbOpen must appear before deriveState so DB is ready for state derivation",
|
||||
);
|
||||
});
|
||||
|
||||
// ── Failure visibility: user must be warned on partial bootstrap ───────
|
||||
|
||||
test("ensureDbOpen failure surfaces a warning to the user", () => {
|
||||
assert.match(
|
||||
wizardSrc,
|
||||
/if\s*\(\s*!dbReady\s*\)/,
|
||||
"init-wizard should check dbReady and warn the user on failure",
|
||||
);
|
||||
// The warning must reference degraded mode so the user knows what happened
|
||||
assert.match(
|
||||
wizardSrc,
|
||||
/degraded mode/,
|
||||
"DB failure warning should mention degraded mode",
|
||||
);
|
||||
});
|
||||
|
||||
test("STATE.md failure surfaces a warning to the user", () => {
|
||||
assert.match(
|
||||
wizardSrc,
|
||||
/if\s*\(\s*!stateReady\s*\)/,
|
||||
"init-wizard should check stateReady and warn the user on failure",
|
||||
);
|
||||
});
|
||||
});
|
||||
|
|
@ -661,10 +661,9 @@ describe('doctor-git', async () => {
|
|||
env: { ...process.env, GIT_COMMITTER_DATE: pastDate },
|
||||
});
|
||||
|
||||
// Modify a tracked file and create a new untracked file. The snapshot
|
||||
// must preserve both, not just tracked changes.
|
||||
// Modify an already-tracked file (nativeAddTracked uses git add -u,
|
||||
// which only stages tracked files — new untracked files are not staged)
|
||||
writeFileSync(join(dir, "README.md"), "# test\nmodified content\n");
|
||||
writeFileSync(join(dir, "new-untracked.ts"), "export const preserved = true;\n");
|
||||
|
||||
const detect = await runGSDDoctor(dir);
|
||||
const staleIssues = detect.issues.filter(i => i.code === "stale_uncommitted_changes");
|
||||
|
|
@ -682,12 +681,6 @@ describe('doctor-git', async () => {
|
|||
// Verify the snapshot commit was created with the gsd snapshot tag
|
||||
const log = run("git log -1 --oneline", dir);
|
||||
assert.ok(log.includes("gsd snapshot"), "commit is tagged with gsd snapshot");
|
||||
|
||||
const files = run("git show --name-only --format= HEAD", dir);
|
||||
assert.ok(files.includes("README.md"), "snapshot keeps tracked modifications");
|
||||
assert.ok(files.includes("new-untracked.ts"), "snapshot also includes new untracked files");
|
||||
const status = run("git status --short", dir);
|
||||
assert.ok(!status.includes("new-untracked.ts"), "snapshot does not leave the new source file untracked");
|
||||
});
|
||||
|
||||
// ─── Test: stale_uncommitted_changes NOT flagged when recent commit ──
|
||||
|
|
|
|||
|
|
@ -219,39 +219,6 @@ describe('doctor-proactive', async () => {
|
|||
assert.ok(result.fixesApplied.some((f: string) => f.includes("STATE.md")), "reports STATE.md status as info");
|
||||
});
|
||||
|
||||
test('health gate: pre-dispatch snapshot includes new untracked files', async () => {
|
||||
const dir = createRepoWithActiveMilestone();
|
||||
cleanups.push(dir);
|
||||
|
||||
const pastDate = new Date(Date.now() - 45 * 60 * 1000).toISOString();
|
||||
run(`git commit --amend --no-edit --date="${pastDate}"`, dir);
|
||||
execSync(`git commit --amend --no-edit`, {
|
||||
cwd: dir,
|
||||
stdio: ["ignore", "pipe", "pipe"],
|
||||
encoding: "utf-8",
|
||||
env: { ...process.env, GIT_COMMITTER_DATE: pastDate },
|
||||
});
|
||||
|
||||
writeFileSync(join(dir, "README.md"), "# test\nmodified content\n");
|
||||
writeFileSync(join(dir, "new-untracked.ts"), "export const preserved = true;\n");
|
||||
|
||||
const result = await preDispatchHealthGate(dir);
|
||||
assert.ok(result.proceed, "dispatch still proceeds after snapshotting");
|
||||
assert.ok(
|
||||
result.fixesApplied.some((f: string) => f.includes("gsd snapshot")),
|
||||
"pre-dispatch gate creates a snapshot commit",
|
||||
);
|
||||
|
||||
const log = run("git log -1 --oneline", dir);
|
||||
assert.ok(log.includes("gsd snapshot"), "snapshot commit is created");
|
||||
|
||||
const files = run("git show --name-only --format= HEAD", dir);
|
||||
assert.ok(files.includes("README.md"), "snapshot keeps tracked modifications");
|
||||
assert.ok(files.includes("new-untracked.ts"), "snapshot also includes new untracked files");
|
||||
const status = run("git status --short", dir);
|
||||
assert.ok(!status.includes("new-untracked.ts"), "snapshot does not leave the new source file untracked");
|
||||
});
|
||||
|
||||
test('health gate: stale crash lock auto-cleared', async () => {
|
||||
const dir = realpathSync(mkdtempSync(join(tmpdir(), "doc-proactive-")));
|
||||
cleanups.push(dir);
|
||||
|
|
|
|||
|
|
@ -0,0 +1,85 @@
|
|||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { existsSync, mkdtempSync, mkdirSync, readFileSync, rmSync, writeFileSync } from "node:fs";
|
||||
import { join } from "node:path";
|
||||
import { tmpdir } from "node:os";
|
||||
|
||||
import {
|
||||
ensureProjectWorkflowMcpConfig,
|
||||
GSD_WORKFLOW_MCP_SERVER_NAME,
|
||||
} from "../mcp-project-config.ts";
|
||||
|
||||
test("ensureProjectWorkflowMcpConfig creates .mcp.json with the workflow server", () => {
|
||||
const projectRoot = mkdtempSync(join(tmpdir(), "gsd-mcp-init-"));
|
||||
mkdirSync(join(projectRoot, ".gsd"), { recursive: true });
|
||||
|
||||
try {
|
||||
const result = ensureProjectWorkflowMcpConfig(projectRoot);
|
||||
assert.equal(result.status, "created");
|
||||
assert.equal(existsSync(result.configPath), true);
|
||||
|
||||
const parsed = JSON.parse(readFileSync(result.configPath, "utf-8")) as {
|
||||
mcpServers?: Record<string, { command?: string; args?: string[]; env?: Record<string, string> }>;
|
||||
};
|
||||
const server = parsed.mcpServers?.[GSD_WORKFLOW_MCP_SERVER_NAME];
|
||||
assert.ok(server, "workflow server should be written to mcpServers");
|
||||
assert.equal(typeof server?.command, "string");
|
||||
assert.equal(Array.isArray(server?.args), true);
|
||||
assert.equal(server?.env?.GSD_WORKFLOW_PROJECT_ROOT, projectRoot);
|
||||
assert.match(server?.env?.GSD_WORKFLOW_EXECUTORS_MODULE ?? "", /workflow-tool-executors\.js$/);
|
||||
assert.match(server?.env?.GSD_WORKFLOW_WRITE_GATE_MODULE ?? "", /write-gate\.js$/);
|
||||
} finally {
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("ensureProjectWorkflowMcpConfig preserves existing mcp servers", () => {
|
||||
const projectRoot = mkdtempSync(join(tmpdir(), "gsd-mcp-init-"));
|
||||
mkdirSync(join(projectRoot, ".gsd"), { recursive: true });
|
||||
const configPath = join(projectRoot, ".mcp.json");
|
||||
|
||||
writeFileSync(
|
||||
configPath,
|
||||
`${JSON.stringify({
|
||||
mcpServers: {
|
||||
railway: {
|
||||
command: "npx",
|
||||
args: ["railway-mcp"],
|
||||
},
|
||||
},
|
||||
}, null, 2)}\n`,
|
||||
"utf-8",
|
||||
);
|
||||
|
||||
try {
|
||||
const result = ensureProjectWorkflowMcpConfig(projectRoot);
|
||||
assert.equal(result.status, "updated");
|
||||
|
||||
const parsed = JSON.parse(readFileSync(configPath, "utf-8")) as {
|
||||
mcpServers?: Record<string, { command?: string; args?: string[] }>;
|
||||
};
|
||||
assert.deepEqual(parsed.mcpServers?.railway, {
|
||||
command: "npx",
|
||||
args: ["railway-mcp"],
|
||||
});
|
||||
assert.ok(parsed.mcpServers?.[GSD_WORKFLOW_MCP_SERVER_NAME]);
|
||||
} finally {
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("ensureProjectWorkflowMcpConfig is idempotent when config is already current", () => {
|
||||
const projectRoot = mkdtempSync(join(tmpdir(), "gsd-mcp-init-"));
|
||||
mkdirSync(join(projectRoot, ".gsd"), { recursive: true });
|
||||
|
||||
try {
|
||||
const first = ensureProjectWorkflowMcpConfig(projectRoot);
|
||||
const second = ensureProjectWorkflowMcpConfig(projectRoot);
|
||||
|
||||
assert.equal(first.status, "created");
|
||||
assert.equal(second.status, "unchanged");
|
||||
assert.equal(first.configPath, second.configPath);
|
||||
} finally {
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
|
@ -2,6 +2,7 @@ import test, { describe } from "node:test";
|
|||
import assert from "node:assert/strict";
|
||||
|
||||
import {
|
||||
formatMcpInitResult,
|
||||
formatMcpStatusReport,
|
||||
formatMcpServerDetail,
|
||||
type McpServerStatus,
|
||||
|
|
@ -101,3 +102,17 @@ describe("formatMcpServerDetail", () => {
|
|||
assert.match(result, /disconnected/i);
|
||||
});
|
||||
});
|
||||
|
||||
describe("formatMcpInitResult", () => {
|
||||
test("shows created message with config path", () => {
|
||||
const result = formatMcpInitResult("created", "/tmp/project/.mcp.json", "/tmp/project");
|
||||
assert.match(result, /created project mcp config/i);
|
||||
assert.match(result, /\/tmp\/project\/\.mcp\.json/);
|
||||
assert.match(result, /claude code/i);
|
||||
});
|
||||
|
||||
test("shows unchanged message when config is current", () => {
|
||||
const result = formatMcpInitResult("unchanged", "/tmp/project/.mcp.json", "/tmp/project");
|
||||
assert.match(result, /already up to date/i);
|
||||
});
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,9 +1,11 @@
|
|||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { mkdtempSync, mkdirSync, readFileSync, writeFileSync } from "node:fs";
|
||||
import { existsSync, mkdtempSync, mkdirSync, readFileSync, rmSync, writeFileSync } from "node:fs";
|
||||
import { dirname, join } from "node:path";
|
||||
import { tmpdir } from "node:os";
|
||||
import { fileURLToPath } from "node:url";
|
||||
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
|
||||
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";
|
||||
|
||||
import {
|
||||
buildWorkflowMcpServers,
|
||||
|
|
@ -44,13 +46,14 @@ test("detectWorkflowMcpLaunchConfig prefers explicit env override", () => {
|
|||
command: "node",
|
||||
args: ["dist/cli.js"],
|
||||
cwd: "/tmp/project",
|
||||
env: {
|
||||
FOO: "bar",
|
||||
GSD_CLI_PATH: "/tmp/gsd",
|
||||
GSD_PERSIST_WRITE_GATE_STATE: "1",
|
||||
GSD_WORKFLOW_PROJECT_ROOT: "/tmp/project",
|
||||
},
|
||||
env: launch?.env,
|
||||
});
|
||||
assert.equal(launch?.env?.FOO, "bar");
|
||||
assert.equal(launch?.env?.GSD_CLI_PATH, "/tmp/gsd");
|
||||
assert.equal(launch?.env?.GSD_PERSIST_WRITE_GATE_STATE, "1");
|
||||
assert.equal(launch?.env?.GSD_WORKFLOW_PROJECT_ROOT, "/tmp/project");
|
||||
assert.match(launch?.env?.GSD_WORKFLOW_EXECUTORS_MODULE ?? "", /workflow-tool-executors\.(js|ts)$/);
|
||||
assert.match(launch?.env?.GSD_WORKFLOW_WRITE_GATE_MODULE ?? "", /write-gate\.(js|ts)$/);
|
||||
});
|
||||
|
||||
test("buildWorkflowMcpServers mirrors explicit launch config", () => {
|
||||
|
|
@ -63,12 +66,13 @@ test("buildWorkflowMcpServers mirrors explicit launch config", () => {
|
|||
"gsd-workflow": {
|
||||
command: "node",
|
||||
args: ["dist/cli.js"],
|
||||
env: {
|
||||
GSD_PERSIST_WRITE_GATE_STATE: "1",
|
||||
GSD_WORKFLOW_PROJECT_ROOT: "/tmp/project",
|
||||
},
|
||||
env: servers?.["gsd-workflow"]?.env,
|
||||
},
|
||||
});
|
||||
assert.equal((servers?.["gsd-workflow"]?.env as Record<string, string> | undefined)?.GSD_PERSIST_WRITE_GATE_STATE, "1");
|
||||
assert.equal((servers?.["gsd-workflow"]?.env as Record<string, string> | undefined)?.GSD_WORKFLOW_PROJECT_ROOT, "/tmp/project");
|
||||
assert.match((servers?.["gsd-workflow"]?.env as Record<string, string> | undefined)?.GSD_WORKFLOW_EXECUTORS_MODULE ?? "", /workflow-tool-executors\.(js|ts)$/);
|
||||
assert.match((servers?.["gsd-workflow"]?.env as Record<string, string> | undefined)?.GSD_WORKFLOW_WRITE_GATE_MODULE ?? "", /write-gate\.(js|ts)$/);
|
||||
});
|
||||
|
||||
test("detectWorkflowMcpLaunchConfig resolves the bundled server from GSD_PROJECT_ROOT", () => {
|
||||
|
|
@ -88,11 +92,41 @@ test("detectWorkflowMcpLaunchConfig resolves the bundled server from GSD_PROJECT
|
|||
command: process.execPath,
|
||||
args: [cliPath],
|
||||
cwd: repoRoot,
|
||||
env: {
|
||||
GSD_PERSIST_WRITE_GATE_STATE: "1",
|
||||
GSD_WORKFLOW_PROJECT_ROOT: repoRoot,
|
||||
},
|
||||
env: launch?.env,
|
||||
});
|
||||
assert.equal(launch?.env?.GSD_PERSIST_WRITE_GATE_STATE, "1");
|
||||
assert.equal(launch?.env?.GSD_WORKFLOW_PROJECT_ROOT, repoRoot);
|
||||
assert.match(launch?.env?.GSD_WORKFLOW_EXECUTORS_MODULE ?? "", /workflow-tool-executors\.(js|ts)$/);
|
||||
assert.match(launch?.env?.GSD_WORKFLOW_WRITE_GATE_MODULE ?? "", /write-gate\.(js|ts)$/);
|
||||
});
|
||||
|
||||
test("detectWorkflowMcpLaunchConfig resolves the bundled server from GSD_BIN_PATH ancestry", () => {
|
||||
const repoRoot = mkdtempSync(join(tmpdir(), "gsd-workflow-root-"));
|
||||
const worktreeRoot = mkdtempSync(join(tmpdir(), "gsd-workflow-worktree-"));
|
||||
const cliPath = join(repoRoot, "packages", "mcp-server", "dist", "cli.js");
|
||||
const devCliPath = join(repoRoot, "scripts", "dev-cli.js");
|
||||
|
||||
mkdirSync(join(repoRoot, "packages", "mcp-server", "dist"), { recursive: true });
|
||||
mkdirSync(join(repoRoot, "scripts"), { recursive: true });
|
||||
writeFileSync(cliPath, "#!/usr/bin/env node\n", "utf-8");
|
||||
writeFileSync(devCliPath, "#!/usr/bin/env node\n", "utf-8");
|
||||
|
||||
const launch = detectWorkflowMcpLaunchConfig(worktreeRoot, {
|
||||
GSD_BIN_PATH: devCliPath,
|
||||
});
|
||||
|
||||
assert.deepEqual(launch, {
|
||||
name: "gsd-workflow",
|
||||
command: process.execPath,
|
||||
args: [cliPath],
|
||||
cwd: worktreeRoot,
|
||||
env: launch?.env,
|
||||
});
|
||||
assert.equal(launch?.env?.GSD_CLI_PATH, devCliPath);
|
||||
assert.equal(launch?.env?.GSD_PERSIST_WRITE_GATE_STATE, "1");
|
||||
assert.equal(launch?.env?.GSD_WORKFLOW_PROJECT_ROOT, worktreeRoot);
|
||||
assert.match(launch?.env?.GSD_WORKFLOW_EXECUTORS_MODULE ?? "", /workflow-tool-executors\.(js|ts)$/);
|
||||
assert.match(launch?.env?.GSD_WORKFLOW_WRITE_GATE_MODULE ?? "", /write-gate\.(js|ts)$/);
|
||||
});
|
||||
|
||||
test("detectWorkflowMcpLaunchConfig resolves the bundled server relative to the installed GSD package", () => {
|
||||
|
|
@ -104,10 +138,137 @@ test("detectWorkflowMcpLaunchConfig resolves the bundled server relative to the
|
|||
assert.equal(launch?.cwd, "/tmp/project");
|
||||
assert.equal(launch?.env?.GSD_CLI_PATH, "/tmp/gsd-loader.js");
|
||||
assert.equal(launch?.env?.GSD_WORKFLOW_PROJECT_ROOT, "/tmp/project");
|
||||
assert.match(launch?.env?.GSD_WORKFLOW_EXECUTORS_MODULE ?? "", /workflow-tool-executors\.(js|ts)$/);
|
||||
assert.match(launch?.env?.GSD_WORKFLOW_WRITE_GATE_MODULE ?? "", /write-gate\.(js|ts)$/);
|
||||
assert.equal(typeof launch?.args?.[0], "string");
|
||||
assert.match(launch?.args?.[0] ?? "", /packages[\/\\]mcp-server[\/\\]dist[\/\\]cli\.js$/);
|
||||
});
|
||||
|
||||
test("detectWorkflowMcpLaunchConfig resolves the bundled server relative to the package without env hints", () => {
|
||||
const launch = detectWorkflowMcpLaunchConfig("/tmp/project", {});
|
||||
|
||||
assert.equal(launch?.command, process.execPath);
|
||||
assert.equal(launch?.cwd, "/tmp/project");
|
||||
assert.equal(launch?.env?.GSD_CLI_PATH, undefined);
|
||||
assert.equal(launch?.env?.GSD_WORKFLOW_PROJECT_ROOT, "/tmp/project");
|
||||
assert.match(launch?.env?.GSD_WORKFLOW_EXECUTORS_MODULE ?? "", /workflow-tool-executors\.(js|ts)$/);
|
||||
assert.match(launch?.env?.GSD_WORKFLOW_WRITE_GATE_MODULE ?? "", /write-gate\.(js|ts)$/);
|
||||
assert.equal(typeof launch?.args?.[0], "string");
|
||||
assert.match(launch?.args?.[0] ?? "", /packages[\/\\]mcp-server[\/\\]dist[\/\\]cli\.js$/);
|
||||
});
|
||||
|
||||
test("workflow MCP launch config reaches mutation tools over stdio", async () => {
|
||||
const projectRoot = mkdtempSync(join(tmpdir(), "gsd-workflow-transport-"));
|
||||
mkdirSync(join(projectRoot, ".gsd"), { recursive: true });
|
||||
|
||||
const launch = detectWorkflowMcpLaunchConfig(projectRoot, {});
|
||||
assert.ok(launch, "expected a workflow MCP launch config");
|
||||
assert.match(
|
||||
launch.env?.GSD_WORKFLOW_EXECUTORS_MODULE ?? "",
|
||||
/dist[\/\\]resources[\/\\]extensions[\/\\]gsd[\/\\]tools[\/\\]workflow-tool-executors\.js$/,
|
||||
);
|
||||
assert.match(
|
||||
launch.env?.GSD_WORKFLOW_WRITE_GATE_MODULE ?? "",
|
||||
/dist[\/\\]resources[\/\\]extensions[\/\\]gsd[\/\\]bootstrap[\/\\]write-gate\.js$/,
|
||||
);
|
||||
|
||||
const client = new Client({ name: "workflow-mcp-transport-test", version: "1.0.0" });
|
||||
const transport = new StdioClientTransport({
|
||||
command: launch.command,
|
||||
args: launch.args,
|
||||
env: { ...process.env, ...launch.env } as Record<string, string>,
|
||||
cwd: launch.cwd,
|
||||
stderr: "pipe",
|
||||
});
|
||||
|
||||
try {
|
||||
await client.connect(transport, { timeout: 30_000 });
|
||||
|
||||
const tools = await client.listTools(undefined, { timeout: 30_000 });
|
||||
assert.ok(
|
||||
(tools.tools ?? []).some((tool) => tool.name === "gsd_plan_slice"),
|
||||
"expected workflow MCP surface to expose gsd_plan_slice",
|
||||
);
|
||||
|
||||
const milestoneResult = await client.callTool(
|
||||
{
|
||||
name: "gsd_plan_milestone",
|
||||
arguments: {
|
||||
projectDir: projectRoot,
|
||||
milestoneId: "M001",
|
||||
title: "Transport planning",
|
||||
vision: "Verify stdio workflow MCP uses the executor bridge.",
|
||||
slices: [
|
||||
{
|
||||
sliceId: "S01",
|
||||
title: "Bridge path",
|
||||
risk: "low",
|
||||
depends: [],
|
||||
demo: "Milestone planning succeeds over stdio MCP.",
|
||||
goal: "Prove the executor bridge works in the spawned server.",
|
||||
successCriteria: "gsd_plan_slice can write plan artifacts.",
|
||||
proofLevel: "integration",
|
||||
integrationClosure: "Stdio MCP client reaches the workflow executor bridge.",
|
||||
observabilityImpact: "Regression test covers the spawned-server path.",
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
undefined,
|
||||
{ timeout: 30_000 },
|
||||
);
|
||||
assert.equal(milestoneResult.isError, undefined);
|
||||
assert.match(
|
||||
((milestoneResult.content as Array<{ text?: string }>)?.[0])?.text ?? "",
|
||||
/Planned milestone M001/,
|
||||
);
|
||||
|
||||
const sliceResult = await client.callTool(
|
||||
{
|
||||
name: "gsd_plan_slice",
|
||||
arguments: {
|
||||
projectDir: projectRoot,
|
||||
milestoneId: "M001",
|
||||
sliceId: "S01",
|
||||
goal: "Persist slice planning over the spawned MCP transport.",
|
||||
tasks: [
|
||||
{
|
||||
taskId: "T01",
|
||||
title: "Connect the bridge",
|
||||
description: "Ensure the workflow executor bridge resolves in the child process.",
|
||||
estimate: "10m",
|
||||
files: ["src/resources/extensions/gsd/workflow-mcp.ts"],
|
||||
verify: "node --test",
|
||||
inputs: ["M001-ROADMAP.md"],
|
||||
expectedOutput: ["S01-PLAN.md", "T01-PLAN.md"],
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
undefined,
|
||||
{ timeout: 30_000 },
|
||||
);
|
||||
assert.equal(sliceResult.isError, undefined);
|
||||
assert.match(
|
||||
((sliceResult.content as Array<{ text?: string }>)?.[0])?.text ?? "",
|
||||
/Planned slice S01/,
|
||||
);
|
||||
assert.ok(
|
||||
existsSync(join(projectRoot, ".gsd", "milestones", "M001", "slices", "S01", "S01-PLAN.md")),
|
||||
"expected slice plan artifact to be written through stdio MCP",
|
||||
);
|
||||
assert.ok(
|
||||
existsSync(
|
||||
join(projectRoot, ".gsd", "milestones", "M001", "slices", "S01", "tasks", "T01-PLAN.md"),
|
||||
),
|
||||
"expected task plan artifact to be written through stdio MCP",
|
||||
);
|
||||
} finally {
|
||||
await client.close().catch(() => {});
|
||||
rmSync(projectRoot, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("usesWorkflowMcpTransport matches local externalCli providers", () => {
|
||||
assert.equal(usesWorkflowMcpTransport("externalCli", "local://claude-code"), true);
|
||||
assert.equal(usesWorkflowMcpTransport("externalCli", "https://api.example.com"), false);
|
||||
|
|
@ -131,7 +292,7 @@ test("transport compatibility passes when required tools fit current MCP surface
|
|||
assert.equal(error, null);
|
||||
});
|
||||
|
||||
test("transport compatibility fails cleanly when MCP server is unavailable", () => {
|
||||
test("transport compatibility discovers the bundled MCP server without env overrides", () => {
|
||||
const error = getWorkflowTransportSupportError(
|
||||
"claude-code",
|
||||
["gsd_task_complete"],
|
||||
|
|
@ -145,7 +306,7 @@ test("transport compatibility fails cleanly when MCP server is unavailable", ()
|
|||
},
|
||||
);
|
||||
|
||||
assert.match(error ?? "", /workflow MCP server is not configured or discoverable/);
|
||||
assert.equal(error, null);
|
||||
});
|
||||
|
||||
test("transport compatibility now allows auto execute-task over workflow MCP surface", () => {
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
import { execSync } from "node:child_process";
|
||||
import { existsSync } from "node:fs";
|
||||
import { resolve } from "node:path";
|
||||
import { dirname, resolve } from "node:path";
|
||||
import { fileURLToPath } from "node:url";
|
||||
|
||||
export interface WorkflowMcpLaunchConfig {
|
||||
|
|
@ -67,8 +67,32 @@ function lookupCommand(command: string, platform: NodeJS.Platform = process.plat
|
|||
}
|
||||
}
|
||||
|
||||
function findWorkflowCliFromAncestorPath(startPath: string): string | null {
|
||||
let current = resolve(startPath);
|
||||
|
||||
while (true) {
|
||||
const candidate = resolve(current, "packages", "mcp-server", "dist", "cli.js");
|
||||
if (existsSync(candidate)) return candidate;
|
||||
|
||||
const parent = dirname(current);
|
||||
if (parent === current) break;
|
||||
current = parent;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function getBundledWorkflowMcpCliPath(env: NodeJS.ProcessEnv): string | null {
|
||||
if (!env.GSD_BIN_PATH?.trim() && !env.GSD_CLI_PATH?.trim()) return null;
|
||||
const envAnchors = [
|
||||
env.GSD_BIN_PATH?.trim(),
|
||||
env.GSD_CLI_PATH?.trim(),
|
||||
env.GSD_WORKFLOW_PATH?.trim(),
|
||||
].filter((value): value is string => typeof value === "string" && value.length > 0);
|
||||
|
||||
for (const anchor of envAnchors) {
|
||||
const candidate = findWorkflowCliFromAncestorPath(anchor);
|
||||
if (candidate) return candidate;
|
||||
}
|
||||
|
||||
const candidates = [
|
||||
resolve(fileURLToPath(new URL("../../../../packages/mcp-server/dist/cli.js", import.meta.url))),
|
||||
|
|
@ -82,6 +106,52 @@ function getBundledWorkflowMcpCliPath(env: NodeJS.ProcessEnv): string | null {
|
|||
return null;
|
||||
}
|
||||
|
||||
function getBundledWorkflowExecutorModulePath(): string | null {
|
||||
const candidates = [
|
||||
resolve(fileURLToPath(new URL("../../../../dist/resources/extensions/gsd/tools/workflow-tool-executors.js", import.meta.url))),
|
||||
resolve(fileURLToPath(new URL("./tools/workflow-tool-executors.js", import.meta.url))),
|
||||
resolve(fileURLToPath(new URL("./tools/workflow-tool-executors.ts", import.meta.url))),
|
||||
];
|
||||
|
||||
for (const candidate of candidates) {
|
||||
if (existsSync(candidate)) return candidate;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function getBundledWorkflowWriteGateModulePath(): string | null {
|
||||
const candidates = [
|
||||
resolve(fileURLToPath(new URL("../../../../dist/resources/extensions/gsd/bootstrap/write-gate.js", import.meta.url))),
|
||||
resolve(fileURLToPath(new URL("./bootstrap/write-gate.js", import.meta.url))),
|
||||
resolve(fileURLToPath(new URL("./bootstrap/write-gate.ts", import.meta.url))),
|
||||
];
|
||||
|
||||
for (const candidate of candidates) {
|
||||
if (existsSync(candidate)) return candidate;
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
function buildWorkflowLaunchEnv(
|
||||
projectRoot: string,
|
||||
gsdCliPath: string | undefined,
|
||||
explicitEnv?: Record<string, string>,
|
||||
): Record<string, string> {
|
||||
const executorModulePath = getBundledWorkflowExecutorModulePath();
|
||||
const writeGateModulePath = getBundledWorkflowWriteGateModulePath();
|
||||
|
||||
return {
|
||||
...(explicitEnv ?? {}),
|
||||
...(gsdCliPath ? { GSD_CLI_PATH: gsdCliPath } : {}),
|
||||
...(executorModulePath ? { GSD_WORKFLOW_EXECUTORS_MODULE: executorModulePath } : {}),
|
||||
...(writeGateModulePath ? { GSD_WORKFLOW_WRITE_GATE_MODULE: writeGateModulePath } : {}),
|
||||
GSD_PERSIST_WRITE_GATE_STATE: "1",
|
||||
GSD_WORKFLOW_PROJECT_ROOT: projectRoot,
|
||||
};
|
||||
}
|
||||
|
||||
export function detectWorkflowMcpLaunchConfig(
|
||||
projectRoot = process.cwd(),
|
||||
env: NodeJS.ProcessEnv = process.env,
|
||||
|
|
@ -101,12 +171,7 @@ export function detectWorkflowMcpLaunchConfig(
|
|||
const resolvedWorkflowProjectRoot = resolve(workflowProjectRoot);
|
||||
|
||||
if (explicitCommand) {
|
||||
const launchEnv = {
|
||||
...(explicitEnv ?? {}),
|
||||
...(gsdCliPath ? { GSD_CLI_PATH: gsdCliPath } : {}),
|
||||
GSD_PERSIST_WRITE_GATE_STATE: "1",
|
||||
GSD_WORKFLOW_PROJECT_ROOT: resolve(workflowProjectRoot),
|
||||
};
|
||||
const launchEnv = buildWorkflowLaunchEnv(resolve(workflowProjectRoot), gsdCliPath, explicitEnv);
|
||||
return {
|
||||
name,
|
||||
command: explicitCommand,
|
||||
|
|
@ -123,11 +188,7 @@ export function detectWorkflowMcpLaunchConfig(
|
|||
command: process.execPath,
|
||||
args: [distCli],
|
||||
cwd: resolvedWorkflowProjectRoot,
|
||||
env: {
|
||||
...(gsdCliPath ? { GSD_CLI_PATH: gsdCliPath } : {}),
|
||||
GSD_PERSIST_WRITE_GATE_STATE: "1",
|
||||
GSD_WORKFLOW_PROJECT_ROOT: resolvedWorkflowProjectRoot,
|
||||
},
|
||||
env: buildWorkflowLaunchEnv(resolvedWorkflowProjectRoot, gsdCliPath),
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@ -138,11 +199,7 @@ export function detectWorkflowMcpLaunchConfig(
|
|||
command: process.execPath,
|
||||
args: [bundledCli],
|
||||
cwd: resolvedWorkflowProjectRoot,
|
||||
env: {
|
||||
...(gsdCliPath ? { GSD_CLI_PATH: gsdCliPath } : {}),
|
||||
GSD_PERSIST_WRITE_GATE_STATE: "1",
|
||||
GSD_WORKFLOW_PROJECT_ROOT: resolvedWorkflowProjectRoot,
|
||||
},
|
||||
env: buildWorkflowLaunchEnv(resolvedWorkflowProjectRoot, gsdCliPath),
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@ -151,11 +208,7 @@ export function detectWorkflowMcpLaunchConfig(
|
|||
return {
|
||||
name,
|
||||
command: binPath,
|
||||
env: {
|
||||
...(gsdCliPath ? { GSD_CLI_PATH: gsdCliPath } : {}),
|
||||
GSD_PERSIST_WRITE_GATE_STATE: "1",
|
||||
GSD_WORKFLOW_PROJECT_ROOT: resolvedWorkflowProjectRoot,
|
||||
},
|
||||
env: buildWorkflowLaunchEnv(resolvedWorkflowProjectRoot, gsdCliPath),
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -487,6 +487,32 @@ test("terminal consumes activeToolExecution from store", () => {
|
|||
);
|
||||
});
|
||||
|
||||
test("chat tool blocks normalize Claude Code tool names before choosing built-in render treatment", () => {
|
||||
const chatPath = resolve(import.meta.dirname, "../../../web/components/gsd/chat-mode.tsx");
|
||||
const source = readFileSync(chatPath, "utf-8");
|
||||
|
||||
assert.match(
|
||||
source,
|
||||
/const normalizedToolName = typeof tool\.name === "string" \? tool\.name\.toLowerCase\(\) : ""/,
|
||||
"chat-mode.tsx must normalize Claude Code tool names before matching built-in tool render branches",
|
||||
);
|
||||
assert.match(
|
||||
source,
|
||||
/normalizedToolName === "bash"/,
|
||||
"chat-mode.tsx must use normalized tool names for bash command rendering",
|
||||
);
|
||||
assert.match(
|
||||
source,
|
||||
/const autoExpandedRef = useRef\(false\)/,
|
||||
"chat-mode.tsx must track one-time auto-expansion for completed tool output blocks",
|
||||
);
|
||||
assert.match(
|
||||
source,
|
||||
/const hasVisibleResult = Boolean\(diff \|\| resultText\.trim\(\) \|\| isError\)/,
|
||||
"chat-mode.tsx must auto-expand tool blocks when visible result content arrives",
|
||||
);
|
||||
});
|
||||
|
||||
test("live browser panels consume live selectors and expose inspectable freshness markers", () => {
|
||||
const contractPath = resolve(import.meta.dirname, "../../../web/lib/command-surface-contract.ts")
|
||||
const storePath = resolve(import.meta.dirname, "../../../web/lib/gsd-workspace-store.tsx")
|
||||
|
|
|
|||
|
|
@ -1,43 +0,0 @@
|
|||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
|
||||
const { getProviderSetupAction } = await import(
|
||||
"../../packages/pi-coding-agent/src/modes/interactive/provider-auth-setup.ts"
|
||||
);
|
||||
|
||||
test("routes OAuth providers to the login dialog", () => {
|
||||
const action = getProviderSetupAction({
|
||||
provider: "github-copilot",
|
||||
authMode: "oauth",
|
||||
hasAuth: false,
|
||||
});
|
||||
|
||||
assert.deepEqual(action, { kind: "oauth-login" });
|
||||
});
|
||||
|
||||
test("keeps API-key providers out of the OAuth login flow", () => {
|
||||
for (const provider of ["alibaba-coding-plan", "zai", "xai"]) {
|
||||
const action = getProviderSetupAction({
|
||||
provider,
|
||||
authMode: "apiKey",
|
||||
hasAuth: false,
|
||||
});
|
||||
|
||||
assert.equal(action.kind, "status");
|
||||
assert.match(action.message, /API-key auth, not OAuth/);
|
||||
assert.match(action.message, new RegExp(provider));
|
||||
}
|
||||
});
|
||||
|
||||
test("tells already-configured API-key providers to use model selection", () => {
|
||||
const action = getProviderSetupAction({
|
||||
provider: "xai",
|
||||
authMode: "apiKey",
|
||||
hasAuth: true,
|
||||
});
|
||||
|
||||
assert.deepEqual(action, {
|
||||
kind: "status",
|
||||
message: "xai already has credentials configured. Use /model to select it.",
|
||||
});
|
||||
});
|
||||
|
|
@ -1892,6 +1892,8 @@ interface ChatPaneProps {
|
|||
*/
|
||||
function ToolExecutionBlock({ tool }: { tool: CompletedToolExecution }) {
|
||||
const [expanded, setExpanded] = useState(false)
|
||||
const autoExpandedRef = useRef(false)
|
||||
const normalizedToolName = typeof tool.name === "string" ? tool.name.toLowerCase() : ""
|
||||
|
||||
const path = typeof tool.args?.path === "string" ? tool.args.path : typeof tool.args?.file_path === "string" ? tool.args.file_path : null
|
||||
const shortPath = path ? (path.startsWith(process.env.HOME ?? "/Users") ? "~" + path.slice((process.env.HOME ?? "").length) : path) : null
|
||||
|
|
@ -1899,17 +1901,17 @@ function ToolExecutionBlock({ tool }: { tool: CompletedToolExecution }) {
|
|||
const diff = tool.result?.details?.diff as string | undefined
|
||||
|
||||
// Choose icon and label
|
||||
const icon = tool.name === "edit" ? <FileEdit className="h-3.5 w-3.5" />
|
||||
: tool.name === "write" ? <FilePlus className="h-3.5 w-3.5" />
|
||||
const icon = normalizedToolName === "edit" ? <FileEdit className="h-3.5 w-3.5" />
|
||||
: normalizedToolName === "write" ? <FilePlus className="h-3.5 w-3.5" />
|
||||
: <Terminal className="h-3.5 w-3.5" />
|
||||
|
||||
const label = tool.name === "edit" ? "Edit"
|
||||
: tool.name === "write" ? "Write"
|
||||
: tool.name === "bash" ? "$"
|
||||
const label = normalizedToolName === "edit" ? "Edit"
|
||||
: normalizedToolName === "write" ? "Write"
|
||||
: normalizedToolName === "bash" ? "$"
|
||||
: tool.name
|
||||
|
||||
// For bash, show the command
|
||||
const bashCommand = tool.name === "bash" && typeof tool.args?.command === "string" ? tool.args.command : null
|
||||
const bashCommand = normalizedToolName === "bash" && typeof tool.args?.command === "string" ? tool.args.command : null
|
||||
|
||||
// Result text (for bash output, read result, etc.)
|
||||
const resultText = tool.result?.content
|
||||
|
|
@ -1917,6 +1919,14 @@ function ToolExecutionBlock({ tool }: { tool: CompletedToolExecution }) {
|
|||
.map((c) => c.text)
|
||||
.join("\n") ?? ""
|
||||
|
||||
useEffect(() => {
|
||||
if (autoExpandedRef.current) return
|
||||
const hasVisibleResult = Boolean(diff || resultText.trim() || isError)
|
||||
if (!hasVisibleResult) return
|
||||
autoExpandedRef.current = true
|
||||
setExpanded(true)
|
||||
}, [diff, resultText, isError])
|
||||
|
||||
return (
|
||||
<div className="flex justify-start gap-3">
|
||||
<div className="w-7 flex-shrink-0" />
|
||||
|
|
|
|||
|
|
@ -411,6 +411,18 @@ export interface ToolExecutionStartEvent {
|
|||
[key: string]: unknown
|
||||
}
|
||||
|
||||
export interface ToolExecutionUpdateEvent {
|
||||
type: "tool_execution_update"
|
||||
toolCallId: string
|
||||
toolName: string
|
||||
partialResult?: {
|
||||
content?: Array<{ type: string; text?: string }>
|
||||
details?: Record<string, unknown>
|
||||
isError?: boolean
|
||||
}
|
||||
[key: string]: unknown
|
||||
}
|
||||
|
||||
export interface ToolExecutionEndEvent {
|
||||
type: "tool_execution_end"
|
||||
toolCallId: string
|
||||
|
|
@ -436,10 +448,11 @@ export type WorkspaceEvent =
|
|||
| ExtensionErrorEvent
|
||||
| MessageUpdateEvent
|
||||
| ToolExecutionStartEvent
|
||||
| ToolExecutionUpdateEvent
|
||||
| ToolExecutionEndEvent
|
||||
| AgentEndEvent
|
||||
| TurnEndEvent
|
||||
| ({ type: Exclude<string, "bridge_status" | "live_state_invalidation" | "extension_ui_request" | "extension_error" | "message_update" | "tool_execution_start" | "tool_execution_end" | "agent_end" | "turn_end">; [key: string]: unknown } & Record<string, unknown>)
|
||||
| ({ type: Exclude<string, "bridge_status" | "live_state_invalidation" | "extension_ui_request" | "extension_error" | "message_update" | "tool_execution_start" | "tool_execution_update" | "tool_execution_end" | "agent_end" | "turn_end">; [key: string]: unknown } & Record<string, unknown>)
|
||||
|
||||
export function isWorkspaceEvent(value: unknown): value is WorkspaceEvent {
|
||||
return value !== null && typeof value === "object" && typeof (value as Record<string, unknown>).type === "string"
|
||||
|
|
@ -491,6 +504,11 @@ export interface ActiveToolExecution {
|
|||
id: string
|
||||
name: string
|
||||
args?: Record<string, unknown>
|
||||
result?: {
|
||||
content?: Array<{ type: string; text?: string }>
|
||||
details?: Record<string, unknown>
|
||||
isError?: boolean
|
||||
}
|
||||
}
|
||||
|
||||
/** Completed tool execution with result — kept for chat rendering */
|
||||
|
|
@ -692,6 +710,8 @@ function summarizeEvent(event: WorkspaceEvent): { type: TerminalLineType; messag
|
|||
type: "output",
|
||||
message: `[Tool] ${typeof event.toolName === "string" ? event.toolName : "tool"} started`,
|
||||
}
|
||||
case "tool_execution_update":
|
||||
return null
|
||||
case "tool_execution_end":
|
||||
return {
|
||||
type: event.isError ? "error" : "success",
|
||||
|
|
@ -4924,6 +4944,9 @@ export class GSDWorkspaceStore {
|
|||
case "tool_execution_start":
|
||||
this.handleToolExecutionStart(event as ToolExecutionStartEvent)
|
||||
break
|
||||
case "tool_execution_update":
|
||||
this.handleToolExecutionUpdate(event as ToolExecutionUpdateEvent)
|
||||
break
|
||||
case "tool_execution_end":
|
||||
this.handleToolExecutionEnd(event as ToolExecutionEndEvent)
|
||||
break
|
||||
|
|
@ -5106,6 +5129,23 @@ export class GSDWorkspaceStore {
|
|||
})
|
||||
}
|
||||
|
||||
private handleToolExecutionUpdate(event: ToolExecutionUpdateEvent): void {
|
||||
const active = this.state.activeToolExecution
|
||||
if (!active || active.id !== event.toolCallId) return
|
||||
this.patchState({
|
||||
activeToolExecution: {
|
||||
...active,
|
||||
result: event.partialResult
|
||||
? {
|
||||
content: event.partialResult.content,
|
||||
details: event.partialResult.details,
|
||||
isError: Boolean(event.partialResult.isError),
|
||||
}
|
||||
: active.result,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
private handleToolExecutionEnd(event: ToolExecutionEndEvent): void {
|
||||
const active = this.state.activeToolExecution
|
||||
if (active) {
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue