fix(gsd): wire ADR-005 infrastructure into live paths

Addresses Codex adversarial review findings — the ADR-005 registries
and filters were built but not connected to the actual model selection
and provider adapter paths.

Fix 1+2: Tool filtering applied after model selection
- selectAndApplyModel() now calls adjustToolSet() after pi.setModel()
- Incompatible tools are removed via pi.setActiveTools()
- adjust_tool_set hook fires to allow extension overrides
- Verbose output reports filtered tools with provider context

Fix 3: ProviderSwitchReport wired through all 6 provider adapters
- New transformMessagesWithReport() convenience wrapper creates report,
  passes it to transformMessages(), and logs non-empty reports to stderr
  when GSD_VERBOSE=1 or PI_VERBOSE=1
- All adapters updated: anthropic, google, openai-responses,
  openai-completions, mistral, bedrock
This commit is contained in:
Jeremy 2026-04-10 12:49:49 -05:00
parent b1c0dafc70
commit 2ad315b9fb
9 changed files with 81 additions and 21 deletions

View file

@ -15,7 +15,7 @@ export * from "./providers/openai-responses.js";
export * from "./providers/provider-capabilities.js";
export * from "./providers/register-builtins.js";
export type { ProviderSwitchReport } from "./providers/transform-messages.js";
export { createEmptyReport, hasTransformations } from "./providers/transform-messages.js";
export { createEmptyReport, hasTransformations, transformMessagesWithReport } from "./providers/transform-messages.js";
export * from "./stream.js";
export * from "./types.js";
export * from "./utils/event-stream.js";

View file

@ -43,7 +43,7 @@ import { AssistantMessageEventStream } from "../utils/event-stream.js";
import { parseStreamingJson } from "../utils/json-parse.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { adjustMaxTokensForThinking, buildBaseOptions, clampReasoning } from "./simple-options.js";
import { transformMessages } from "./transform-messages.js";
import { transformMessagesWithReport } from "./transform-messages.js";
export interface BedrockOptions extends StreamOptions {
region?: string;
@ -487,7 +487,7 @@ function convertMessages(
cacheRetention: CacheRetention,
): Message[] {
const result: Message[] = [];
const transformedMessages = transformMessages(context.messages, model, normalizeToolCallId);
const transformedMessages = transformMessagesWithReport(context.messages, model, normalizeToolCallId, "bedrock-converse-stream");
for (let i = 0; i < transformedMessages.length; i++) {
const m = transformedMessages[i];

View file

@ -33,7 +33,7 @@ import type { AssistantMessageEventStream } from "../utils/event-stream.js";
import { parseStreamingJson } from "../utils/json-parse.js";
import { hasXmlParameterTags, repairToolJson } from "../utils/repair-tool-json.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { transformMessages } from "./transform-messages.js";
import { transformMessagesWithReport } from "./transform-messages.js";
export type AnthropicEffort = "low" | "medium" | "high" | "max";
@ -235,7 +235,7 @@ export function convertMessages(
): MessageParam[] {
const params: MessageParam[] = [];
const transformedMessages = transformMessages(messages, model, normalizeToolCallId);
const transformedMessages = transformMessagesWithReport(messages, model, normalizeToolCallId, "anthropic-messages");
for (let i = 0; i < transformedMessages.length; i++) {
const msg = transformedMessages[i];

View file

@ -5,7 +5,7 @@
import { type Content, FinishReason, FunctionCallingConfigMode, type Part } from "@google/genai";
import type { Context, ImageContent, Model, StopReason, TextContent, Tool } from "../types.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { transformMessages } from "./transform-messages.js";
import { transformMessagesWithReport } from "./transform-messages.js";
type GoogleApiType = "google-generative-ai" | "google-gemini-cli" | "google-vertex";
@ -80,7 +80,7 @@ export function convertMessages<T extends GoogleApiType>(model: Model<T>, contex
return id.replace(/[^a-zA-Z0-9_-]/g, "_").slice(0, 64);
};
const transformedMessages = transformMessages(context.messages, model, normalizeToolCallId);
const transformedMessages = transformMessagesWithReport(context.messages, model, normalizeToolCallId, "google-generative-ai");
for (const msg of transformedMessages) {
if (msg.role === "user") {

View file

@ -39,7 +39,7 @@ import { shortHash } from "../utils/hash.js";
import { parseStreamingJson } from "../utils/json-parse.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { buildBaseOptions, clampReasoning } from "./simple-options.js";
import { transformMessages } from "./transform-messages.js";
import { transformMessagesWithReport } from "./transform-messages.js";
const MISTRAL_TOOL_CALL_ID_LENGTH = 9;
const MAX_MISTRAL_ERROR_BODY_CHARS = 4000;
@ -79,7 +79,7 @@ export const streamMistral: StreamFunction<"mistral-conversations", MistralOptio
});
const normalizeMistralToolCallId = createMistralToolCallIdNormalizer();
const transformedMessages = transformMessages(context.messages, model, (id) => normalizeMistralToolCallId(id));
const transformedMessages = transformMessagesWithReport(context.messages, model, (id) => normalizeMistralToolCallId(id), "mistral-conversations");
let payload = buildChatPayload(model, context, transformedMessages, options);
const nextPayload = await options?.onPayload?.(payload, model);

View file

@ -39,7 +39,7 @@ import {
finalizeStream,
handleStreamError,
} from "./openai-shared.js";
import { transformMessages } from "./transform-messages.js";
import { transformMessagesWithReport } from "./transform-messages.js";
/**
* Check if conversation messages contain tool calls or tool results.
@ -441,7 +441,7 @@ export function convertMessages(
return id;
};
const transformedMessages = transformMessages(context.messages, model, (id) => normalizeToolCallId(id));
const transformedMessages = transformMessagesWithReport(context.messages, model, (id) => normalizeToolCallId(id), "openai-completions");
if (context.systemPrompt) {
const useDeveloperRole = model.reasoning && compat.supportsDeveloperRole;

View file

@ -30,7 +30,7 @@ import type { AssistantMessageEventStream } from "../utils/event-stream.js";
import { shortHash } from "../utils/hash.js";
import { parseStreamingJson } from "../utils/json-parse.js";
import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
import { transformMessages } from "./transform-messages.js";
import { transformMessagesWithReport } from "./transform-messages.js";
// =============================================================================
// Utilities
@ -108,7 +108,7 @@ export function convertResponsesMessages<TApi extends Api>(
return `${normalizedCallId}|${normalizedItemId}`;
};
const transformedMessages = transformMessages(context.messages, model, normalizeToolCallId);
const transformedMessages = transformMessagesWithReport(context.messages, model, normalizeToolCallId, "openai-responses");
const includeSystemPrompt = options?.includeSystemPrompt ?? true;
if (includeSystemPrompt && context.systemPrompt) {

View file

@ -49,6 +49,39 @@ export function hasTransformations(report: ProviderSwitchReport): boolean {
);
}
/**
 * Convenience wrapper for provider adapters (ADR-005).
 *
 * Builds a fresh ProviderSwitchReport, delegates to transformMessages() with
 * it, and emits a debug-level log whenever the report records any
 * cross-provider transformations.
 *
 * @param messages - Conversation messages to transform for the target model.
 * @param model - Target model; `model.api` becomes the report's destination API.
 * @param normalizeToolCallId - Optional per-provider tool-call-ID normalizer,
 *   forwarded unchanged to transformMessages().
 * @param sourceApi - Label for the API the messages came from; defaults to
 *   "unknown" when the caller cannot determine it.
 * @returns The transformed message list from transformMessages().
 */
export function transformMessagesWithReport<TApi extends Api>(
  messages: Message[],
  model: Model<TApi>,
  normalizeToolCallId?: (id: string, model: Model<TApi>, source: AssistantMessage) => string,
  sourceApi?: string,
): Message[] {
  const fromApi = sourceApi ?? "unknown";
  const switchReport = createEmptyReport(fromApi, model.api);
  const transformed = transformMessages(messages, model, normalizeToolCallId, switchReport);
  // Only surface the report when something was actually changed.
  if (hasTransformations(switchReport)) {
    logProviderSwitchReport(switchReport);
  }
  return transformed;
}
/**
 * Log a non-empty ProviderSwitchReport as a debug-level warning.
 *
 * Output goes to process.stderr — observable in verbose/debug modes without
 * polluting stdout, which may carry structured output (RPC/MCP) — and is
 * emitted only when GSD_VERBOSE=1 or PI_VERBOSE=1 is set.
 */
function logProviderSwitchReport(report: ProviderSwitchReport): void {
  // Bail out before any string building when verbose output is disabled.
  if (process.env.GSD_VERBOSE !== "1" && process.env.PI_VERBOSE !== "1") {
    return;
  }
  const details: string[] = [];
  if (report.thinkingBlocksDropped > 0) details.push(`${report.thinkingBlocksDropped} thinking blocks dropped`);
  if (report.thinkingBlocksDowngraded > 0) details.push(`${report.thinkingBlocksDowngraded} thinking blocks downgraded`);
  if (report.toolCallIdsRemapped > 0) details.push(`${report.toolCallIdsRemapped} tool call IDs remapped`);
  if (report.syntheticToolResultsInserted > 0) details.push(`${report.syntheticToolResultsInserted} synthetic tool results inserted`);
  if (report.thoughtSignaturesDropped > 0) details.push(`${report.thoughtSignaturesDropped} thought signatures dropped`);
  // FIX: the from/to APIs were previously concatenated with no separator
  // (e.g. "anthropicgoogle") and the ":"-terminated header was joined into
  // the detail list with ", " ("…:, 2 thinking blocks dropped"). Keep the
  // header separate and insert an explicit arrow between the two APIs.
  process.stderr.write(
    `[provider-switch] Provider switch ${report.fromApi} -> ${report.toApi}: ${details.join(", ")}\n`,
  );
}
/**
* Normalize tool call ID for cross-provider compatibility.
* OpenAI Responses API generates IDs that are 450+ chars with special characters like `|`.

View file

@ -5,12 +5,13 @@
*/
import type { Api, Model } from "@gsd/pi-ai";
import { getProviderCapabilities } from "@gsd/pi-ai";
import type { ExtensionAPI, ExtensionContext } from "@gsd/pi-coding-agent";
import type { GSDPreferences } from "./preferences.js";
import { resolveModelWithFallbacksForUnit, resolveDynamicRoutingConfig } from "./preferences.js";
import type { ComplexityTier } from "./complexity-classifier.js";
import { classifyUnitComplexity, tierLabel } from "./complexity-classifier.js";
import { resolveModelForComplexity, escalateTier, getEligibleModels, loadCapabilityOverrides } from "./model-router.js";
import { resolveModelForComplexity, escalateTier, getEligibleModels, loadCapabilityOverrides, adjustToolSet, filterToolsForProvider } from "./model-router.js";
import { getLedger, getProjectTotals } from "./metrics.js";
import { unitPhaseLabel } from "./auto-dashboard.js";
@ -212,13 +213,6 @@ export async function selectAndApplyModel(
"info",
);
}
// ADR-005: Report tools filtered due to provider incompatibility
if (routingResult.filteredTools && routingResult.filteredTools.length > 0) {
ctx.ui.notify(
`Tool compatibility: ${routingResult.filteredTools.length} tools filtered for provider — ${routingResult.filteredTools.join(", ")}`,
"info",
);
}
}
}
routingTierLabel = ` [${tierLabel(classification.tier)}]`;
@ -251,12 +245,45 @@ export async function selectAndApplyModel(
const ok = await pi.setModel(model, { persist: false });
if (ok) {
appliedModel = model;
// ADR-005: Adjust active tool set for the selected model's provider capabilities.
// Hard-filter incompatible tools, then let extensions override via adjust_tool_set hook.
const activeToolNames = pi.getActiveTools();
const { toolNames: compatibleTools, removedTools } = adjustToolSet(activeToolNames, model.api);
let finalToolNames = compatibleTools;
// Fire adjust_tool_set hook — extensions can override the filtered tool set
if (routingConfig.hooks !== false) {
const hookResult = await pi.emitAdjustToolSet({
selectedModelApi: model.api,
selectedModelProvider: model.provider,
selectedModelId: model.id,
activeToolNames,
filteredTools: removedTools,
});
if (hookResult?.toolNames) {
finalToolNames = hookResult.toolNames;
}
}
// Apply the filtered tool set if any tools were removed
if (removedTools.length > 0 || finalToolNames.length !== activeToolNames.length) {
pi.setActiveTools(finalToolNames);
}
if (verbose) {
const fallbackNote = modelId === effectiveModelConfig.primary
? ""
: ` (fallback from ${effectiveModelConfig.primary})`;
const phase = unitPhaseLabel(unitType);
ctx.ui.notify(`Model [${phase}]${routingTierLabel}: ${model.provider}/${model.id}${fallbackNote}`, "info");
// ADR-005: Report tools filtered due to provider incompatibility
if (removedTools.length > 0) {
ctx.ui.notify(
`Tool compatibility: ${removedTools.length} tools filtered for ${model.api}${removedTools.join(", ")}`,
"info",
);
}
}
break;
} else {