fix(footer): display active inference model during execution (#1982)

* fix(footer): display active inference model instead of configured model (#1844)

The footer read state.model which updates immediately on model selection,
but the running agent loop captures the model at _runLoop() start time.
This caused the footer to show the wrong model when the user switched
models mid-inference.

Add activeInferenceModel to AgentState, set it when _runLoop begins, and
clear it when the loop ends. The footer now prefers activeInferenceModel
over model, so it always shows the model actually being used for the
current inference.

Bug 2 follow-up to PR #1975 which fixed Bug 1 (queued messages cancel
tool calls).

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

* ci: retrigger after stale check

* fix(test): rewrite agent test to use structural assertions

The mock StreamFn returned a plain AsyncGenerator but
AssistantMessageEventStream requires additional properties,
causing CI build failure. Rewrote tests as source-verification
assertions (matching other GSD test patterns) and excluded
test files from tsconfig build.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>

---------

Co-authored-by: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Tom Boucher 2026-03-22 19:06:49 -04:00 committed by GitHub
parent 615c6845b2
commit 8d4b9d08a5
5 changed files with 73 additions and 7 deletions

View file

@ -0,0 +1,53 @@
// Agent activeInferenceModel regression tests
// Verifies that activeInferenceModel is set/cleared correctly in _runLoop,
// and that the footer reads activeInferenceModel instead of state.model.
// Regression test for https://github.com/gsd-build/gsd-2/issues/1844 Bug 2
import { describe, it } from "node:test";
import assert from "node:assert/strict";
import { readFileSync } from "node:fs";
import { join, dirname } from "node:path";
import { fileURLToPath } from "node:url";
const __dirname = dirname(fileURLToPath(import.meta.url));
describe("Agent — activeInferenceModel (#1844 Bug 2)", () => {
it("activeInferenceModel is declared in AgentState interface", () => {
const typesSource = readFileSync(join(__dirname, "types.ts"), "utf-8");
assert.match(typesSource, /activeInferenceModel\??:\s*Model/,
"AgentState must declare activeInferenceModel field");
});
it("_runLoop sets activeInferenceModel before streaming and clears in finally", () => {
const agentSource = readFileSync(join(__dirname, "agent.ts"), "utf-8");
// Must set activeInferenceModel = model before streaming starts
const setLine = agentSource.indexOf("this._state.activeInferenceModel = model");
assert.ok(setLine > -1, "agent.ts must set activeInferenceModel = model in _runLoop");
// Must clear activeInferenceModel = undefined after streaming completes
const clearLine = agentSource.indexOf("this._state.activeInferenceModel = undefined");
assert.ok(clearLine > -1, "agent.ts must clear activeInferenceModel in finally block");
// The set must come before the clear
assert.ok(setLine < clearLine, "activeInferenceModel must be set before cleared");
});
it("footer displays activeInferenceModel instead of state.model", () => {
const footerPath = join(__dirname, "..", "..", "pi-coding-agent", "src",
"modes", "interactive", "components", "footer.ts");
const footerSource = readFileSync(footerPath, "utf-8");
assert.match(footerSource, /activeInferenceModel/,
"footer.ts must reference activeInferenceModel for display");
});
it("activeInferenceModel is set before AbortController creation", () => {
const agentSource = readFileSync(join(__dirname, "agent.ts"), "utf-8");
const setLine = agentSource.indexOf("this._state.activeInferenceModel = model");
const abortLine = agentSource.indexOf("this.abortController = new AbortController");
assert.ok(setLine > -1 && abortLine > -1);
assert.ok(setLine < abortLine,
"activeInferenceModel must be set before streaming infrastructure is created");
});
});

View file

@ -457,6 +457,8 @@ export class Agent {
const model = this._state.model;
if (!model) throw new Error("No model configured");
this._state.activeInferenceModel = model;
this.runningPrompt = new Promise<void>((resolve) => {
this.resolveRunningPrompt = resolve;
});
@ -581,6 +583,7 @@ export class Agent {
this._state.isStreaming = false;
this._state.streamMessage = null;
this._state.pendingToolCalls = new Set<string>();
this._state.activeInferenceModel = undefined;
this.abortController = undefined;
this.resolveRunningPrompt?.();
this.runningPrompt = undefined;

View file

@ -239,6 +239,12 @@ export interface AgentState {
streamMessage: AgentMessage | null;
pendingToolCalls: Set<string>;
error?: string;
/**
* The model currently being used for inference. Set at _runLoop() start,
* cleared when the loop ends. When present, UI should display this instead
* of `model` to avoid showing a stale value after a mid-turn model switch.
*/
activeInferenceModel?: Model<any>;
}
export interface AgentToolResult<T> {

View file

@ -23,5 +23,5 @@
"rootDir": "./src"
},
"include": ["src/**/*.ts"],
"exclude": ["node_modules", "dist", "**/*.d.ts", "src/**/*.d.ts"]
"exclude": ["node_modules", "dist", "**/*.d.ts", "src/**/*.d.ts", "src/**/*.test.ts"]
}

View file

@ -68,10 +68,14 @@ export class FooterComponent implements Component {
const totalCacheWrite = usageTotals.cacheWrite;
const totalCost = usageTotals.cost;
// Use activeInferenceModel during streaming to show the model actually
// being used, not the configured model which may have been switched mid-turn.
const displayModel = state.activeInferenceModel ?? state.model;
// Calculate context usage from session (handles compaction correctly).
// After compaction, tokens are unknown until the next LLM response.
const contextUsage = this.session.getContextUsage();
const contextWindow = contextUsage?.contextWindow ?? state.model?.contextWindow ?? 0;
const contextWindow = contextUsage?.contextWindow ?? displayModel?.contextWindow ?? 0;
const contextPercentValue = contextUsage?.percent ?? 0;
const contextPercent = contextUsage?.percent !== null ? contextPercentValue.toFixed(1) : "?";
@ -102,7 +106,7 @@ export class FooterComponent implements Component {
if (totalCacheWrite) statsParts.push(`W${formatTokens(totalCacheWrite)}`);
// Show cost with "(sub)" indicator if using OAuth subscription
const usingSubscription = state.model ? this.session.modelRegistry.isUsingOAuth(state.model) : false;
const usingSubscription = displayModel ? this.session.modelRegistry.isUsingOAuth(displayModel) : false;
if (totalCost || usingSubscription) {
const costStr = `$${totalCost.toFixed(3)}${usingSubscription ? " (sub)" : ""}`;
statsParts.push(costStr);
@ -127,7 +131,7 @@ export class FooterComponent implements Component {
let statsLeft = statsParts.join(" ");
// Add model name on the right side, plus thinking level if model supports it
const modelName = state.model?.id || "no-model";
const modelName = displayModel?.id || "no-model";
let statsLeftWidth = visibleWidth(statsLeft);
@ -142,7 +146,7 @@ export class FooterComponent implements Component {
// Add thinking level indicator if model supports reasoning
let rightSideWithoutProvider = modelName;
if (state.model?.reasoning) {
if (displayModel?.reasoning) {
const thinkingLevel = state.thinkingLevel || "off";
rightSideWithoutProvider =
thinkingLevel === "off" ? `${modelName} • thinking off` : `${modelName}${thinkingLevel}`;
@ -150,8 +154,8 @@ export class FooterComponent implements Component {
// Prepend the provider in parentheses if there are multiple providers and there's enough room
let rightSide = rightSideWithoutProvider;
if (this.footerData.getAvailableProviderCount() > 1 && state.model) {
rightSide = `(${state.model!.provider}) ${rightSideWithoutProvider}`;
if (this.footerData.getAvailableProviderCount() > 1 && displayModel) {
rightSide = `(${displayModel.provider}) ${rightSideWithoutProvider}`;
if (statsLeftWidth + minPadding + visibleWidth(rightSide) > width) {
// Too wide, fall back
rightSide = rightSideWithoutProvider;