diff --git a/src/resources/extensions/gsd/memory-extractor.ts b/src/resources/extensions/gsd/memory-extractor.ts index c63a385a5..acca3c7a0 100644 --- a/src/resources/extensions/gsd/memory-extractor.ts +++ b/src/resources/extensions/gsd/memory-extractor.ts @@ -87,14 +87,22 @@ export function buildMemoryLLMCall(ctx: ExtensionContext): LLMCallFn | null { const selectedModel = model as Model; + // Resolve API key via modelRegistry so OAuth tokens (auth.json) are used. + // Without this, streamSimpleAnthropic only checks env vars via getEnvApiKey, + // which returns undefined for OAuth users (Claude Max / Claude Pro). + // See: https://github.com/gsd-build/gsd-2/issues/2959 + const resolvedKeyPromise = ctx.modelRegistry.getApiKey(selectedModel).catch(() => undefined); + return async (system: string, user: string): Promise<string> => { const { completeSimple } = await import('@gsd/pi-ai'); + const resolvedApiKey = await resolvedKeyPromise; const result: AssistantMessage = await completeSimple(selectedModel, { systemPrompt: system, messages: [{ role: 'user', content: [{ type: 'text', text: user }], timestamp: Date.now() }], }, { maxTokens: 2048, temperature: 0, + ...(resolvedApiKey ? 
{ apiKey: resolvedApiKey } : {}), }); // Extract text from response diff --git a/src/resources/extensions/gsd/tests/memory-extractor.test.ts b/src/resources/extensions/gsd/tests/memory-extractor.test.ts index 4df555470..47839f67b 100644 --- a/src/resources/extensions/gsd/tests/memory-extractor.test.ts +++ b/src/resources/extensions/gsd/tests/memory-extractor.test.ts @@ -1,4 +1,4 @@ -import { parseMemoryResponse, _resetExtractionState } from '../memory-extractor.ts'; +import { parseMemoryResponse, _resetExtractionState, buildMemoryLLMCall } from '../memory-extractor.ts'; import { openDatabase, closeDatabase, @@ -9,7 +9,7 @@ import { getActiveMemoriesRanked, } from '../memory-store.ts'; import type { MemoryAction } from '../memory-store.ts'; -import { describe, test, beforeEach, afterEach } from 'node:test'; +import { describe, test, beforeEach, afterEach, mock } from 'node:test'; import assert from 'node:assert/strict'; // ═══════════════════════════════════════════════════════════════════════════ @@ -169,3 +169,86 @@ test('memory-extractor: reset extraction state', () => { assert.ok(true, '_resetExtractionState should not throw'); }); +// ═══════════════════════════════════════════════════════════════════════════ +// memory-extractor: buildMemoryLLMCall resolves OAuth API key via modelRegistry +// Regression test for #2959 — OAuth users had broken memory extraction +// because streamSimpleAnthropic only checked env vars, not auth.json. 
+// ═══════════════════════════════════════════════════════════════════════════ + +test('memory-extractor: buildMemoryLLMCall resolves API key from modelRegistry for OAuth users', async () => { + const OAUTH_TOKEN = 'sk-ant-oat-test-oauth-token-12345'; + let getApiKeyCalled = false; + + const fakeModel = { + id: 'claude-haiku-test', + provider: 'anthropic', + api: 'anthropic-messages', + cost: { input: 0.25, output: 1.25 }, + }; + + const ctx = { + modelRegistry: { + getAvailable: () => [fakeModel], + getApiKey: async (_model: any) => { + getApiKeyCalled = true; + return OAUTH_TOKEN; + }, + }, + } as any; + + const llmCallFn = buildMemoryLLMCall(ctx); + assert.ok(llmCallFn !== null, 'buildMemoryLLMCall should return a function when models are available'); + + // The function should have resolved the API key eagerly via modelRegistry.getApiKey. + // Give the async getApiKey a tick to resolve. + await new Promise(resolve => setTimeout(resolve, 50)); + assert.ok(getApiKeyCalled, 'buildMemoryLLMCall must call modelRegistry.getApiKey() to resolve OAuth tokens'); +}); + +test('memory-extractor: buildMemoryLLMCall returns null when no models available', () => { + const ctx = { + modelRegistry: { + getAvailable: () => [], + getApiKey: async () => undefined, + }, + } as any; + + const llmCallFn = buildMemoryLLMCall(ctx); + assert.strictEqual(llmCallFn, null, 'should return null when no models available'); +}); + +test('memory-extractor: buildMemoryLLMCall prefers haiku model', async () => { + let resolvedModelId: string | undefined; + + const haikuModel = { + id: 'claude-3-5-haiku-20241022', + provider: 'anthropic', + api: 'anthropic-messages', + cost: { input: 0.25, output: 1.25 }, + }; + const sonnetModel = { + id: 'claude-sonnet-4-20250514', + provider: 'anthropic', + api: 'anthropic-messages', + cost: { input: 3, output: 15 }, + }; + + const ctx = { + modelRegistry: { + getAvailable: () => [sonnetModel, haikuModel], + getApiKey: async (model: any) => { + resolvedModelId 
= model.id; + return 'sk-ant-oat-test-token'; + }, + }, + } as any; + + const llmCallFn = buildMemoryLLMCall(ctx); + assert.ok(llmCallFn !== null, 'should return a function'); + + // Wait for the async getApiKey to resolve + await new Promise(resolve => setTimeout(resolve, 50)); + assert.strictEqual(resolvedModelId, 'claude-3-5-haiku-20241022', + 'should resolve API key for haiku model, not sonnet'); +}); +